author     Dimitry Andric <dim@FreeBSD.org>     2017-07-13 19:25:18 +0000
committer  Dimitry Andric <dim@FreeBSD.org>     2017-07-13 19:25:18 +0000
commit     ca089b24d48ef6fa8da2d0bb8c25bb802c4a95c0 (patch)
tree       3a28a772df9b17aef34f49e3c727965ad28c0c93
parent     9df3605dea17e84f8183581f6103bd0c79e2a606 (diff)
download   src-ca089b24d48ef6fa8da2d0bb8c25bb802c4a95c0.tar.gz
           src-ca089b24d48ef6fa8da2d0bb8c25bb802c4a95c0.zip
Vendor import of llvm trunk r307894 (tag: vendor/llvm/llvm-trunk-r307894)
Notes:
    svn path=/vendor/llvm/dist/; revision=320957
    svn path=/vendor/llvm/llvm-trunk-r307894/; revision=320958; tag=vendor/llvm/llvm-trunk-r307894

Diffstat
-rw-r--r--CMakeLists.txt4
-rwxr-xr-xcmake/modules/AddLLVM.cmake26
-rw-r--r--cmake/modules/HandleLLVMOptions.cmake4
-rw-r--r--cmake/modules/LLVMExternalProjectUtils.cmake10
-rw-r--r--docs/AMDGPUUsage.rst4
-rw-r--r--docs/CMake.rst5
-rw-r--r--docs/CMakePrimer.rst27
-rw-r--r--docs/CommandGuide/lit.rst7
-rw-r--r--docs/CommandGuide/llvm-cov.rst6
-rw-r--r--docs/CommandGuide/llvm-profdata.rst6
-rw-r--r--docs/Coroutines.rst2
-rw-r--r--docs/Docker.rst8
-rw-r--r--docs/Dummy.html0
-rw-r--r--docs/HowToAddABuilder.rst3
-rw-r--r--docs/LangRef.rst236
-rw-r--r--docs/LibFuzzer.rst2
-rw-r--r--docs/tutorial/BuildingAJIT1.rst8
-rw-r--r--docs/tutorial/BuildingAJIT2.rst4
-rw-r--r--docs/tutorial/LangImpl02.rst2
-rw-r--r--docs/tutorial/LangImpl03.rst6
-rw-r--r--docs/tutorial/LangImpl04.rst2
-rw-r--r--docs/tutorial/LangImpl05.rst4
-rw-r--r--docs/tutorial/LangImpl06.rst4
-rw-r--r--docs/tutorial/OCamlLangImpl5.rst2
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter1/KaleidoscopeJIT.h8
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp2
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter2/KaleidoscopeJIT.h8
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp2
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter3/KaleidoscopeJIT.h7
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp2
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter4/KaleidoscopeJIT.h10
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter4/toy.cpp2
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter5/KaleidoscopeJIT.h25
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter5/toy.cpp2
-rw-r--r--examples/Kaleidoscope/Chapter4/toy.cpp2
-rw-r--r--examples/Kaleidoscope/Chapter5/toy.cpp2
-rw-r--r--examples/Kaleidoscope/Chapter6/toy.cpp2
-rw-r--r--examples/Kaleidoscope/Chapter7/toy.cpp2
-rw-r--r--examples/Kaleidoscope/include/KaleidoscopeJIT.h10
-rw-r--r--include/llvm-c/OrcBindings.h28
-rw-r--r--include/llvm/ADT/APInt.h6
-rw-r--r--include/llvm/ADT/STLExtras.h4
-rw-r--r--include/llvm/ADT/SmallPtrSet.h11
-rw-r--r--include/llvm/Analysis/BlockFrequencyInfoImpl.h2
-rw-r--r--include/llvm/Analysis/CGSCCPassManager.h17
-rw-r--r--include/llvm/Analysis/InlineCost.h2
-rw-r--r--include/llvm/Analysis/LazyCallGraph.h18
-rw-r--r--include/llvm/Analysis/MemoryBuiltins.h3
-rw-r--r--include/llvm/Analysis/RegionInfoImpl.h8
-rw-r--r--include/llvm/Analysis/TargetTransformInfo.h41
-rw-r--r--include/llvm/Analysis/TargetTransformInfoImpl.h14
-rw-r--r--include/llvm/Analysis/ValueTracking.h3
-rw-r--r--include/llvm/BinaryFormat/Wasm.h4
-rw-r--r--include/llvm/Bitcode/LLVMBitCodes.h12
-rw-r--r--include/llvm/CodeGen/AsmPrinter.h4
-rw-r--r--include/llvm/CodeGen/BasicTTIImpl.h2
-rw-r--r--include/llvm/CodeGen/GlobalISel/InstructionSelector.h158
-rw-r--r--include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h337
-rw-r--r--include/llvm/CodeGen/GlobalISel/LegalizerHelper.h8
-rw-r--r--include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h53
-rw-r--r--include/llvm/CodeGen/LiveRegUnits.h10
-rw-r--r--include/llvm/CodeGen/MachineFunction.h2
-rw-r--r--include/llvm/CodeGen/MachineMemOperand.h15
-rw-r--r--include/llvm/CodeGen/RuntimeLibcalls.h23
-rw-r--r--include/llvm/CodeGen/ScheduleDAG.h8
-rw-r--r--include/llvm/CodeGen/SelectionDAG.h4
-rw-r--r--include/llvm/CodeGen/SelectionDAGNodes.h8
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolRecord.h4
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeIndex.h21
-rw-r--r--include/llvm/DebugInfo/DIContext.h24
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFContext.h25
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h6
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiStream.h2
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h12
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h2
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h49
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h2
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h2
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h6
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NativeSession.h7
-rw-r--r--include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h4
-rw-r--r--include/llvm/DebugInfo/PDB/Native/PDBStringTable.h1
-rw-r--r--include/llvm/DebugInfo/PDB/Native/PublicsStream.h4
-rw-r--r--include/llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h54
-rw-r--r--include/llvm/DebugInfo/PDB/Native/RawTypes.h13
-rw-r--r--include/llvm/ExecutionEngine/JITSymbol.h88
-rw-r--r--include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h177
-rw-r--r--include/llvm/ExecutionEngine/Orc/ExecutionUtils.h25
-rw-r--r--include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h36
-rw-r--r--include/llvm/ExecutionEngine/Orc/IRCompileLayer.h18
-rw-r--r--include/llvm/ExecutionEngine/Orc/IRTransformLayer.h16
-rw-r--r--include/llvm/ExecutionEngine/Orc/LambdaResolver.h2
-rw-r--r--include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h88
-rw-r--r--include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h17
-rw-r--r--include/llvm/ExecutionEngine/Orc/OrcError.h15
-rw-r--r--include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h31
-rw-r--r--include/llvm/ExecutionEngine/RuntimeDyld.h15
-rw-r--r--include/llvm/IR/Constants.h10
-rw-r--r--include/llvm/IR/IRBuilder.h20
-rw-r--r--include/llvm/IR/Instructions.h210
-rw-r--r--include/llvm/IR/IntrinsicInst.h169
-rw-r--r--include/llvm/IR/Intrinsics.td16
-rw-r--r--include/llvm/IR/LLVMContext.h28
-rw-r--r--include/llvm/IR/Module.h2
-rw-r--r--include/llvm/IR/ModuleSummaryIndex.h10
-rw-r--r--include/llvm/IR/PassManager.h31
-rw-r--r--include/llvm/IR/PatternMatch.h73
-rw-r--r--include/llvm/IR/SafepointIRVerifier.h35
-rw-r--r--include/llvm/IR/Type.h6
-rw-r--r--include/llvm/InitializePasses.h2
-rw-r--r--include/llvm/MC/MCAsmBackend.h2
-rw-r--r--include/llvm/MC/MCDisassembler/MCDisassembler.h1
-rw-r--r--include/llvm/MC/MCMachObjectWriter.h3
-rw-r--r--include/llvm/MC/MCObjectWriter.h2
-rw-r--r--include/llvm/MC/MCSymbolWasm.h14
-rw-r--r--include/llvm/Object/COFF.h3
-rw-r--r--include/llvm/Object/Wasm.h14
-rw-r--r--include/llvm/ObjectYAML/WasmYAML.h3
-rw-r--r--include/llvm/Option/OptTable.h3
-rw-r--r--include/llvm/Passes/PassBuilder.h294
-rw-r--r--include/llvm/ProfileData/InstrProf.h54
-rw-r--r--include/llvm/ProfileData/InstrProfReader.h37
-rw-r--r--include/llvm/ProfileData/InstrProfWriter.h16
-rw-r--r--include/llvm/ProfileData/ProfileCommon.h3
-rw-r--r--include/llvm/Support/BlockFrequency.h4
-rw-r--r--include/llvm/Support/Compiler.h10
-rw-r--r--include/llvm/Support/DynamicLibrary.h16
-rw-r--r--include/llvm/Support/ErrorHandling.h48
-rw-r--r--include/llvm/Support/GenericDomTreeConstruction.h169
-rw-r--r--include/llvm/Support/ReverseIteration.h17
-rw-r--r--include/llvm/Support/UnicodeCharRanges.h7
-rw-r--r--include/llvm/Target/GlobalISel/SelectionDAGCompat.td1
-rw-r--r--include/llvm/Target/TargetInstrInfo.h10
-rw-r--r--include/llvm/Target/TargetLowering.h30
-rw-r--r--include/llvm/Transforms/Scalar/GVN.h30
-rw-r--r--include/llvm/Transforms/Utils/LowerMemIntrinsics.h26
-rw-r--r--include/llvm/Transforms/Utils/SSAUpdaterImpl.h10
-rw-r--r--include/llvm/module.modulemap1
-rw-r--r--lib/Analysis/BasicAliasAnalysis.cpp2
-rw-r--r--lib/Analysis/BranchProbabilityInfo.cpp2
-rw-r--r--lib/Analysis/CGSCCPassManager.cpp206
-rw-r--r--lib/Analysis/CaptureTracking.cpp4
-rw-r--r--lib/Analysis/DemandedBits.cpp15
-rw-r--r--lib/Analysis/DependenceAnalysis.cpp7
-rw-r--r--lib/Analysis/InstructionSimplify.cpp15
-rw-r--r--lib/Analysis/LazyCallGraph.cpp20
-rw-r--r--lib/Analysis/Lint.cpp2
-rw-r--r--lib/Analysis/LoopInfo.cpp6
-rw-r--r--lib/Analysis/MemoryBuiltins.cpp41
-rw-r--r--lib/Analysis/ModuleSummaryAnalysis.cpp2
-rw-r--r--lib/Analysis/ScalarEvolution.cpp20
-rw-r--r--lib/Analysis/TargetTransformInfo.cpp25
-rw-r--r--lib/Analysis/ValueTracking.cpp53
-rw-r--r--lib/Analysis/VectorUtils.cpp2
-rw-r--r--lib/AsmParser/LLLexer.cpp2
-rw-r--r--lib/AsmParser/LLParser.cpp74
-rw-r--r--lib/AsmParser/LLParser.h3
-rw-r--r--lib/AsmParser/LLToken.h2
-rw-r--r--lib/Bitcode/Reader/BitcodeReader.cpp92
-rw-r--r--lib/Bitcode/Writer/BitcodeWriter.cpp64
-rw-r--r--lib/CodeGen/AtomicExpandPass.cpp8
-rw-r--r--lib/CodeGen/CodeGen.cpp1
-rw-r--r--lib/CodeGen/CodeGenPrepare.cpp37
-rw-r--r--lib/CodeGen/GlobalISel/IRTranslator.cpp4
-rw-r--r--lib/CodeGen/GlobalISel/InstructionSelector.cpp7
-rw-r--r--lib/CodeGen/GlobalISel/LegalizerHelper.cpp36
-rw-r--r--lib/CodeGen/GlobalISel/MachineIRBuilder.cpp42
-rw-r--r--lib/CodeGen/LiveRegUnits.cpp2
-rw-r--r--lib/CodeGen/MIRParser/MILexer.cpp10
-rw-r--r--lib/CodeGen/MIRParser/MILexer.h6
-rw-r--r--lib/CodeGen/MIRParser/MIParser.cpp85
-rw-r--r--lib/CodeGen/MIRPrinter.cpp54
-rw-r--r--lib/CodeGen/MachineBlockPlacement.cpp37
-rw-r--r--lib/CodeGen/MachineFunction.cpp12
-rw-r--r--lib/CodeGen/MachineInstr.cpp20
-rw-r--r--lib/CodeGen/MachineVerifier.cpp8
-rw-r--r--lib/CodeGen/MacroFusion.cpp2
-rw-r--r--lib/CodeGen/PostRAHazardRecognizer.cpp2
-rw-r--r--lib/CodeGen/RegAllocFast.cpp13
-rw-r--r--lib/CodeGen/RegAllocGreedy.cpp2
-rw-r--r--lib/CodeGen/RegAllocPBQP.cpp2
-rw-r--r--lib/CodeGen/RegisterCoalescer.cpp28
-rw-r--r--lib/CodeGen/RegisterScavenging.cpp15
-rw-r--r--lib/CodeGen/ScheduleDAG.cpp91
-rw-r--r--lib/CodeGen/ScheduleDAGInstrs.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/DAGCombiner.cpp199
-rw-r--r--lib/CodeGen/SelectionDAG/InstrEmitter.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeDAG.cpp20
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp93
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeTypes.cpp16
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeTypes.h20
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp4
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAG.cpp55
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp28
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp134
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h6
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp9
-rw-r--r--lib/CodeGen/SjLjEHPrepare.cpp7
-rw-r--r--lib/CodeGen/SplitKit.cpp8
-rw-r--r--lib/CodeGen/TargetLoweringBase.cpp54
-rw-r--r--lib/DebugInfo/CodeView/SymbolDumper.cpp91
-rw-r--r--lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp12
-rw-r--r--lib/DebugInfo/DWARF/DWARFContext.cpp124
-rw-r--r--lib/DebugInfo/DWARF/DWARFDie.cpp2
-rw-r--r--lib/DebugInfo/PDB/CMakeLists.txt2
-rw-r--r--lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp6
-rw-r--r--lib/DebugInfo/PDB/Native/DbiStream.cpp7
-rw-r--r--lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp26
-rw-r--r--lib/DebugInfo/PDB/Native/NamedStreamMap.cpp6
-rw-r--r--lib/DebugInfo/PDB/Native/NativeBuiltinSymbol.cpp48
-rw-r--r--lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp2
-rw-r--r--lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp2
-rw-r--r--lib/DebugInfo/PDB/Native/NativeRawSymbol.cpp2
-rw-r--r--lib/DebugInfo/PDB/Native/NativeSession.cpp60
-rw-r--r--lib/DebugInfo/PDB/Native/PDBFile.cpp9
-rw-r--r--lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp27
-rw-r--r--lib/DebugInfo/PDB/Native/PDBStringTable.cpp2
-rw-r--r--lib/DebugInfo/PDB/Native/PublicsStream.cpp16
-rw-r--r--lib/DebugInfo/PDB/Native/PublicsStreamBuilder.cpp89
-rw-r--r--lib/ExecutionEngine/MCJIT/MCJIT.cpp19
-rw-r--r--lib/ExecutionEngine/Orc/OrcCBindings.cpp35
-rw-r--r--lib/ExecutionEngine/Orc/OrcCBindingsStack.h104
-rw-r--r--lib/ExecutionEngine/Orc/OrcError.cpp21
-rw-r--r--lib/ExecutionEngine/Orc/OrcMCJITReplacement.h23
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp33
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp7
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp3
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp3
-rw-r--r--lib/Fuzzer/CMakeLists.txt2
-rw-r--r--lib/Fuzzer/FuzzerCorpus.h81
-rw-r--r--lib/Fuzzer/FuzzerDriver.cpp6
-rw-r--r--lib/Fuzzer/FuzzerExtFunctionsWeak.cpp3
-rw-r--r--lib/Fuzzer/FuzzerFlags.def4
-rw-r--r--lib/Fuzzer/FuzzerIOWindows.cpp4
-rw-r--r--lib/Fuzzer/FuzzerInternal.h8
-rw-r--r--lib/Fuzzer/FuzzerLoop.cpp66
-rw-r--r--lib/Fuzzer/FuzzerOptions.h1
-rw-r--r--lib/Fuzzer/FuzzerUtilDarwin.cpp13
-rw-r--r--lib/Fuzzer/test/CMakeLists.txt3
-rw-r--r--lib/Fuzzer/test/FuzzerUnittest.cpp5
-rw-r--r--lib/Fuzzer/test/ShrinkControlFlowSimpleTest.cpp19
-rw-r--r--lib/Fuzzer/test/reduce_inputs.test13
-rw-r--r--lib/IR/AsmWriter.cpp66
-rw-r--r--lib/IR/CMakeLists.txt1
-rw-r--r--lib/IR/ConstantFold.cpp38
-rw-r--r--lib/IR/Constants.cpp73
-rw-r--r--lib/IR/Core.cpp22
-rw-r--r--lib/IR/Instruction.cpp11
-rw-r--r--lib/IR/Instructions.cpp74
-rw-r--r--lib/IR/LLVMContext.cpp20
-rw-r--r--lib/IR/LLVMContextImpl.cpp14
-rw-r--r--lib/IR/LLVMContextImpl.h14
-rw-r--r--lib/IR/Module.cpp4
-rw-r--r--lib/IR/SafepointIRVerifier.cpp437
-rw-r--r--lib/IR/Type.cpp2
-rw-r--r--lib/IR/Verifier.cpp96
-rw-r--r--lib/LTO/LTO.cpp11
-rw-r--r--lib/Linker/IRMover.cpp18
-rw-r--r--lib/MC/ELFObjectWriter.cpp10
-rw-r--r--lib/MC/MCAssembler.cpp16
-rw-r--r--lib/MC/MachObjectWriter.cpp2
-rw-r--r--lib/MC/WasmObjectWriter.cpp161
-rw-r--r--lib/MC/WinCOFFObjectWriter.cpp11
-rw-r--r--lib/Object/WasmObjectFile.cpp40
-rw-r--r--lib/Object/WindowsResource.cpp4
-rw-r--r--lib/ObjectYAML/WasmYAML.cpp3
-rw-r--r--lib/Option/OptTable.cpp8
-rw-r--r--lib/Passes/PassBuilder.cpp262
-rw-r--r--lib/ProfileData/InstrProf.cpp46
-rw-r--r--lib/ProfileData/InstrProfReader.cpp20
-rw-r--r--lib/ProfileData/InstrProfWriter.cpp43
-rw-r--r--lib/Support/CommandLine.cpp2
-rw-r--r--lib/Support/DynamicLibrary.cpp43
-rw-r--r--lib/Support/ErrorHandling.cpp62
-rw-r--r--lib/Support/Host.cpp405
-rw-r--r--lib/Support/Mutex.cpp5
-rw-r--r--lib/Support/Unix/DynamicLibrary.inc3
-rw-r--r--lib/Support/Unix/Host.inc25
-rw-r--r--lib/Support/Unix/Program.inc3
-rw-r--r--lib/Support/Windows/DynamicLibrary.inc2
-rw-r--r--lib/Support/Windows/Host.inc4
-rw-r--r--lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp2
-rw-r--r--lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp2
-rw-r--r--lib/Target/AArch64/AArch64CondBrTuning.cpp2
-rw-r--r--lib/Target/AArch64/AArch64FastISel.cpp2
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.cpp8
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.cpp32
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.h6
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.td11
-rw-r--r--lib/Target/AArch64/AArch64InstructionSelector.cpp13
-rw-r--r--lib/Target/AArch64/AArch64LegalizerInfo.cpp5
-rw-r--r--lib/Target/AArch64/AArch64RedundantCopyElimination.cpp1
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.cpp5
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.h7
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.cpp6
-rw-r--r--lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp4
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp4
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp2
-rw-r--r--lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp9
-rw-r--r--lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp4
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.cpp7
-rw-r--r--lib/Target/AMDGPU/AMDGPULowerIntrinsics.cpp13
-rw-r--r--lib/Target/AMDGPU/AMDGPUMacroFusion.cpp64
-rw-r--r--lib/Target/AMDGPU/AMDGPUMacroFusion.h19
-rw-r--r--lib/Target/AMDGPU/AMDGPUSubtarget.cpp58
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetMachine.cpp47
-rw-r--r--lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp166
-rw-r--r--lib/Target/AMDGPU/CMakeLists.txt1
-rw-r--r--lib/Target/AMDGPU/GCNIterativeScheduler.cpp2
-rw-r--r--lib/Target/AMDGPU/GCNMinRegStrategy.cpp2
-rw-r--r--lib/Target/AMDGPU/GCNRegPressure.cpp2
-rw-r--r--lib/Target/AMDGPU/GCNSchedStrategy.cpp2
-rw-r--r--lib/Target/AMDGPU/GCNSchedStrategy.h2
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp4
-rw-r--r--lib/Target/AMDGPU/MIMGInstructions.td1
-rw-r--r--lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp2
-rw-r--r--lib/Target/AMDGPU/R600ISelLowering.cpp3
-rw-r--r--lib/Target/AMDGPU/R600ISelLowering.h3
-rw-r--r--lib/Target/AMDGPU/R600MachineScheduler.cpp2
-rw-r--r--lib/Target/AMDGPU/SIFoldOperands.cpp1
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.cpp120
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.h3
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.cpp20
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.h8
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.td2
-rw-r--r--lib/Target/AMDGPU/SIMachineScheduler.cpp2
-rw-r--r--lib/Target/AMDGPU/SIShrinkInstructions.cpp78
-rw-r--r--lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp2
-rw-r--r--lib/Target/AMDGPU/VOP3PInstructions.td28
-rw-r--r--lib/Target/AMDGPU/VOPInstructions.td18
-rw-r--r--lib/Target/ARM/ARMAsmPrinter.cpp1
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.cpp3
-rw-r--r--lib/Target/ARM/ARMBaseRegisterInfo.cpp19
-rw-r--r--lib/Target/ARM/ARMCallLowering.cpp2
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp20
-rw-r--r--lib/Target/ARM/ARMISelLowering.h3
-rw-r--r--lib/Target/ARM/ARMInstrThumb2.td2
-rw-r--r--lib/Target/ARM/ARMInstructionSelector.cpp312
-rw-r--r--lib/Target/ARM/ARMLegalizerInfo.cpp207
-rw-r--r--lib/Target/ARM/ARMLegalizerInfo.h33
-rw-r--r--lib/Target/ARM/ARMRegisterBankInfo.cpp32
-rw-r--r--lib/Target/ARM/ARMTargetTransformInfo.cpp18
-rw-r--r--lib/Target/ARM/ARMTargetTransformInfo.h36
-rw-r--r--lib/Target/ARM/AsmParser/ARMAsmParser.cpp11
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp15
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h8
-rw-r--r--lib/Target/ARM/Thumb1FrameLowering.cpp2
-rw-r--r--lib/Target/AVR/AVRAsmPrinter.cpp5
-rw-r--r--lib/Target/AVR/AVRDevices.td23
-rw-r--r--lib/Target/AVR/AVRInstrInfo.cpp72
-rw-r--r--lib/Target/AVR/AVRInstrInfo.h4
-rw-r--r--lib/Target/AVR/AVRInstrInfo.td32
-rw-r--r--lib/Target/AVR/AVRMCInstLower.cpp16
-rw-r--r--lib/Target/AVR/AVRRegisterInfo.cpp11
-rw-r--r--lib/Target/AVR/AVRRegisterInfo.td7
-rw-r--r--lib/Target/AVR/AVRTargetMachine.cpp6
-rw-r--r--lib/Target/AVR/AsmParser/AVRAsmParser.cpp1
-rw-r--r--lib/Target/AVR/InstPrinter/AVRInstPrinter.cpp2
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp2
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp4
-rw-r--r--lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonBitSimplify.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonBitTracker.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonConstPropagation.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonFrameLowering.cpp14
-rw-r--r--lib/Target/Hexagon/HexagonGenPredicate.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonISelDAGToDAG.cpp46
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.cpp56
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.h27
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.cpp74
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.h21
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonPatterns.td165
-rw-r--r--lib/Target/Hexagon/HexagonPseudo.td10
-rw-r--r--lib/Target/Hexagon/HexagonSplitDouble.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.cpp10
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.cpp12
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp7
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h13
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp2
-rw-r--r--lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp1
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp4
-rw-r--r--lib/Target/Mips/AsmParser/MipsAsmParser.cpp282
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp32
-rw-r--r--lib/Target/Mips/Mips.td2
-rw-r--r--lib/Target/Mips/MipsInstrInfo.td11
-rw-r--r--lib/Target/Mips/MipsMTInstrFormats.td99
-rw-r--r--lib/Target/Mips/MipsMTInstrInfo.td208
-rw-r--r--lib/Target/Mips/MipsSchedule.td18
-rw-r--r--lib/Target/Mips/MipsScheduleGeneric.td14
-rw-r--r--lib/Target/Mips/MipsScheduleP5600.td2
-rw-r--r--lib/Target/Mips/MipsSubtarget.cpp3
-rw-r--r--lib/Target/Mips/MipsSubtarget.h4
-rw-r--r--lib/Target/Mips/MipsTargetStreamer.h9
-rw-r--r--lib/Target/NVPTX/NVPTXISelLowering.cpp3
-rw-r--r--lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp35
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp2
-rw-r--r--lib/Target/PowerPC/PPCCTRLoops.cpp5
-rw-r--r--lib/Target/PowerPC/PPCFrameLowering.cpp36
-rw-r--r--lib/Target/PowerPC/PPCISelDAGToDAG.cpp43
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.cpp177
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.h7
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.td191
-rw-r--r--lib/Target/PowerPC/PPCInstrVSX.td190
-rw-r--r--lib/Target/PowerPC/PPCScheduleP9.td4
-rw-r--r--lib/Target/PowerPC/PPCSubtarget.h7
-rw-r--r--lib/Target/PowerPC/PPCVSXSwapRemoval.cpp4
-rw-r--r--lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp4
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp3
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp4
-rw-r--r--lib/Target/SystemZ/SystemZHazardRecognizer.cpp2
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.cpp97
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.h15
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.td76
-rw-r--r--lib/Target/SystemZ/SystemZLDCleanup.cpp2
-rw-r--r--lib/Target/SystemZ/SystemZMachineScheduler.cpp2
-rw-r--r--lib/Target/SystemZ/SystemZOperators.td18
-rw-r--r--lib/Target/SystemZ/SystemZScheduleZ13.td214
-rw-r--r--lib/Target/SystemZ/SystemZScheduleZ196.td4
-rw-r--r--lib/Target/SystemZ/SystemZScheduleZEC12.td4
-rw-r--r--lib/Target/SystemZ/SystemZTargetTransformInfo.cpp5
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp28
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h8
-rw-r--r--lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp7
-rw-r--r--lib/Target/WebAssembly/WebAssemblyCFGSort.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp41
-rw-r--r--lib/Target/X86/AsmParser/X86AsmParser.cpp4
-rw-r--r--lib/Target/X86/InstPrinter/X86InstComments.cpp4
-rw-r--r--lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp2
-rw-r--r--lib/Target/X86/Utils/X86ShuffleDecode.cpp56
-rw-r--r--lib/Target/X86/Utils/X86ShuffleDecode.h8
-rw-r--r--lib/Target/X86/X86.td1
-rw-r--r--lib/Target/X86/X86CallLowering.cpp47
-rw-r--r--lib/Target/X86/X86CallLowering.h2
-rw-r--r--lib/Target/X86/X86CallingConv.td10
-rw-r--r--lib/Target/X86/X86FastISel.cpp3
-rw-r--r--lib/Target/X86/X86FrameLowering.cpp5
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp537
-rw-r--r--lib/Target/X86/X86ISelLowering.h13
-rw-r--r--lib/Target/X86/X86InstrInfo.cpp2
-rw-r--r--lib/Target/X86/X86InstructionSelector.cpp153
-rw-r--r--lib/Target/X86/X86LegalizerInfo.cpp12
-rw-r--r--lib/Target/X86/X86MCInstLower.cpp195
-rw-r--r--lib/Target/X86/X86SchedSandyBridge.td2472
-rw-r--r--lib/Target/X86/X86ScheduleBtVer2.td77
-rw-r--r--lib/Target/X86/X86TargetTransformInfo.cpp13
-rw-r--r--lib/Transforms/IPO/ArgumentPromotion.cpp4
-rw-r--r--lib/Transforms/IPO/FunctionImport.cpp25
-rw-r--r--lib/Transforms/IPO/GlobalOpt.cpp10
-rw-r--r--lib/Transforms/IPO/Inliner.cpp10
-rw-r--r--lib/Transforms/IPO/LowerTypeTests.cpp11
-rw-r--r--lib/Transforms/IPO/PassManagerBuilder.cpp18
-rw-r--r--lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp3
-rw-r--r--lib/Transforms/InstCombine/InstCombineAddSub.cpp78
-rw-r--r--lib/Transforms/InstCombine/InstCombineAndOrXor.cpp363
-rw-r--r--lib/Transforms/InstCombine/InstCombineCalls.cpp259
-rw-r--r--lib/Transforms/InstCombine/InstCombineCasts.cpp172
-rw-r--r--lib/Transforms/InstCombine/InstCombineCompares.cpp309
-rw-r--r--lib/Transforms/InstCombine/InstCombineInternal.h12
-rw-r--r--lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp81
-rw-r--r--lib/Transforms/InstCombine/InstCombineMulDivRem.cpp93
-rw-r--r--lib/Transforms/InstCombine/InstCombinePHI.cpp10
-rw-r--r--lib/Transforms/InstCombine/InstCombineSelect.cpp190
-rw-r--r--lib/Transforms/InstCombine/InstCombineShifts.cpp59
-rw-r--r--lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp14
-rw-r--r--lib/Transforms/InstCombine/InstCombineVectorOps.cpp48
-rw-r--r--lib/Transforms/InstCombine/InstructionCombining.cpp164
-rw-r--r--lib/Transforms/Instrumentation/AddressSanitizer.cpp2
-rw-r--r--lib/Transforms/Instrumentation/CFGMST.h12
-rw-r--r--lib/Transforms/Instrumentation/InstrProfiling.cpp157
-rw-r--r--lib/Transforms/Instrumentation/MaximumSpanningTree.h6
-rw-r--r--lib/Transforms/Instrumentation/MemorySanitizer.cpp7
-rw-r--r--lib/Transforms/Instrumentation/PGOInstrumentation.cpp2
-rw-r--r--lib/Transforms/Instrumentation/ThreadSanitizer.cpp7
-rw-r--r--lib/Transforms/Scalar/ConstantHoisting.cpp57
-rw-r--r--lib/Transforms/Scalar/EarlyCSE.cpp2
-rw-r--r--lib/Transforms/Scalar/GVN.cpp202
-rw-r--r--lib/Transforms/Scalar/InferAddressSpaces.cpp5
-rw-r--r--lib/Transforms/Scalar/JumpThreading.cpp2
-rw-r--r--lib/Transforms/Scalar/LoopDeletion.cpp39
-rw-r--r--lib/Transforms/Scalar/LoopIdiomRecognize.cpp2
-rw-r--r--lib/Transforms/Scalar/LoopInterchange.cpp44
-rw-r--r--lib/Transforms/Scalar/LoopRotation.cpp20
-rw-r--r--lib/Transforms/Scalar/LoopStrengthReduce.cpp110
-rw-r--r--lib/Transforms/Scalar/MergedLoadStoreMotion.cpp2
-rw-r--r--lib/Transforms/Scalar/NewGVN.cpp16
-rw-r--r--lib/Transforms/Scalar/Reassociate.cpp2
-rw-r--r--lib/Transforms/Scalar/RewriteStatepointsForGC.cpp2
-rw-r--r--lib/Transforms/Scalar/SCCP.cpp2
-rw-r--r--lib/Transforms/Scalar/SROA.cpp12
-rw-r--r--lib/Transforms/Scalar/StructurizeCFG.cpp2
-rw-r--r--lib/Transforms/Utils/CloneFunction.cpp20
-rw-r--r--lib/Transforms/Utils/CmpInstAnalysis.cpp2
-rw-r--r--lib/Transforms/Utils/CodeExtractor.cpp6
-rw-r--r--lib/Transforms/Utils/Evaluator.cpp2
-rw-r--r--lib/Transforms/Utils/FunctionComparator.cpp18
-rw-r--r--lib/Transforms/Utils/Local.cpp18
-rw-r--r--lib/Transforms/Utils/LoopUnrollRuntime.cpp143
-rw-r--r--lib/Transforms/Utils/LowerMemIntrinsics.cpp288
-rw-r--r--lib/Transforms/Utils/SimplifyCFG.cpp4
-rw-r--r--lib/Transforms/Utils/SimplifyIndVar.cpp47
-rw-r--r--lib/Transforms/Utils/SimplifyLibCalls.cpp4
-rw-r--r--lib/Transforms/Utils/VNCoercion.cpp15
-rw-r--r--lib/Transforms/Vectorize/LoopVectorize.cpp9
-rw-r--r--lib/Transforms/Vectorize/SLPVectorizer.cpp45
-rw-r--r--runtimes/CMakeLists.txt200
-rw-r--r--runtimes/Components.cmake.in1
-rw-r--r--test/Analysis/BasicAA/unreachable-block.ll2
-rw-r--r--test/Analysis/CostModel/X86/slm-arith-costs.ll28
-rw-r--r--test/Analysis/DependenceAnalysis/BasePtrBug.ll80
-rw-r--r--test/Analysis/ScalarEvolution/guards.ll6
-rw-r--r--test/Assembler/2003-11-11-ImplicitRename.ll3
-rw-r--r--test/Assembler/2007-11-26-AttributeOverload.ll2
-rw-r--r--test/Assembler/atomic.ll26
-rw-r--r--test/Bitcode/Inputs/module-hash-strtab1.ll10
-rw-r--r--test/Bitcode/Inputs/module-hash-strtab2.ll10
-rw-r--r--test/Bitcode/atomic-no-syncscope.ll17
-rw-r--r--test/Bitcode/atomic-no-syncscope.ll.bcbin0 -> 1000 bytes
-rw-r--r--test/Bitcode/atomic.ll4
-rw-r--r--test/Bitcode/compatibility-3.6.ll24
-rw-r--r--test/Bitcode/compatibility-3.7.ll24
-rw-r--r--test/Bitcode/compatibility-3.8.ll24
-rw-r--r--test/Bitcode/compatibility-3.9.ll24
-rw-r--r--test/Bitcode/compatibility-4.0.ll24
-rw-r--r--test/Bitcode/compatibility.ll24
-rw-r--r--test/Bitcode/memInstructions.3.2.ll104
-rw-r--r--test/Bitcode/module-hash-strtab.ll15
-rw-r--r--test/Bitcode/module_hash.ll8
-rw-r--r--test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll2
-rw-r--r--test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll2
-rw-r--r--test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll8
-rw-r--r--test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir30
-rw-r--r--test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir38
-rw-r--r--test/CodeGen/AArch64/arm64-csldst-mmo.ll6
-rw-r--r--test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll4
-rw-r--r--test/CodeGen/AArch64/arm64-misched-memdep-bug.ll6
-rw-r--r--test/CodeGen/AArch64/fence-singlethread.ll2
-rw-r--r--test/CodeGen/AArch64/preferred-function-alignment.ll26
-rw-r--r--test/CodeGen/AArch64/tailcall_misched_graph.ll4
-rw-r--r--test/CodeGen/AMDGPU/add.i16.ll10
-rw-r--r--test/CodeGen/AMDGPU/add.ll18
-rw-r--r--test/CodeGen/AMDGPU/add.v2i16.ll4
-rw-r--r--test/CodeGen/AMDGPU/add_i128.ll16
-rw-r--r--test/CodeGen/AMDGPU/add_i64.ll8
-rw-r--r--test/CodeGen/AMDGPU/addrspacecast.ll33
-rw-r--r--test/CodeGen/AMDGPU/alignbit-pat.ll2
-rw-r--r--test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll38
-rw-r--r--test/CodeGen/AMDGPU/and-gcn.ll3
-rw-r--r--test/CodeGen/AMDGPU/and.ll55
-rw-r--r--test/CodeGen/AMDGPU/any_extend_vector_inreg.ll6
-rw-r--r--test/CodeGen/AMDGPU/bitreverse.ll20
-rw-r--r--test/CodeGen/AMDGPU/bswap.ll2
-rw-r--r--test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll8
-rw-r--r--test/CodeGen/AMDGPU/cgp-addressing-modes.ll6
-rw-r--r--test/CodeGen/AMDGPU/clamp-omod-special-case.mir46
-rw-r--r--test/CodeGen/AMDGPU/coalescer_remat.ll2
-rw-r--r--test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir187
-rw-r--r--test/CodeGen/AMDGPU/constant-fold-mi-operands.ll2
-rw-r--r--test/CodeGen/AMDGPU/copy-illegal-type.ll62
-rw-r--r--test/CodeGen/AMDGPU/ctlz.ll75
-rw-r--r--test/CodeGen/AMDGPU/ctlz_zero_undef.ll78
-rw-r--r--test/CodeGen/AMDGPU/ctpop.ll91
-rw-r--r--test/CodeGen/AMDGPU/ctpop64.ll29
-rw-r--r--test/CodeGen/AMDGPU/cttz_zero_undef.ll19
-rw-r--r--test/CodeGen/AMDGPU/cvt_f32_ubyte.ll88
-rw-r--r--test/CodeGen/AMDGPU/detect-dead-lanes.mir10
-rw-r--r--test/CodeGen/AMDGPU/ds_read2.ll4
-rw-r--r--test/CodeGen/AMDGPU/ds_read2_superreg.ll10
-rw-r--r--test/CodeGen/AMDGPU/ds_read2st64.ll6
-rw-r--r--test/CodeGen/AMDGPU/early-if-convert-cost.ll2
-rw-r--r--test/CodeGen/AMDGPU/early-if-convert.ll2
-rw-r--r--test/CodeGen/AMDGPU/enable-no-signed-zeros-fp-math.ll11
-rw-r--r--test/CodeGen/AMDGPU/extractelt-to-trunc.ll14
-rw-r--r--test/CodeGen/AMDGPU/fabs.f16.ll14
-rw-r--r--test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll50
-rw-r--r--test/CodeGen/AMDGPU/fadd.f16.ll58
-rw-r--r--test/CodeGen/AMDGPU/fadd64.ll12
-rw-r--r--test/CodeGen/AMDGPU/fcanonicalize-elimination.ll487
-rw-r--r--test/CodeGen/AMDGPU/fcanonicalize.f16.ll18
-rw-r--r--test/CodeGen/AMDGPU/fcanonicalize.ll2
-rw-r--r--test/CodeGen/AMDGPU/fcmp.f16.ll312
-rw-r--r--test/CodeGen/AMDGPU/fcmp64.ll12
-rw-r--r--test/CodeGen/AMDGPU/fconst64.ll9
-rw-r--r--test/CodeGen/AMDGPU/fcopysign.f16.ll91
-rw-r--r--test/CodeGen/AMDGPU/fdiv.f16.ll6
-rw-r--r--test/CodeGen/AMDGPU/fdiv.ll41
-rw-r--r--test/CodeGen/AMDGPU/fma-combine.ll34
-rw-r--r--test/CodeGen/AMDGPU/fma.f64.ll4
-rw-r--r--test/CodeGen/AMDGPU/fma.ll4
-rw-r--r--test/CodeGen/AMDGPU/fmax_legacy.ll10
-rw-r--r--test/CodeGen/AMDGPU/fmed3.ll4
-rw-r--r--test/CodeGen/AMDGPU/fmin_legacy.ll10
-rw-r--r--test/CodeGen/AMDGPU/fmul.f16.ll22
-rw-r--r--test/CodeGen/AMDGPU/fmul64.ll4
-rw-r--r--test/CodeGen/AMDGPU/fmuladd.f16.ll28
-rw-r--r--test/CodeGen/AMDGPU/fmuladd.f32.ll82
-rw-r--r--test/CodeGen/AMDGPU/fmuladd.f64.ll12
-rw-r--r--test/CodeGen/AMDGPU/fmuladd.v2f16.ll18
-rw-r--r--test/CodeGen/AMDGPU/fneg-combines.ll62
-rw-r--r--test/CodeGen/AMDGPU/fneg-fabs.f16.ll4
-rw-r--r--test/CodeGen/AMDGPU/fneg-fabs.ll6
-rw-r--r--test/CodeGen/AMDGPU/fneg.f16.ll8
-rw-r--r--test/CodeGen/AMDGPU/fold-immediate-output-mods.mir53
-rw-r--r--test/CodeGen/AMDGPU/fold-operands-order.mir6
-rw-r--r--test/CodeGen/AMDGPU/fp32_to_fp16.ll6
-rw-r--r--test/CodeGen/AMDGPU/fpext.f16.ll8
-rw-r--r--test/CodeGen/AMDGPU/fptosi.f16.ll6
-rw-r--r--test/CodeGen/AMDGPU/fptoui.f16.ll6
-rw-r--r--test/CodeGen/AMDGPU/fptrunc.f16.ll12
-rw-r--r--test/CodeGen/AMDGPU/fract.f64.ll10
-rw-r--r--test/CodeGen/AMDGPU/fract.ll12
-rw-r--r--test/CodeGen/AMDGPU/frem.ll8
-rw-r--r--test/CodeGen/AMDGPU/fsqrt.f64.ll4
-rw-r--r--test/CodeGen/AMDGPU/fsqrt.ll6
-rw-r--r--test/CodeGen/AMDGPU/fsub.f16.ll28
-rw-r--r--test/CodeGen/AMDGPU/fsub.ll24
-rw-r--r--test/CodeGen/AMDGPU/fsub64.ll4
-rw-r--r--test/CodeGen/AMDGPU/ftrunc.f64.ll6
-rw-r--r--test/CodeGen/AMDGPU/global-extload-i16.ll4
-rw-r--r--test/CodeGen/AMDGPU/global-smrd-unknown.ll20
-rw-r--r--test/CodeGen/AMDGPU/half.ll10
-rw-r--r--test/CodeGen/AMDGPU/imm.ll4
-rw-r--r--test/CodeGen/AMDGPU/immv216.ll8
-rw-r--r--test/CodeGen/AMDGPU/indirect-addressing-si.ll8
-rw-r--r--test/CodeGen/AMDGPU/inline-asm.ll4
-rw-r--r--test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll2
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll2
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.class.ll2
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll2
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll2
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll4
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll4
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll4
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll4
-rw-r--r--test/CodeGen/AMDGPU/llvm.ceil.f16.ll8
-rw-r--r--test/CodeGen/AMDGPU/llvm.cos.f16.ll12
-rw-r--r--test/CodeGen/AMDGPU/llvm.exp2.f16.ll8
-rw-r--r--test/CodeGen/AMDGPU/llvm.floor.f16.ll8
-rw-r--r--test/CodeGen/AMDGPU/llvm.fma.f16.ll12
-rw-r--r--test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll24
-rw-r--r--test/CodeGen/AMDGPU/llvm.log2.f16.ll8
-rw-r--r--test/CodeGen/AMDGPU/llvm.maxnum.f16.ll22
-rw-r--r--test/CodeGen/AMDGPU/llvm.minnum.f16.ll22
-rw-r--r--test/CodeGen/AMDGPU/llvm.rint.f16.ll10
-rw-r--r--test/CodeGen/AMDGPU/llvm.round.ll4
-rw-r--r--test/CodeGen/AMDGPU/llvm.sin.f16.ll12
-rw-r--r--test/CodeGen/AMDGPU/llvm.sqrt.f16.ll8
-rw-r--r--test/CodeGen/AMDGPU/llvm.trunc.f16.ll8
-rw-r--r--test/CodeGen/AMDGPU/load-global-f32.ll10
-rw-r--r--test/CodeGen/AMDGPU/load-global-f64.ll6
-rw-r--r--test/CodeGen/AMDGPU/load-global-i16.ll10
-rw-r--r--test/CodeGen/AMDGPU/load-global-i32.ll8
-rw-r--r--test/CodeGen/AMDGPU/load-global-i64.ll10
-rw-r--r--test/CodeGen/AMDGPU/load-global-i8.ll10
-rw-r--r--test/CodeGen/AMDGPU/load-weird-sizes.ll10
-rw-r--r--test/CodeGen/AMDGPU/lower-mem-intrinsics.ll12
-rw-r--r--test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir227
-rw-r--r--test/CodeGen/AMDGPU/mad-combine.ll106
-rw-r--r--test/CodeGen/AMDGPU/madak.ll6
-rw-r--r--test/CodeGen/AMDGPU/madmk.ll4
-rw-r--r--test/CodeGen/AMDGPU/max.ll4
-rw-r--r--test/CodeGen/AMDGPU/merge-stores.ll4
-rw-r--r--test/CodeGen/AMDGPU/mubuf.ll2
-rw-r--r--test/CodeGen/AMDGPU/mul.ll6
-rw-r--r--test/CodeGen/AMDGPU/multi-divergent-exit-region.ll4
-rw-r--r--test/CodeGen/AMDGPU/no-shrink-extloads.ll2
-rw-r--r--test/CodeGen/AMDGPU/or.ll6
-rw-r--r--test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll2
-rw-r--r--test/CodeGen/AMDGPU/reduce-load-width-alignment.ll6
-rw-r--r--test/CodeGen/AMDGPU/regcoal-subrange-join.mir162
-rw-r--r--test/CodeGen/AMDGPU/reorder-stores.ll4
-rw-r--r--test/CodeGen/AMDGPU/rotl.i64.ll4
-rw-r--r--test/CodeGen/AMDGPU/rotr.i64.ll4
-rw-r--r--test/CodeGen/AMDGPU/rsq.ll8
-rw-r--r--test/CodeGen/AMDGPU/s_movk_i32.ll4
-rw-r--r--test/CodeGen/AMDGPU/sad.ll4
-rw-r--r--test/CodeGen/AMDGPU/saddo.ll6
-rw-r--r--test/CodeGen/AMDGPU/salu-to-valu.ll6
-rw-r--r--test/CodeGen/AMDGPU/scalar_to_vector.ll6
-rw-r--r--test/CodeGen/AMDGPU/schedule-global-loads.ll2
-rw-r--r--test/CodeGen/AMDGPU/scratch-buffer.ll4
-rw-r--r--test/CodeGen/AMDGPU/scratch-simple.ll6
-rw-r--r--test/CodeGen/AMDGPU/sdiv.ll6
-rw-r--r--test/CodeGen/AMDGPU/sdwa-peephole.ll24
-rw-r--r--test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll54
-rw-r--r--test/CodeGen/AMDGPU/select-vectors.ll6
-rw-r--r--test/CodeGen/AMDGPU/select.f16.ll63
-rw-r--r--test/CodeGen/AMDGPU/setcc-fneg-constant.ll6
-rw-r--r--test/CodeGen/AMDGPU/setcc.ll10
-rw-r--r--test/CodeGen/AMDGPU/sext-in-reg.ll8
-rw-r--r--test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll4
-rw-r--r--test/CodeGen/AMDGPU/sgpr-copy.ll4
-rw-r--r--test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll4
-rw-r--r--test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll2
-rw-r--r--test/CodeGen/AMDGPU/shift-i64-opts.ll4
-rw-r--r--test/CodeGen/AMDGPU/shl.ll4
-rw-r--r--test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir161
-rw-r--r--test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll2
-rw-r--r--test/CodeGen/AMDGPU/sign_extend.ll4
-rw-r--r--test/CodeGen/AMDGPU/sitofp.f16.ll4
-rw-r--r--test/CodeGen/AMDGPU/sminmax.ll26
-rw-r--r--test/CodeGen/AMDGPU/sminmax.v2i16.ll6
-rw-r--r--test/CodeGen/AMDGPU/spill-cfg-position.ll2
-rw-r--r--test/CodeGen/AMDGPU/sra.ll6
-rw-r--r--test/CodeGen/AMDGPU/srem.ll6
-rw-r--r--test/CodeGen/AMDGPU/srl.ll4
-rw-r--r--test/CodeGen/AMDGPU/ssubo.ll6
-rw-r--r--test/CodeGen/AMDGPU/sub.i16.ll10
-rw-r--r--test/CodeGen/AMDGPU/sub.ll4
-rw-r--r--test/CodeGen/AMDGPU/sub.v2i16.ll16
-rw-r--r--test/CodeGen/AMDGPU/syncscopes.ll19
-rw-r--r--test/CodeGen/AMDGPU/trunc-bitcast-vector.ll4
-rw-r--r--test/CodeGen/AMDGPU/trunc.ll6
-rw-r--r--test/CodeGen/AMDGPU/uaddo.ll10
-rw-r--r--test/CodeGen/AMDGPU/udiv.ll8
-rw-r--r--test/CodeGen/AMDGPU/uitofp.f16.ll4
-rw-r--r--test/CodeGen/AMDGPU/urem.ll6
-rw-r--r--test/CodeGen/AMDGPU/usubo.ll12
-rw-r--r--test/CodeGen/AMDGPU/v_cndmask.ll12
-rw-r--r--test/CodeGen/AMDGPU/v_mac.ll10
-rw-r--r--test/CodeGen/AMDGPU/v_mac_f16.ll38
-rw-r--r--test/CodeGen/AMDGPU/vectorize-global-local.ll2
-rw-r--r--test/CodeGen/AMDGPU/vop-shrink-frame-index.mir161
-rw-r--r--test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir40
-rw-r--r--test/CodeGen/AMDGPU/vselect.ll25
-rw-r--r--test/CodeGen/AMDGPU/waitcnt-permute.mir12
-rw-r--r--test/CodeGen/AMDGPU/xor.ll8
-rw-r--r--test/CodeGen/AMDGPU/zext-i64-bit-operand.ll4
-rw-r--r--test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll24
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir1252
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-isel-fp.ll30
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir20
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir1612
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalizer.mir33
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir58
-rw-r--r--test/CodeGen/ARM/arguments-nosplit-double.ll1
-rw-r--r--test/CodeGen/ARM/arguments-nosplit-i64.ll1
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll8
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-ldm.ll4
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-stm-wrback.ll2
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-vfma.ll28
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll10
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-vldm.ll6
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-vstm-wrback.ll2
-rw-r--r--test/CodeGen/ARM/fence-singlethread.ll2
-rw-r--r--test/CodeGen/ARM/ror.ll33
-rw-r--r--test/CodeGen/ARM/scavenging.mir66
-rw-r--r--test/CodeGen/AVR/branch-relaxation.ll96
-rw-r--r--test/CodeGen/AVR/ctlz.ll5
-rw-r--r--test/CodeGen/AVR/cttz.ll4
-rw-r--r--test/CodeGen/AVR/frmidx-iterator-bug.ll33
-rw-r--r--test/CodeGen/AVR/icall-func-pointer-correct-addr-space.ll15
-rw-r--r--test/CodeGen/AVR/pseudo/ANDIWRdK.mir6
-rw-r--r--test/CodeGen/AVR/pseudo/COMWRd.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/ORIWRdK.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/SBCIWRdK.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/SUBIWRdK.mir2
-rw-r--r--test/CodeGen/AVR/select-mbb-placement-bug.ll6
-rw-r--r--test/CodeGen/BPF/undef.ll58
-rw-r--r--test/CodeGen/Generic/pr33094.ll18
-rw-r--r--test/CodeGen/Hexagon/convertdptoint.ll8
-rw-r--r--test/CodeGen/Hexagon/convertdptoll.ll4
-rw-r--r--test/CodeGen/Hexagon/convertsptoint.ll4
-rw-r--r--test/CodeGen/Hexagon/convertsptoll.ll4
-rw-r--r--test/CodeGen/Hexagon/dadd.ll8
-rw-r--r--test/CodeGen/Hexagon/dmul.ll8
-rw-r--r--test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll8
-rw-r--r--test/CodeGen/Hexagon/dsub.ll8
-rw-r--r--test/CodeGen/Hexagon/fadd.ll8
-rw-r--r--test/CodeGen/Hexagon/fmul.ll8
-rw-r--r--test/CodeGen/Hexagon/fsub.ll8
-rw-r--r--test/CodeGen/Hexagon/hasfp-crash1.ll82
-rw-r--r--test/CodeGen/Hexagon/hasfp-crash2.ll83
-rw-r--r--test/CodeGen/Hexagon/hvx-nontemporal.ll28
-rw-r--r--test/CodeGen/Hexagon/target-flag-ext.mir24
-rw-r--r--test/CodeGen/MIR/AArch64/atomic-memoperands.mir4
-rw-r--r--test/CodeGen/MIR/AArch64/invalid-target-memoperands.mir19
-rw-r--r--test/CodeGen/MIR/AArch64/target-memoperands.mir22
-rw-r--r--test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir20
-rw-r--r--test/CodeGen/MIR/AMDGPU/syncscopes.mir98
-rw-r--r--test/CodeGen/MIR/AMDGPU/target-flags.mir29
-rw-r--r--test/CodeGen/MIR/Generic/runPass.mir2
-rw-r--r--test/CodeGen/MIR/Hexagon/target-flags.mir36
-rw-r--r--test/CodeGen/MIR/X86/tied-physical-regs-match.mir22
-rw-r--r--test/CodeGen/MSP430/Inst16mm.ll4
-rw-r--r--test/CodeGen/NVPTX/lower-aggr-copies.ll61
-rw-r--r--test/CodeGen/PowerPC/PR33636.ll702
-rw-r--r--test/CodeGen/PowerPC/atomics-regression.ll528
-rw-r--r--test/CodeGen/PowerPC/bitreverse.ll23
-rw-r--r--test/CodeGen/PowerPC/build-vector-tests.ll4
-rw-r--r--test/CodeGen/PowerPC/ppc-ctr-dead-code.ll38
-rw-r--r--test/CodeGen/PowerPC/ppc-redzone-alignment-bug.ll32
-rw-r--r--test/CodeGen/PowerPC/ppc64le-smallarg.ll4
-rw-r--r--test/CodeGen/PowerPC/pr33093.ll165
-rw-r--r--test/CodeGen/PowerPC/select-addrRegRegOnly.ll37
-rw-r--r--test/CodeGen/PowerPC/svr4-redzone.ll6
-rw-r--r--test/CodeGen/PowerPC/tailcall1-64.ll7
-rw-r--r--test/CodeGen/PowerPC/testBitReverse.ll105
-rw-r--r--test/CodeGen/PowerPC/vec_extract_p9.ll167
-rw-r--r--test/CodeGen/PowerPC/vec_int_ext.ll253
-rw-r--r--test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll16
-rw-r--r--test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir34
-rw-r--r--test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll22
-rw-r--r--test/CodeGen/WebAssembly/umulo-i64.ll21
-rw-r--r--test/CodeGen/X86/2012-08-16-setcc.ll42
-rw-r--r--test/CodeGen/X86/GC/badreadproto.ll2
-rw-r--r--test/CodeGen/X86/GC/badrootproto.ll2
-rw-r--r--test/CodeGen/X86/GC/badwriteproto.ll2
-rw-r--r--test/CodeGen/X86/GC/fat.ll2
-rw-r--r--test/CodeGen/X86/GC/outside.ll2
-rw-r--r--test/CodeGen/X86/GlobalISel/GV.ll63
-rw-r--r--test/CodeGen/X86/GlobalISel/add-vec.ll173
-rw-r--r--test/CodeGen/X86/GlobalISel/constant.ll9
-rw-r--r--test/CodeGen/X86/GlobalISel/ext-x86-64.ll2
-rw-r--r--test/CodeGen/X86/GlobalISel/ext.ll36
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-GV.mir31
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-ext.mir171
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir110
-rw-r--r--test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll22
-rw-r--r--test/CodeGen/X86/GlobalISel/memop-scalar.ll20
-rw-r--r--test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir27
-rw-r--r--test/CodeGen/X86/GlobalISel/select-GV.mir99
-rw-r--r--test/CodeGen/X86/GlobalISel/select-constant.mir31
-rw-r--r--test/CodeGen/X86/GlobalISel/select-ext.mir64
-rw-r--r--test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir53
-rw-r--r--test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir74
-rw-r--r--test/CodeGen/X86/GlobalISel/x86_64-fallback.ll18
-rw-r--r--test/CodeGen/X86/avg.ll6
-rw-r--r--test/CodeGen/X86/avx-cmp.ll197
-rw-r--r--test/CodeGen/X86/avx-load-store.ll277
-rw-r--r--test/CodeGen/X86/avx-schedule.ll648
-rw-r--r--test/CodeGen/X86/avx-unpack.ll166
-rw-r--r--test/CodeGen/X86/avx-vinsertf128.ll118
-rw-r--r--test/CodeGen/X86/avx2-vbroadcast.ll12
-rw-r--r--test/CodeGen/X86/avx512-cmp.ll2
-rw-r--r--test/CodeGen/X86/avx512-insert-extract.ll26
-rw-r--r--test/CodeGen/X86/avx512-vec-cmp.ll53
-rw-r--r--test/CodeGen/X86/avx512vl-vec-cmp.ll925
-rw-r--r--test/CodeGen/X86/avx512vl-vec-masked-cmp.ll50906
-rw-r--r--test/CodeGen/X86/bitcast-and-setcc-128.ll156
-rw-r--r--test/CodeGen/X86/bitcast-and-setcc-256.ll104
-rw-r--r--test/CodeGen/X86/bitcast-and-setcc-512.ll1868
-rw-r--r--test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll3483
-rw-r--r--test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll3279
-rw-r--r--test/CodeGen/X86/bitcast-int-to-vector-bool.ll685
-rw-r--r--test/CodeGen/X86/bitcast-setcc-128.ll156
-rw-r--r--test/CodeGen/X86/bitcast-setcc-256.ll419
-rw-r--r--test/CodeGen/X86/bitcast-setcc-512.ll1377
-rw-r--r--test/CodeGen/X86/block-placement.ll101
-rw-r--r--test/CodeGen/X86/bool-simplify.ll129
-rw-r--r--test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll1991
-rw-r--r--test/CodeGen/X86/bswap-wide-int.ll4
-rw-r--r--test/CodeGen/X86/build-vector-128.ll23
-rw-r--r--test/CodeGen/X86/build-vector-256.ll29
-rw-r--r--test/CodeGen/X86/build-vector-512.ll20
-rw-r--r--test/CodeGen/X86/cast-vsel.ll2
-rw-r--r--test/CodeGen/X86/clear_upper_vector_element_bits.ll236
-rw-r--r--test/CodeGen/X86/cmov.ll205
-rw-r--r--test/CodeGen/X86/code_placement_cold_loop_blocks.ll5
-rw-r--r--test/CodeGen/X86/combine-avx-intrinsics.ll47
-rw-r--r--test/CodeGen/X86/combine-avx2-intrinsics.ll69
-rw-r--r--test/CodeGen/X86/combine-rotates.ll80
-rw-r--r--test/CodeGen/X86/combine-sse41-intrinsics.ll72
-rw-r--r--test/CodeGen/X86/constant-hoisting-bfi.ll52
-rw-r--r--test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll124
-rw-r--r--test/CodeGen/X86/extract-store.ll2
-rw-r--r--test/CodeGen/X86/extractelement-legalization-store-ordering.ll51
-rw-r--r--test/CodeGen/X86/fast-isel-abort-warm.ll19
-rw-r--r--test/CodeGen/X86/fast-isel-gc-intrinsics.ll57
-rw-r--r--test/CodeGen/X86/fastisel-softfloat.ll15
-rw-r--r--test/CodeGen/X86/fp128-i128.ll2
-rw-r--r--test/CodeGen/X86/gather-addresses.ll16
-rw-r--r--test/CodeGen/X86/half.ll1045
-rw-r--r--test/CodeGen/X86/illegal-bitfield-loadstore.ll251
-rw-r--r--test/CodeGen/X86/optimize-max-1.ll51
-rw-r--r--test/CodeGen/X86/optimize-max-2.ll26
-rw-r--r--test/CodeGen/X86/pr15309.ll50
-rw-r--r--test/CodeGen/X86/pr23603.ll27
-rw-r--r--test/CodeGen/X86/pr33715.ll16
-rw-r--r--test/CodeGen/X86/rdrand-x86_64.ll19
-rw-r--r--test/CodeGen/X86/rdrand.ll119
-rw-r--r--test/CodeGen/X86/rdseed-x86_64.ll19
-rw-r--r--test/CodeGen/X86/rdseed.ll66
-rw-r--r--test/CodeGen/X86/recip-fastmath.ll116
-rw-r--r--test/CodeGen/X86/recip-fastmath2.ll162
-rw-r--r--test/CodeGen/X86/regalloc-reconcile-broken-hints.ll2
-rw-r--r--test/CodeGen/X86/rotate4.ll104
-rw-r--r--test/CodeGen/X86/sbb.ll46
-rw-r--r--test/CodeGen/X86/select_const.ll113
-rw-r--r--test/CodeGen/X86/shift-codegen.ll42
-rw-r--r--test/CodeGen/X86/shift-folding.ll57
-rw-r--r--test/CodeGen/X86/shuffle-vs-trunc-256.ll313
-rw-r--r--test/CodeGen/X86/shuffle-vs-trunc-512.ll422
-rw-r--r--test/CodeGen/X86/sink-blockfreq.ll2
-rw-r--r--test/CodeGen/X86/sink-gep-before-mem-inst.ll25
-rw-r--r--test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll55
-rw-r--r--test/CodeGen/X86/sse-schedule.ll248
-rw-r--r--test/CodeGen/X86/sse2-schedule.ll598
-rw-r--r--test/CodeGen/X86/sse3-schedule.ll48
-rw-r--r--test/CodeGen/X86/sse41-schedule.ll222
-rw-r--r--test/CodeGen/X86/sse42-schedule.ll38
-rw-r--r--test/CodeGen/X86/sse4a-schedule.ll95
-rw-r--r--test/CodeGen/X86/ssse3-schedule.ll74
-rw-r--r--test/CodeGen/X86/swizzle-avx2.ll73
-rw-r--r--test/CodeGen/X86/tbm_patterns.ll502
-rw-r--r--test/CodeGen/X86/vec-copysign.ll2
-rw-r--r--test/CodeGen/X86/vec_return.ll17
-rw-r--r--test/CodeGen/X86/vec_shift6.ll9
-rw-r--r--test/CodeGen/X86/vec_unsafe-fp-math.ll15
-rw-r--r--test/CodeGen/X86/vector-popcnt-128.ll93
-rw-r--r--test/CodeGen/X86/vector-popcnt-256.ll14
-rw-r--r--test/CodeGen/X86/vector-popcnt-512.ll120
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-sse4a.ll86
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-ssse3.ll15
-rw-r--r--test/CodeGen/X86/vector-shuffle-sse4a.ll129
-rw-r--r--test/CodeGen/X86/vector-truncate-combine.ll10
-rw-r--r--test/CodeGen/X86/vector-tzcnt-128.ll54
-rw-r--r--test/CodeGen/X86/vector-tzcnt-256.ll28
-rw-r--r--test/CodeGen/X86/vector-tzcnt-512.ll124
-rw-r--r--test/CodeGen/X86/wide-integer-cmp.ll2
-rw-r--r--test/CodeGen/X86/x32-lea-1.ll10
-rw-r--r--test/CodeGen/X86/x86-interleaved-access.ll229
-rw-r--r--test/CodeGen/X86/zext-shl.ll39
-rw-r--r--test/CodeGen/X86/zext-trunc.ll9
-rw-r--r--test/DebugInfo/COFF/asm.ll6
-rw-r--r--test/DebugInfo/COFF/cpp-mangling.ll4
-rw-r--r--test/DebugInfo/COFF/fp-stack.ll2
-rw-r--r--test/DebugInfo/COFF/globals.ll6
-rw-r--r--test/DebugInfo/COFF/inlining-files.ll4
-rw-r--r--test/DebugInfo/COFF/inlining-header.ll8
-rw-r--r--test/DebugInfo/COFF/inlining-levels.ll8
-rw-r--r--test/DebugInfo/COFF/inlining-same-name.ll6
-rw-r--r--test/DebugInfo/COFF/inlining.ll6
-rw-r--r--test/DebugInfo/COFF/int8-char-type.ll4
-rw-r--r--test/DebugInfo/COFF/local-constant.ll5
-rw-r--r--test/DebugInfo/COFF/local-variable-gap.ll7
-rw-r--r--test/DebugInfo/COFF/local-variables.ll26
-rw-r--r--test/DebugInfo/COFF/long-name.ll2
-rw-r--r--test/DebugInfo/COFF/multifile.ll8
-rw-r--r--test/DebugInfo/COFF/multifunction.ll12
-rw-r--r--test/DebugInfo/COFF/pieces.ll34
-rw-r--r--test/DebugInfo/COFF/register-variables.ll30
-rw-r--r--test/DebugInfo/COFF/simple.ll8
-rw-r--r--test/DebugInfo/COFF/typedef.ll4
-rw-r--r--test/DebugInfo/COFF/types-array.ll6
-rw-r--r--test/DebugInfo/COFF/types-basic.ll46
-rw-r--r--test/DebugInfo/COFF/udts.ll22
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-str-offsets-macho.obin0 -> 1584 bytes
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-str-offsets-macho.s201
-rwxr-xr-xtest/DebugInfo/Inputs/dwarfdump-test3.elf-x86-64-space (renamed from test/DebugInfo/Inputs/dwarfdump-test3.elf-x86-64 space)bin8944 -> 8944 bytes
-rw-r--r--test/DebugInfo/PDB/Inputs/every-type.cpp63
-rw-r--r--test/DebugInfo/PDB/Inputs/every-type.pdbbin0 -> 102400 bytes
-rw-r--r--test/DebugInfo/PDB/Inputs/every-type.yaml272
-rw-r--r--test/DebugInfo/PDB/every-type.test261
-rw-r--r--test/DebugInfo/PDB/pdbdump-headers.test116
-rw-r--r--test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test3
-rw-r--r--test/DebugInfo/PDB/pdbdump-mergetypes.test6
-rw-r--r--test/DebugInfo/X86/dbg-declare-inalloca.ll14
-rw-r--r--test/DebugInfo/dwarfdump-str-offsets.test148
-rw-r--r--test/DebugInfo/invalid-relocations.test35
-rw-r--r--test/DebugInfo/llvm-symbolizer.test7
-rw-r--r--test/Instrumentation/MemorySanitizer/unsized_type.ll22
-rw-r--r--test/Instrumentation/ThreadSanitizer/atomic.ll8
-rw-r--r--test/LTO/Resolution/X86/linker-redef-thin.ll16
-rw-r--r--test/Linker/Inputs/syncscope-1.ll6
-rw-r--r--test/Linker/Inputs/syncscope-2.ll6
-rw-r--r--test/Linker/Inputs/thumb-module-inline-asm.ll3
-rw-r--r--test/Linker/link-arm-and-thumb-module-inline-asm.ll20
-rw-r--r--test/Linker/syncscopes.ll11
-rw-r--r--test/MC/AArch64/label-arithmetic-diags-elf.s51
-rw-r--r--test/MC/AMDGPU/gfx9_asm_all.s459
-rw-r--r--test/MC/AMDGPU/vop3p-err.s41
-rw-r--r--test/MC/AMDGPU/vop3p.s63
-rw-r--r--test/MC/ARM/elf-movt.s24
-rw-r--r--test/MC/ARM/invalid-instructions-spellcheck.s68
-rw-r--r--test/MC/ARM/ldr-pseudo-unpredictable.s16
-rw-r--r--test/MC/COFF/bad-expr.s3
-rw-r--r--test/MC/COFF/cv-def-range-gap.s16
-rw-r--r--test/MC/COFF/cv-def-range.s10
-rw-r--r--test/MC/COFF/cv-inline-linetable-infloop.s2
-rw-r--r--test/MC/COFF/cv-inline-linetable-unlikely.s4
-rw-r--r--test/MC/COFF/cv-inline-linetable-unreachable.s2
-rw-r--r--test/MC/COFF/cv-inline-linetable.s4
-rw-r--r--test/MC/Disassembler/Mips/mt/valid-r2-el.txt32
-rw-r--r--test/MC/Disassembler/Mips/mt/valid-r2.txt32
-rw-r--r--test/MC/ELF/bad-expr3.s3
-rw-r--r--test/MC/Mips/addend.s21
-rw-r--r--test/MC/Mips/mt/abiflag.s10
-rw-r--r--test/MC/Mips/mt/invalid-wrong-error.s3
-rw-r--r--test/MC/Mips/mt/invalid.s27
-rw-r--r--test/MC/Mips/mt/mftr-mttr-aliases-invalid-wrong-error.s18
-rw-r--r--test/MC/Mips/mt/mftr-mttr-aliases-invalid.s23
-rw-r--r--test/MC/Mips/mt/mftr-mttr-aliases.s47
-rw-r--r--test/MC/Mips/mt/mftr-mttr-reserved-valid.s8
-rw-r--r--test/MC/Mips/mt/module-directive-invalid.s6
-rw-r--r--test/MC/Mips/mt/module-directive.s16
-rw-r--r--test/MC/Mips/mt/set-directive.s14
-rw-r--r--test/MC/Mips/mt/valid.s33
-rw-r--r--test/MC/WebAssembly/array-fill.ll14
-rw-r--r--test/MC/WebAssembly/external-data.ll3
-rw-r--r--test/MC/WebAssembly/external-func-address.ll49
-rw-r--r--test/MC/WebAssembly/unnamed-data.ll3
-rw-r--r--test/MC/WebAssembly/weak-alias.ll37
-rw-r--r--test/Object/Inputs/trivial-object-test.wasmbin0 -> 303 bytes
-rw-r--r--test/Object/Inputs/trivial.ll3
-rw-r--r--test/Object/nm-trivial-object.test7
-rw-r--r--test/Object/obj2yaml.test25
-rw-r--r--test/Object/objdump-relocations.test7
-rw-r--r--test/ObjectYAML/wasm/data_section.yaml5
-rw-r--r--test/Other/2002-01-31-CallGraph.ll2
-rw-r--r--test/Other/new-pm-defaults.ll40
-rw-r--r--test/Other/new-pm-lto-defaults.ll11
-rw-r--r--test/Other/pass-pipelines.ll2
-rw-r--r--test/SafepointIRVerifier/basic-use-after-reloc.ll23
-rw-r--r--test/SafepointIRVerifier/compares.ll85
-rw-r--r--test/SafepointIRVerifier/constant-bases.ll70
-rw-r--r--test/SafepointIRVerifier/unrecorded-live-at-sp.ll71
-rw-r--r--test/SafepointIRVerifier/uses-in-phi-nodes.ll78
-rw-r--r--test/TableGen/AsmVariant.td1
-rw-r--r--test/TableGen/GlobalISelEmitter.td931
-rw-r--r--test/TableGen/UnterminatedComment.td2
-rw-r--r--test/Transforms/ArgumentPromotion/pr33641_remove_arg_dbgvalue.ll38
-rw-r--r--test/Transforms/CodeGenPrepare/X86/memcmp.ll77
-rw-r--r--test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll24
-rw-r--r--test/Transforms/CodeGenPrepare/crash-on-large-allocas.ll16
-rw-r--r--test/Transforms/ConstantHoisting/ARM/bad-cases.ll31
-rw-r--r--test/Transforms/ConstantHoisting/ARM/insertvalue.ll31
-rw-r--r--test/Transforms/ConstantHoisting/X86/ehpad.ll5
-rw-r--r--test/Transforms/GVN/PRE/atomic.ll6
-rw-r--r--test/Transforms/GVN/PRE/phi-translate-2.ll131
-rw-r--r--test/Transforms/GVN/PRE/pre-gep-load.ll2
-rw-r--r--test/Transforms/GVN/PRE/pre-load.ll6
-rw-r--r--test/Transforms/IndVarSimplify/canonicalize-cmp.ll98
-rw-r--r--test/Transforms/IndVarSimplify/eliminate-comparison.ll4
-rw-r--r--test/Transforms/IndVarSimplify/strengthen-overflow.ll84
-rw-r--r--test/Transforms/IndVarSimplify/widen-loop-comp.ll2
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/basic.ll12
-rw-r--r--test/Transforms/Inline/ARM/inline-target-attr.ll60
-rw-r--r--test/Transforms/Inline/ARM/lit.local.cfg2
-rw-r--r--test/Transforms/Inline/cgscc-incremental-invalidate.ll105
-rw-r--r--test/Transforms/InstCombine/2017-07-07-UMul-ZExt.ll29
-rw-r--r--test/Transforms/InstCombine/and-or-not.ll24
-rw-r--r--test/Transforms/InstCombine/bswap-fold.ll161
-rw-r--r--test/Transforms/InstCombine/cmp-intrinsic.ll123
-rw-r--r--test/Transforms/InstCombine/consecutive-fences.ll12
-rw-r--r--test/Transforms/InstCombine/icmp.ll16
-rw-r--r--test/Transforms/InstCombine/intrinsics.ll60
-rw-r--r--test/Transforms/InstCombine/or-xor.ll24
-rw-r--r--test/Transforms/InstCombine/pr33689_same_bitwidth.ll53
-rw-r--r--test/Transforms/InstCombine/select-implied.ll77
-rw-r--r--test/Transforms/InstCombine/select.ll7
-rw-r--r--test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll29
-rw-r--r--test/Transforms/LoopRotate/pr33701.ll27
-rw-r--r--test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll4
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/lsr-filtering-scaledreg.ll60
-rw-r--r--test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll353
-rw-r--r--test/Transforms/LoopUnroll/runtime-loop.ll68
-rw-r--r--test/Transforms/LoopVectorize/X86/slm-no-vectorize.ll49
-rw-r--r--test/Transforms/LoopVectorize/if-conversion-nest.ll95
-rw-r--r--test/Transforms/LoopVectorize/pr33706.ll61
-rw-r--r--test/Transforms/LowerTypeTests/Inputs/import-icall.yaml1
-rw-r--r--test/Transforms/LowerTypeTests/import-icall.ll7
-rw-r--r--test/Transforms/NewGVN/pr33720.ll91
-rw-r--r--test/Transforms/PGOProfile/counter_promo_exit_merge.ll4
-rw-r--r--test/Transforms/PGOProfile/counter_promo_mexits.ll4
-rw-r--r--test/Transforms/PGOProfile/counter_promo_nest.ll165
-rw-r--r--test/Transforms/SimplifyCFG/implied-and-or.ll183
-rw-r--r--test/Transforms/SimplifyCFG/sink-common-code.ll24
-rw-r--r--test/Transforms/Sink/fence.ll8
-rw-r--r--test/Transforms/ThinLTOBitcodeWriter/pr33536.ll37
-rw-r--r--test/Unit/lit.cfg5
-rw-r--r--test/Verifier/2004-05-21-SwitchConstantMismatch.ll2
-rw-r--r--test/Verifier/2007-12-21-InvokeParamAttrs.ll2
-rw-r--r--test/Verifier/2008-01-11-VarargAttrs.ll2
-rw-r--r--test/Verifier/2009-05-29-InvokeResult1.ll2
-rw-r--r--test/Verifier/2009-05-29-InvokeResult2.ll2
-rw-r--r--test/Verifier/2009-05-29-InvokeResult3.ll2
-rw-r--r--test/Verifier/byval-1.ll2
-rw-r--r--test/Verifier/element-wise-atomic-memory-intrinsics.ll42
-rw-r--r--test/Verifier/gcread-ptrptr.ll2
-rw-r--r--test/Verifier/gcroot-alloca.ll2
-rw-r--r--test/Verifier/gcroot-meta.ll2
-rw-r--r--test/Verifier/gcroot-ptrptr.ll2
-rw-r--r--test/Verifier/gcwrite-ptrptr.ll2
-rw-r--r--test/lit.cfg5
-rw-r--r--test/tools/llvm-cov/threads.c11
-rw-r--r--test/tools/llvm-cov/zeroFunctionFile.c2
-rw-r--r--test/tools/llvm-objdump/ARM/Inputs/reloc-half.obj.macho-armbin0 -> 360 bytes
-rw-r--r--test/tools/llvm-objdump/ARM/macho-reloc-half.test4
-rw-r--r--test/tools/llvm-objdump/Inputs/test.wasmbin181 -> 0 bytes
-rw-r--r--test/tools/llvm-objdump/Inputs/trivial.ll19
-rw-r--r--test/tools/llvm-objdump/Inputs/trivial.obj.wasmbin0 -> 303 bytes
-rw-r--r--test/tools/llvm-objdump/WebAssembly/symbol-table.test17
-rw-r--r--test/tools/llvm-objdump/wasm.txt35
-rw-r--r--test/tools/llvm-pdbdump/partial-type-stream.test3
-rw-r--r--test/tools/llvm-profdata/c-general.test4
-rw-r--r--test/tools/llvm-readobj/Inputs/trivial.ll14
-rw-r--r--test/tools/llvm-readobj/Inputs/trivial.obj.wasmbin221 -> 285 bytes
-rw-r--r--test/tools/llvm-readobj/codeview-linetables.test20
-rw-r--r--test/tools/llvm-readobj/file-headers.test3
-rw-r--r--test/tools/llvm-readobj/relocations.test15
-rw-r--r--test/tools/llvm-readobj/sections.test131
-rw-r--r--test/tools/llvm-readobj/symbols.test22
-rw-r--r--tools/gold/gold-plugin.cpp2
-rw-r--r--tools/lli/OrcLazyJIT.cpp25
-rw-r--r--tools/lli/OrcLazyJIT.h29
-rw-r--r--tools/lli/RemoteJITUtils.h4
-rw-r--r--tools/lli/lli.cpp2
-rw-r--r--tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp8
-rw-r--r--tools/llvm-c-test/echo.cpp8
-rw-r--r--tools/llvm-cov/CodeCoverage.cpp19
-rw-r--r--tools/llvm-lto/llvm-lto.cpp6
-rw-r--r--tools/llvm-objdump/llvm-objdump.cpp2
-rw-r--r--tools/llvm-pdbutil/CMakeLists.txt1
-rw-r--r--tools/llvm-pdbutil/Diff.cpp692
-rw-r--r--tools/llvm-pdbutil/DiffPrinter.cpp147
-rw-r--r--tools/llvm-pdbutil/DiffPrinter.h172
-rw-r--r--tools/llvm-pdbutil/DumpOutputStyle.cpp7
-rw-r--r--tools/llvm-pdbutil/FormatUtil.cpp52
-rw-r--r--tools/llvm-pdbutil/FormatUtil.h10
-rw-r--r--tools/llvm-pdbutil/MinimalTypeDumper.cpp6
-rw-r--r--tools/llvm-pdbutil/StreamUtil.cpp85
-rw-r--r--tools/llvm-pdbutil/StreamUtil.h5
-rw-r--r--tools/llvm-pdbutil/llvm-pdbutil.cpp44
-rw-r--r--tools/llvm-pdbutil/llvm-pdbutil.h7
-rw-r--r--tools/llvm-profdata/llvm-profdata.cpp74
-rw-r--r--tools/llvm-readobj/COFFDumper.cpp6
-rw-r--r--tools/llvm-readobj/WasmDumper.cpp6
-rw-r--r--tools/llvm-shlib/CMakeLists.txt2
-rw-r--r--tools/llvm-stress/llvm-stress.cpp16
-rw-r--r--tools/obj2yaml/wasm2yaml.cpp7
-rw-r--r--tools/opt-viewer/CMakeLists.txt13
-rwxr-xr-xtools/opt-viewer/opt-diff.py (renamed from utils/opt-viewer/opt-diff.py)0
-rwxr-xr-xtools/opt-viewer/opt-stats.py (renamed from utils/opt-viewer/opt-stats.py)0
-rwxr-xr-xtools/opt-viewer/opt-viewer.py (renamed from utils/opt-viewer/opt-viewer.py)5
-rw-r--r--tools/opt-viewer/optpmap.py (renamed from utils/opt-viewer/optpmap.py)0
-rw-r--r--tools/opt-viewer/optrecord.py (renamed from utils/opt-viewer/optrecord.py)0
-rw-r--r--tools/opt-viewer/style.css (renamed from utils/opt-viewer/style.css)0
-rw-r--r--tools/opt/NewPMDriver.cpp101
-rw-r--r--tools/sanstats/sanstats.cpp5
-rw-r--r--tools/yaml2obj/yaml2wasm.cpp2
-rw-r--r--unittests/ADT/APFloatTest.cpp16
-rw-r--r--unittests/ADT/FunctionRefTest.cpp14
-rw-r--r--unittests/Analysis/AliasAnalysisTest.cpp5
-rw-r--r--unittests/Analysis/CGSCCPassManagerTest.cpp198
-rw-r--r--unittests/Analysis/LazyCallGraphTest.cpp33
-rw-r--r--unittests/ExecutionEngine/Orc/CompileOnDemandLayerTest.cpp5
-rw-r--r--unittests/ExecutionEngine/Orc/GlobalMappingLayerTest.cpp8
-rw-r--r--unittests/ExecutionEngine/Orc/LazyEmittingLayerTest.cpp2
-rw-r--r--unittests/ExecutionEngine/Orc/ObjectTransformLayerTest.cpp95
-rw-r--r--unittests/ExecutionEngine/Orc/OrcCAPITest.cpp32
-rw-r--r--unittests/ExecutionEngine/Orc/OrcTestCommon.h29
-rw-r--r--unittests/ExecutionEngine/Orc/RTDyldObjectLinkingLayerTest.cpp60
-rw-r--r--unittests/IR/CMakeLists.txt2
-rw-r--r--unittests/IR/ModuleTest.cpp2
-rw-r--r--unittests/IR/PassBuilderCallbacksTest.cpp520
-rw-r--r--unittests/IR/PassManagerTest.cpp7
-rw-r--r--unittests/ProfileData/CMakeLists.txt2
-rw-r--r--unittests/ProfileData/CoverageMappingTest.cpp97
-rw-r--r--unittests/ProfileData/InstrProfTest.cpp268
-rw-r--r--unittests/Support/DynamicLibrary/DynamicLibraryTest.cpp10
-rw-r--r--unittests/Support/ErrorTest.cpp2
-rw-r--r--unittests/Support/Host.cpp61
-rw-r--r--unittests/Support/MathExtrasTest.cpp2
-rw-r--r--unittests/Transforms/Utils/Cloning.cpp13
-rw-r--r--utils/TableGen/AsmMatcherEmitter.cpp47
-rw-r--r--utils/TableGen/AsmWriterEmitter.cpp4
-rw-r--r--utils/TableGen/CodeEmitterGen.cpp14
-rw-r--r--utils/TableGen/CodeGenInstruction.h2
-rw-r--r--utils/TableGen/CodeGenMapTable.cpp4
-rw-r--r--utils/TableGen/CodeGenTarget.cpp2
-rw-r--r--utils/TableGen/CodeGenTarget.h2
-rw-r--r--utils/TableGen/DAGISelMatcherGen.cpp2
-rw-r--r--utils/TableGen/FastISelEmitter.cpp18
-rw-r--r--utils/TableGen/FixedLenDecoderEmitter.cpp4
-rw-r--r--utils/TableGen/GlobalISelEmitter.cpp777
-rw-r--r--utils/TableGen/InstrInfoEmitter.cpp12
-rw-r--r--utils/TableGen/RegisterBankEmitter.cpp2
-rw-r--r--utils/TableGen/SearchableTableEmitter.cpp10
-rw-r--r--utils/TableGen/SubtargetEmitter.cpp9
-rw-r--r--utils/TableGen/X86DisassemblerTables.cpp2
-rw-r--r--utils/TableGen/X86DisassemblerTables.h2
-rw-r--r--utils/TableGen/X86ModRMFilters.h2
-rw-r--r--utils/TableGen/X86RecognizableInstr.cpp4
-rw-r--r--utils/TableGen/X86RecognizableInstr.h2
-rwxr-xr-xutils/docker/build_docker_image.sh54
-rwxr-xr-xutils/docker/scripts/build_install_llvm.sh12
-rw-r--r--utils/lit/lit/TestRunner.py241
-rw-r--r--utils/lit/lit/formats/googletest.py5
-rwxr-xr-xutils/lit/lit/main.py3
-rw-r--r--utils/lit/tests/selecting.py5
-rw-r--r--utils/vim/syntax/llvm.vim6
1199 files changed, 98421 insertions, 18946 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fc05f30e4cdb..61ecfdf970d0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -288,6 +288,10 @@ set(LLVM_LIBDIR_SUFFIX "" CACHE STRING "Define suffix of library directory name
set(LLVM_TOOLS_INSTALL_DIR "bin" CACHE STRING "Path for binary subdirectory (defaults to 'bin')")
mark_as_advanced(LLVM_TOOLS_INSTALL_DIR)
+set(LLVM_UTILS_INSTALL_DIR "bin" CACHE STRING
+ "Path to install LLVM utilities (enabled by LLVM_INSTALL_UTILS=ON) (defaults to LLVM_TOOLS_INSTALL_DIR)")
+mark_as_advanced(LLVM_UTILS_INSTALL_DIR)
+
# They are used as destination of target generators.
set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/bin)
set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/lib${LLVM_LIBDIR_SUFFIX})
diff --git a/cmake/modules/AddLLVM.cmake b/cmake/modules/AddLLVM.cmake
index 2b54bdbf2900..1c922651b133 100755
--- a/cmake/modules/AddLLVM.cmake
+++ b/cmake/modules/AddLLVM.cmake
@@ -91,7 +91,7 @@ function(add_llvm_symbol_exports target_name export_file)
DEPENDS ${export_file}
VERBATIM
COMMENT "Creating export file for ${target_name}")
- if (${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
+ if (${LLVM_LINKER_IS_SOLARISLD})
set_property(TARGET ${target_name} APPEND_STRING PROPERTY
LINK_FLAGS " -Wl,-M,${CMAKE_CURRENT_BINARY_DIR}/${native_export_file}")
else()
@@ -148,13 +148,28 @@ function(add_llvm_symbol_exports target_name export_file)
endfunction(add_llvm_symbol_exports)
if(NOT WIN32 AND NOT APPLE)
+ # Detect what linker we have here
execute_process(
COMMAND ${CMAKE_C_COMPILER} -Wl,--version
OUTPUT_VARIABLE stdout
- ERROR_QUIET
+ ERROR_VARIABLE stderr
)
+ set(LLVM_LINKER_DETECTED ON)
if("${stdout}" MATCHES "GNU gold")
set(LLVM_LINKER_IS_GOLD ON)
+ message(STATUS "Linker detection: GNU Gold")
+ elseif("${stdout}" MATCHES "^LLD")
+ set(LLVM_LINKER_IS_LLD ON)
+ message(STATUS "Linker detection: LLD")
+ elseif("${stdout}" MATCHES "GNU ld")
+ set(LLVM_LINKER_IS_GNULD ON)
+ message(STATUS "Linker detection: GNU ld")
+ elseif("${stderr}" MATCHES "Solaris Link Editors")
+ set(LLVM_LINKER_IS_SOLARISLD ON)
+ message(STATUS "Linker detection: Solaris ld")
+ else()
+ set(LLVM_LINKER_DETECTED OFF)
+ message(STATUS "Linker detection: unknown")
endif()
endif()
@@ -865,7 +880,7 @@ macro(add_llvm_utility name)
set_target_properties(${name} PROPERTIES FOLDER "Utils")
if( LLVM_INSTALL_UTILS AND LLVM_BUILD_UTILS )
install (TARGETS ${name}
- RUNTIME DESTINATION bin
+ RUNTIME DESTINATION ${LLVM_UTILS_INSTALL_DIR}
COMPONENT ${name})
if (NOT CMAKE_CONFIGURATION_TYPES)
add_custom_target(install-${name}
@@ -1159,11 +1174,6 @@ function(add_lit_target target comment)
list(APPEND LIT_ARGS --param build_mode=${CMAKE_CFG_INTDIR})
endif ()
if (EXISTS ${LLVM_MAIN_SRC_DIR}/utils/lit/lit.py)
- # reset cache after erraneous r283029
- # TODO: remove this once all buildbots run
- if (LIT_COMMAND STREQUAL "${PYTHON_EXECUTABLE} ${LLVM_MAIN_SRC_DIR}/utils/lit/lit.py")
- unset(LIT_COMMAND CACHE)
- endif()
set (LIT_COMMAND "${PYTHON_EXECUTABLE};${LLVM_MAIN_SRC_DIR}/utils/lit/lit.py"
CACHE STRING "Command used to spawn llvm-lit")
else()
diff --git a/cmake/modules/HandleLLVMOptions.cmake b/cmake/modules/HandleLLVMOptions.cmake
index 98f58d7b197d..0676317acc68 100644
--- a/cmake/modules/HandleLLVMOptions.cmake
+++ b/cmake/modules/HandleLLVMOptions.cmake
@@ -686,8 +686,8 @@ endif()
# lld doesn't print colored diagnostics when invoked from Ninja
if (UNIX AND CMAKE_GENERATOR STREQUAL "Ninja")
include(CheckLinkerFlag)
- check_linker_flag("-Wl,-color-diagnostics" LINKER_SUPPORTS_COLOR_DIAGNOSTICS)
- append_if(LINKER_SUPPORTS_COLOR_DIAGNOSTICS "-Wl,-color-diagnostics"
+ check_linker_flag("-Wl,--color-diagnostics" LINKER_SUPPORTS_COLOR_DIAGNOSTICS)
+ append_if(LINKER_SUPPORTS_COLOR_DIAGNOSTICS "-Wl,--color-diagnostics"
CMAKE_EXE_LINKER_FLAGS CMAKE_MODULE_LINKER_FLAGS CMAKE_SHARED_LINKER_FLAGS)
endif()
diff --git a/cmake/modules/LLVMExternalProjectUtils.cmake b/cmake/modules/LLVMExternalProjectUtils.cmake
index d457389f3ca3..c851eb8dbf08 100644
--- a/cmake/modules/LLVMExternalProjectUtils.cmake
+++ b/cmake/modules/LLVMExternalProjectUtils.cmake
@@ -195,8 +195,16 @@ function(llvm_ExternalProject_Add name source_dir)
# Add top-level targets
foreach(target ${ARG_EXTRA_TARGETS})
+ string(REPLACE ":" ";" target_list ${target})
+ list(GET target_list 0 target)
+ list(LENGTH target_list target_list_len)
+ if(${target_list_len} GREATER 1)
+ list(GET target_list 1 target_name)
+ else()
+ set(target_name "${target}")
+ endif()
llvm_ExternalProject_BuildCmd(build_runtime_cmd ${target} ${BINARY_DIR})
- add_custom_target(${target}
+ add_custom_target(${target_name}
COMMAND ${build_runtime_cmd}
DEPENDS ${name}-configure
WORKING_DIRECTORY ${BINARY_DIR}
diff --git a/docs/AMDGPUUsage.rst b/docs/AMDGPUUsage.rst
index 57822ae9ab0a..41c7ecba527f 100644
--- a/docs/AMDGPUUsage.rst
+++ b/docs/AMDGPUUsage.rst
@@ -190,9 +190,7 @@ names from both the *Processor* and *Alternative Processor* can be used.
gfx810 - stoney amdgcn APU
**GCN GFX9**
--------------------------------------------------------------------
- gfx900 amdgcn dGPU - FirePro W9500
- - FirePro S9500
- - FirePro S9500x2
+ gfx900 amdgcn dGPU - Radeon Vega Frontier Edition
gfx901 amdgcn dGPU ROCm Same as gfx900
except XNACK is
enabled
diff --git a/docs/CMake.rst b/docs/CMake.rst
index aeebc8f6acf9..bf97e9173158 100644
--- a/docs/CMake.rst
+++ b/docs/CMake.rst
@@ -536,6 +536,11 @@ LLVM-specific variables
during the build. Enabling this option can significantly speed up build times
especially when building LLVM in Debug configurations.
+**LLVM_REVERSE_ITERATION**:BOOL
+  If enabled, all supported unordered llvm containers will be iterated in
+ reverse order. This is useful for uncovering non-determinism caused by
+ iteration of unordered containers.
+
CMake Caches
============
diff --git a/docs/CMakePrimer.rst b/docs/CMakePrimer.rst
index 1e3a09e4d98a..c29d627ee62c 100644
--- a/docs/CMakePrimer.rst
+++ b/docs/CMakePrimer.rst
@@ -112,33 +112,6 @@ In this example the ``extra_sources`` variable is only defined if you're
targeting an Apple platform. For all other targets the ``extra_sources`` will be
evaluated as empty before add_executable is given its arguments.
-One big "Gotcha" with variable dereferencing is that ``if`` commands implicitly
-dereference values. This has some unexpected results. For example:
-
-.. code-block:: cmake
-
- if("${SOME_VAR}" STREQUAL "MSVC")
-
-In this code sample MSVC will be implicitly dereferenced, which will result in
-the if command comparing the value of the dereferenced variables ``SOME_VAR``
-and ``MSVC``. A common workaround to this solution is to prepend strings being
-compared with an ``x``.
-
-.. code-block:: cmake
-
- if("x${SOME_VAR}" STREQUAL "xMSVC")
-
-This works because while ``MSVC`` is a defined variable, ``xMSVC`` is not. This
-pattern is uncommon, but it does occur in LLVM's CMake scripts.
-
-.. note::
-
- Once the LLVM project upgrades its minimum CMake version to 3.1 or later we
- can prevent this behavior by setting CMP0054 to new. For more information on
- CMake policies please see the cmake-policies manpage or the `cmake-policies
- online documentation
- <https://cmake.org/cmake/help/v3.4/manual/cmake-policies.7.html>`_.
-
Lists
-----
diff --git a/docs/CommandGuide/lit.rst b/docs/CommandGuide/lit.rst
index b8299d44d48e..b4d15ef57b73 100644
--- a/docs/CommandGuide/lit.rst
+++ b/docs/CommandGuide/lit.rst
@@ -169,6 +169,13 @@ SELECTION OPTIONS
must be in the range ``1..M``. The environment variable
``LIT_RUN_SHARD`` can also be used in place of this option.
+.. option:: --filter=REGEXP
+
+ Run only those tests whose name matches the regular expression specified in
+  ``REGEXP``. The environment variable ``LIT_FILTER`` can also be used in place
+ of this option, which is especially useful in environments where the call
+ to ``lit`` is issued indirectly.
+
ADDITIONAL OPTIONS
------------------
diff --git a/docs/CommandGuide/llvm-cov.rst b/docs/CommandGuide/llvm-cov.rst
index ea2e625bc4d2..47db8d04e0b2 100644
--- a/docs/CommandGuide/llvm-cov.rst
+++ b/docs/CommandGuide/llvm-cov.rst
@@ -262,6 +262,12 @@ OPTIONS
The demangler is expected to read a newline-separated list of symbols from
stdin and write a newline-separated list of the same length to stdout.
+.. option:: -num-threads=N, -j=N
+
+ Use N threads to write file reports (only applicable when -output-dir is
+ specified). When N=0, llvm-cov auto-detects an appropriate number of threads to
+ use. This is the default.
+
.. option:: -line-coverage-gt=<N>
Show code coverage only for functions with line coverage greater than the
diff --git a/docs/CommandGuide/llvm-profdata.rst b/docs/CommandGuide/llvm-profdata.rst
index f7aa8309485b..5b6330b5dc40 100644
--- a/docs/CommandGuide/llvm-profdata.rst
+++ b/docs/CommandGuide/llvm-profdata.rst
@@ -192,6 +192,12 @@ OPTIONS
information is dumped in a more human readable form (also in text) with
annotations.
+.. option:: -topn=n
+
+ Instruct the profile dumper to show the top ``n`` functions with the
+ hottest basic blocks in the summary section. By default, the topn functions
+ are not dumped.
+
.. option:: -sample
Specify that the input profile is a sample-based profile.
diff --git a/docs/Coroutines.rst b/docs/Coroutines.rst
index f7a38577fe8e..1bea04ebdd2a 100644
--- a/docs/Coroutines.rst
+++ b/docs/Coroutines.rst
@@ -846,7 +846,7 @@ Overview:
"""""""""
The '``llvm.coro.alloc``' intrinsic returns `true` if dynamic allocation is
-required to obtain a memory for the corutine frame and `false` otherwise.
+required to obtain memory for the coroutine frame and `false` otherwise.
Arguments:
""""""""""
diff --git a/docs/Docker.rst b/docs/Docker.rst
index d873e1ebeeb4..e606e1b71a2c 100644
--- a/docs/Docker.rst
+++ b/docs/Docker.rst
@@ -88,15 +88,11 @@ compiled by the system compiler in the debian8 image:
./llvm/utils/docker/build_docker_image.sh \
--source debian8 \
--docker-repository clang-debian8 --docker-tag "staging" \
- -- \
-p clang -i install-clang -i install-clang-headers \
-- \
-DCMAKE_BUILD_TYPE=Release
-Note there are two levels of ``--`` indirection. First one separates
-``build_docker_image.sh`` arguments from ``llvm/utils/build_install_llvm.sh``
-arguments. Second one separates CMake arguments from ``build_install_llvm.sh``
-arguments. Note that build like that doesn't use a 2-stage build process that
+Note that a build like that doesn't use a 2-stage build process that
you probably want for clang. Running a 2-stage build is a little more intricate,
this command will do that:
@@ -108,7 +104,6 @@ this command will do that:
./build_docker_image.sh \
--source debian8 \
--docker-repository clang-debian8 --docker-tag "staging" \
- -- \
-p clang -i stage2-install-clang -i stage2-install-clang-headers \
-- \
-DLLVM_TARGETS_TO_BUILD=Native -DCMAKE_BUILD_TYPE=Release \
@@ -178,7 +173,6 @@ debian8-based image using the latest ``google/stable`` sources for you:
./llvm/utils/docker/build_docker_image.sh \
-s debian8 --d clang-debian8 -t "staging" \
- -- \
--branch branches/google/stable \
-p clang -i install-clang -i install-clang-headers \
-- \
diff --git a/docs/Dummy.html b/docs/Dummy.html
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/docs/Dummy.html
+++ /dev/null
diff --git a/docs/HowToAddABuilder.rst b/docs/HowToAddABuilder.rst
index 08cbecdc2a57..201c71b21391 100644
--- a/docs/HowToAddABuilder.rst
+++ b/docs/HowToAddABuilder.rst
@@ -62,6 +62,9 @@ Here are the steps you can follow to do so:
lab.llvm.org:9990 \
<buildslave-access-name> <buildslave-access-password>
+   To point a slave to the silent master, please use lab.llvm.org:9994 instead
+ of lab.llvm.org:9990.
+
#. Fill the buildslave description and admin name/e-mail. Here is an
example of the buildslave description::
diff --git a/docs/LangRef.rst b/docs/LangRef.rst
index 2a0812ab930f..44efc1498060 100644
--- a/docs/LangRef.rst
+++ b/docs/LangRef.rst
@@ -2209,12 +2209,21 @@ For a simpler introduction to the ordering constraints, see the
same address in this global order. This corresponds to the C++0x/C1x
``memory_order_seq_cst`` and Java volatile.
-.. _singlethread:
+.. _syncscope:
-If an atomic operation is marked ``singlethread``, it only *synchronizes
-with* or participates in modification and seq\_cst total orderings with
-other operations running in the same thread (for example, in signal
-handlers).
+If an atomic operation is marked ``syncscope("singlethread")``, it only
+*synchronizes with* and only participates in the seq\_cst total orderings of
+other operations running in the same thread (for example, in signal handlers).
+
+If an atomic operation is marked ``syncscope("<target-scope>")``, where
+``<target-scope>`` is a target-specific synchronization scope, then it is target
+dependent whether it *synchronizes with* and participates in the seq\_cst total
+orderings of other operations.
+
+Otherwise, an atomic operation that is not marked ``syncscope("singlethread")``
+or ``syncscope("<target-scope>")`` *synchronizes with* and participates in the
+seq\_cst total orderings of other operations that are not marked
+``syncscope("singlethread")`` or ``syncscope("<target-scope>")``.
.. _fastmath:
@@ -5034,7 +5043,7 @@ which is the string ``llvm.loop.licm_versioning.disable``. For example:
Loop distribution allows splitting a loop into multiple loops. Currently,
this is only performed if the entire loop cannot be vectorized due to unsafe
-memory dependencies. The transformation will atempt to isolate the unsafe
+memory dependencies. The transformation will attempt to isolate the unsafe
dependencies into their own loop.
This metadata can be used to selectively enable or disable distribution of the
@@ -7380,7 +7389,7 @@ Syntax:
::
<result> = load [volatile] <ty>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>][, !invariant.load !<index>][, !invariant.group !<index>][, !nonnull !<index>][, !dereferenceable !<deref_bytes_node>][, !dereferenceable_or_null !<deref_bytes_node>][, !align !<align_node>]
- <result> = load atomic [volatile] <ty>, <ty>* <pointer> [singlethread] <ordering>, align <alignment> [, !invariant.group !<index>]
+ <result> = load atomic [volatile] <ty>, <ty>* <pointer> [syncscope("<target-scope>")] <ordering>, align <alignment> [, !invariant.group !<index>]
!<index> = !{ i32 1 }
!<deref_bytes_node> = !{i64 <dereferenceable_bytes>}
!<align_node> = !{ i64 <value_alignment> }
@@ -7401,14 +7410,14 @@ modify the number or order of execution of this ``load`` with other
:ref:`volatile operations <volatile>`.
If the ``load`` is marked as ``atomic``, it takes an extra :ref:`ordering
-<ordering>` and optional ``singlethread`` argument. The ``release`` and
-``acq_rel`` orderings are not valid on ``load`` instructions. Atomic loads
-produce :ref:`defined <memmodel>` results when they may see multiple atomic
-stores. The type of the pointee must be an integer, pointer, or floating-point
-type whose bit width is a power of two greater than or equal to eight and less
-than or equal to a target-specific size limit. ``align`` must be explicitly
-specified on atomic loads, and the load has undefined behavior if the alignment
-is not set to a value which is at least the size in bytes of the
+<ordering>` and optional ``syncscope("<target-scope>")`` argument. The
+``release`` and ``acq_rel`` orderings are not valid on ``load`` instructions.
+Atomic loads produce :ref:`defined <memmodel>` results when they may see
+multiple atomic stores. The type of the pointee must be an integer, pointer, or
+floating-point type whose bit width is a power of two greater than or equal to
+eight and less than or equal to a target-specific size limit. ``align`` must be
+explicitly specified on atomic loads, and the load has undefined behavior if the
+alignment is not set to a value which is at least the size in bytes of the
pointee. ``!nontemporal`` does not have any defined semantics for atomic loads.
The optional constant ``align`` argument specifies the alignment of the
@@ -7509,7 +7518,7 @@ Syntax:
::
store [volatile] <ty> <value>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>][, !invariant.group !<index>] ; yields void
- store atomic [volatile] <ty> <value>, <ty>* <pointer> [singlethread] <ordering>, align <alignment> [, !invariant.group !<index>] ; yields void
+ store atomic [volatile] <ty> <value>, <ty>* <pointer> [syncscope("<target-scope>")] <ordering>, align <alignment> [, !invariant.group !<index>] ; yields void
Overview:
"""""""""
@@ -7529,14 +7538,14 @@ allowed to modify the number or order of execution of this ``store`` with other
structural type <t_opaque>`) can be stored.
If the ``store`` is marked as ``atomic``, it takes an extra :ref:`ordering
-<ordering>` and optional ``singlethread`` argument. The ``acquire`` and
-``acq_rel`` orderings aren't valid on ``store`` instructions. Atomic loads
-produce :ref:`defined <memmodel>` results when they may see multiple atomic
-stores. The type of the pointee must be an integer, pointer, or floating-point
-type whose bit width is a power of two greater than or equal to eight and less
-than or equal to a target-specific size limit. ``align`` must be explicitly
-specified on atomic stores, and the store has undefined behavior if the
-alignment is not set to a value which is at least the size in bytes of the
+<ordering>` and optional ``syncscope("<target-scope>")`` argument. The
+``acquire`` and ``acq_rel`` orderings aren't valid on ``store`` instructions.
+Atomic loads produce :ref:`defined <memmodel>` results when they may see
+multiple atomic stores. The type of the pointee must be an integer, pointer, or
+floating-point type whose bit width is a power of two greater than or equal to
+eight and less than or equal to a target-specific size limit. ``align`` must be
+explicitly specified on atomic stores, and the store has undefined behavior if
+the alignment is not set to a value which is at least the size in bytes of the
pointee. ``!nontemporal`` does not have any defined semantics for atomic stores.
The optional constant ``align`` argument specifies the alignment of the
@@ -7597,7 +7606,7 @@ Syntax:
::
- fence [singlethread] <ordering> ; yields void
+ fence [syncscope("<target-scope>")] <ordering> ; yields void
Overview:
"""""""""
@@ -7631,17 +7640,17 @@ A ``fence`` which has ``seq_cst`` ordering, in addition to having both
``acquire`` and ``release`` semantics specified above, participates in
the global program order of other ``seq_cst`` operations and/or fences.
-The optional ":ref:`singlethread <singlethread>`" argument specifies
-that the fence only synchronizes with other fences in the same thread.
-(This is useful for interacting with signal handlers.)
+A ``fence`` instruction can also take an optional
+":ref:`syncscope <syncscope>`" argument.
Example:
""""""""
.. code-block:: llvm
- fence acquire ; yields void
- fence singlethread seq_cst ; yields void
+ fence acquire ; yields void
+ fence syncscope("singlethread") seq_cst ; yields void
+ fence syncscope("agent") seq_cst ; yields void
.. _i_cmpxchg:
@@ -7653,7 +7662,7 @@ Syntax:
::
- cmpxchg [weak] [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <success ordering> <failure ordering> ; yields { ty, i1 }
+ cmpxchg [weak] [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [syncscope("<target-scope>")] <success ordering> <failure ordering> ; yields { ty, i1 }
Overview:
"""""""""
@@ -7682,10 +7691,8 @@ must be at least ``monotonic``, the ordering constraint on failure must be no
stronger than that on success, and the failure ordering cannot be either
``release`` or ``acq_rel``.
-The optional "``singlethread``" argument declares that the ``cmpxchg``
-is only atomic with respect to code (usually signal handlers) running in
-the same thread as the ``cmpxchg``. Otherwise the cmpxchg is atomic with
-respect to all other code in the system.
+A ``cmpxchg`` instruction can also take an optional
+":ref:`syncscope <syncscope>`" argument.
The pointer passed into cmpxchg must have alignment greater than or
equal to the size in memory of the operand.
@@ -7739,7 +7746,7 @@ Syntax:
::
- atomicrmw [volatile] <operation> <ty>* <pointer>, <ty> <value> [singlethread] <ordering> ; yields ty
+ atomicrmw [volatile] <operation> <ty>* <pointer>, <ty> <value> [syncscope("<target-scope>")] <ordering> ; yields ty
Overview:
"""""""""
@@ -7773,6 +7780,9 @@ be a pointer to that type. If the ``atomicrmw`` is marked as
order of execution of this ``atomicrmw`` with other :ref:`volatile
operations <volatile>`.
+An ``atomicrmw`` instruction can also take an optional
+":ref:`syncscope <syncscope>`" argument.
+
Semantics:
""""""""""
@@ -10272,6 +10282,8 @@ overlap. It copies "len" bytes of memory over. If the argument is known
to be aligned to some boundary, this can be specified as the fourth
argument, otherwise it should be set to 0 or 1 (both meaning no alignment).
+.. _int_memmove:
+
'``llvm.memmove``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -10327,6 +10339,8 @@ copies "len" bytes of memory over. If the argument is known to be
aligned to some boundary, this can be specified as the fourth argument,
otherwise it should be set to 0 or 1 (both meaning no alignment).
+.. _int_memset:
+
'``llvm.memset.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
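Taken together, the hunks above replace the old ``singlethread`` keyword with an
optional ``syncscope("<scope>")`` argument on ``fence``, ``load atomic``,
``store atomic``, ``cmpxchg`` and ``atomicrmw``. The fragment below is only an
illustrative sketch of post-patch textual IR, not part of the patch itself; the
``"agent"`` scope mirrors the target-specific scope used in the fence example
above.

.. code-block:: llvm

   define void @scope_examples(i32* %p) {
   entry:
     ; Fence that only synchronizes within the current thread
     ; (signal-handler style synchronization).
     fence syncscope("singlethread") seq_cst
     ; Atomic load with the default (system-wide) scope; atomic store
     ; restricted to the target-specific "agent" scope.
     %v = load atomic i32, i32* %p seq_cst, align 4
     store atomic i32 %v, i32* %p syncscope("agent") monotonic, align 4
     ; cmpxchg and atomicrmw accept the same optional syncscope argument.
     %pair = cmpxchg i32* %p, i32 %v, i32 1 syncscope("singlethread") acq_rel monotonic
     %old = atomicrmw add i32* %p, i32 1 syncscope("singlethread") monotonic
     ret void
   }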
@@ -14168,4 +14182,154 @@ In the most general case call to the '``llvm.memcpy.element.unordered.atomic.*``
lowered to a call to the symbol ``__llvm_memcpy_element_unordered_atomic_*``. Where '*'
is replaced with an actual element size.
+The optimizer is allowed to inline the memory copy when it's profitable to do so.
+
+'``llvm.memmove.element.unordered.atomic``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+This is an overloaded intrinsic. You can use
+``llvm.memmove.element.unordered.atomic`` on any integer bit width and for
+different address spaces. Not all targets support all bit widths however.
+
+::
+
+ declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* <dest>,
+ i8* <src>,
+ i32 <len>,
+ i32 <element_size>)
+ declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* <dest>,
+ i8* <src>,
+ i64 <len>,
+ i32 <element_size>)
+
+Overview:
+"""""""""
+
+The '``llvm.memmove.element.unordered.atomic.*``' intrinsic is a specialization
+of the '``llvm.memmove.*``' intrinsic. It differs in that the ``dest`` and
+``src`` are treated as arrays with elements that are exactly ``element_size``
+bytes, and the copy between buffers uses a sequence of
+:ref:`unordered atomic <ordering>` load/store operations that are a positive
+integer multiple of the ``element_size`` in size.
+
+Arguments:
+""""""""""
+
+The first three arguments are the same as they are in the
+:ref:`@llvm.memmove <int_memmove>` intrinsic, with the added constraint that
+``len`` is required to be a positive integer multiple of the ``element_size``.
+If ``len`` is not a positive integer multiple of ``element_size``, then the
+behaviour of the intrinsic is undefined.
+
+``element_size`` must be a compile-time constant positive power of two no
+greater than a target-specific atomic access size limit.
+
+For each of the input pointers the ``align`` parameter attribute must be
+specified. It must be a power of two no less than the ``element_size``. Caller
+guarantees that both the source and destination pointers are aligned to that
+boundary.
+
+Semantics:
+""""""""""
+
+The '``llvm.memmove.element.unordered.atomic.*``' intrinsic copies ``len`` bytes
+of memory from the source location to the destination location. These locations
+are allowed to overlap. The memory copy is performed as a sequence of load/store
+operations where each access is guaranteed to be a multiple of ``element_size``
+bytes wide and aligned at an ``element_size`` boundary.
+
+The order of the copy is unspecified. The same value may be read from the source
+buffer many times, but only one write is issued to the destination buffer per
+element. It is well defined to have concurrent reads and writes to both source
+and destination provided those reads and writes are unordered atomic when
+specified.
+
+This intrinsic does not provide any additional ordering guarantees over those
+provided by a set of unordered loads from the source location and stores to the
+destination.
+
+Lowering:
+"""""""""
+
+In the most general case, a call to the
+'``llvm.memmove.element.unordered.atomic.*``' intrinsic is lowered to a call to
+the symbol ``__llvm_memmove_element_unordered_atomic_*``, where '*' is replaced
+with the actual element size.
+
The optimizer is allowed to inline the memory copy when it's profitable to do so.
+
+.. _int_memset_element_unordered_atomic:
+
+'``llvm.memset.element.unordered.atomic``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+This is an overloaded intrinsic. You can use ``llvm.memset.element.unordered.atomic`` on
+any integer bit width and for different address spaces. Not all targets
+support all bit widths however.
+
+::
+
+ declare void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* <dest>,
+ i8 <value>,
+ i32 <len>,
+ i32 <element_size>)
+ declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* <dest>,
+ i8 <value>,
+ i64 <len>,
+ i32 <element_size>)
+
+Overview:
+"""""""""
+
+The '``llvm.memset.element.unordered.atomic.*``' intrinsic is a specialization of the
+'``llvm.memset.*``' intrinsic. It differs in that the ``dest`` is treated as an array
+with elements that are exactly ``element_size`` bytes, and the assignment to that array
+uses a sequence of :ref:`unordered atomic <ordering>` store operations
+that are a positive integer multiple of the ``element_size`` in size.
+
+Arguments:
+""""""""""
+
+The first three arguments are the same as they are in the :ref:`@llvm.memset <int_memset>`
+intrinsic, with the added constraint that ``len`` is required to be a positive integer
+multiple of the ``element_size``. If ``len`` is not a positive integer multiple of
+``element_size``, then the behaviour of the intrinsic is undefined.
+
+``element_size`` must be a compile-time constant positive power of two no greater than
+a target-specific atomic access size limit.
+
+The ``dest`` input pointer must have the ``align`` parameter attribute specified. It
+must be a power of two no less than the ``element_size``. Caller guarantees that
+the destination pointer is aligned to that boundary.
+
+Semantics:
+""""""""""
+
+The '``llvm.memset.element.unordered.atomic.*``' intrinsic sets the ``len`` bytes of
+memory starting at the destination location to the given ``value``. The memory is
+set with a sequence of store operations where each access is guaranteed to be a
+multiple of ``element_size`` bytes wide and aligned at an ``element_size`` boundary.
+
+The order of the assignment is unspecified. Only one write is issued to the
+destination buffer per element. It is well defined to have concurrent reads and
+writes to the destination provided those reads and writes are unordered atomic
+when specified.
+
+This intrinsic does not provide any additional ordering guarantees over those
+provided by a set of unordered stores to the destination.
+
+Lowering:
+"""""""""
+
+In the most general case, a call to the '``llvm.memset.element.unordered.atomic.*``'
+intrinsic is lowered to a call to the symbol
+``__llvm_memset_element_unordered_atomic_*``, where '*' is replaced with the
+actual element size.
+
+The optimizer is allowed to inline the memory assignment when it's profitable to do so.
+
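As a usage sketch (not part of the patch), the two new element-wise atomic
intrinsics documented above can be called as follows. The declarations are the
32-bit-length variants copied from the patch; the length of 64 bytes and the
element size of 4 bytes are arbitrary example values.

.. code-block:: llvm

   declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8*, i8*, i32, i32)
   declare void @llvm.memset.element.unordered.atomic.p0i8.i32(i8*, i8, i32, i32)

   define void @move_then_clear(i8* %dst, i8* %src) {
   entry:
     ; len (64) is a positive multiple of element_size (4), and both pointer
     ; arguments carry an align attribute no smaller than element_size.
     call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dst, i8* align 4 %src, i32 64, i32 4)
     ; Zero out the source buffer with the same element-wise atomic guarantees.
     call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %src, i8 0, i32 64, i32 4)
     ret void
   }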
diff --git a/docs/LibFuzzer.rst b/docs/LibFuzzer.rst
index 5acfa04ce1f4..0f0b0e2e6fbd 100644
--- a/docs/LibFuzzer.rst
+++ b/docs/LibFuzzer.rst
@@ -587,7 +587,7 @@ The simplest way is to have a statically initialized global object inside
Alternatively, you may define an optional init function and it will receive
the program arguments that you can read and modify. Do this **only** if you
-realy need to access ``argv``/``argc``.
+really need to access ``argv``/``argc``.
.. code-block:: c++
diff --git a/docs/tutorial/BuildingAJIT1.rst b/docs/tutorial/BuildingAJIT1.rst
index 625cbbba1a5c..88f7aa5abbc7 100644
--- a/docs/tutorial/BuildingAJIT1.rst
+++ b/docs/tutorial/BuildingAJIT1.rst
@@ -12,7 +12,7 @@ Welcome to Chapter 1 of the "Building an ORC-based JIT in LLVM" tutorial. This
tutorial runs through the implementation of a JIT compiler using LLVM's
On-Request-Compilation (ORC) APIs. It begins with a simplified version of the
KaleidoscopeJIT class used in the
-`Implementing a language with LLVM <LangImpl1.html>`_ tutorials and then
+`Implementing a language with LLVM <LangImpl01.html>`_ tutorials and then
introduces new features like optimization, lazy compilation and remote
execution.
@@ -41,7 +41,7 @@ The structure of the tutorial is:
a remote process with reduced privileges using the JIT Remote APIs.
To provide input for our JIT we will use the Kaleidoscope REPL from
-`Chapter 7 <LangImpl7.html>`_ of the "Implementing a language in LLVM tutorial",
+`Chapter 7 <LangImpl07.html>`_ of the "Implementing a language in LLVM tutorial",
with one minor modification: We will remove the FunctionPassManager from the
code for that chapter and replace it with optimization support in our JIT class
in Chapter #2.
@@ -91,8 +91,8 @@ KaleidoscopeJIT
In the previous section we described our API, now we examine a simple
implementation of it: The KaleidoscopeJIT class [1]_ that was used in the
-`Implementing a language with LLVM <LangImpl1.html>`_ tutorials. We will use
-the REPL code from `Chapter 7 <LangImpl7.html>`_ of that tutorial to supply the
+`Implementing a language with LLVM <LangImpl01.html>`_ tutorials. We will use
+the REPL code from `Chapter 7 <LangImpl07.html>`_ of that tutorial to supply the
input for our JIT: Each time the user enters an expression the REPL will add a
new IR module containing the code for that expression to the JIT. If the
expression is a top-level expression like '1+1' or 'sin(x)', the REPL will also
diff --git a/docs/tutorial/BuildingAJIT2.rst b/docs/tutorial/BuildingAJIT2.rst
index 839875266a24..2f22bdad6c14 100644
--- a/docs/tutorial/BuildingAJIT2.rst
+++ b/docs/tutorial/BuildingAJIT2.rst
@@ -25,7 +25,7 @@ IRTransformLayer, to add IR optimization support to KaleidoscopeJIT.
Optimizing Modules using the IRTransformLayer
=============================================
-In `Chapter 4 <LangImpl4.html>`_ of the "Implementing a language with LLVM"
+In `Chapter 4 <LangImpl04.html>`_ of the "Implementing a language with LLVM"
tutorial series the llvm *FunctionPassManager* is introduced as a means for
optimizing LLVM IR. Interested readers may read that chapter for details, but
in short: to optimize a Module we create an llvm::FunctionPassManager
@@ -148,7 +148,7 @@ At the bottom of our JIT we add a private method to do the actual optimization:
*optimizeModule*. This function sets up a FunctionPassManager, adds some passes
to it, runs it over every function in the module, and then returns the mutated
module. The specific optimizations are the same ones used in
-`Chapter 4 <LangImpl4.html>`_ of the "Implementing a language with LLVM"
+`Chapter 4 <LangImpl04.html>`_ of the "Implementing a language with LLVM"
tutorial series. Readers may visit that chapter for a more in-depth
discussion of these, and of IR optimization in general.
diff --git a/docs/tutorial/LangImpl02.rst b/docs/tutorial/LangImpl02.rst
index 4be447eb5ba3..d72c8dc9add4 100644
--- a/docs/tutorial/LangImpl02.rst
+++ b/docs/tutorial/LangImpl02.rst
@@ -10,7 +10,7 @@ Chapter 2 Introduction
Welcome to Chapter 2 of the "`Implementing a language with
LLVM <index.html>`_" tutorial. This chapter shows you how to use the
-lexer, built in `Chapter 1 <LangImpl1.html>`_, to build a full
+lexer, built in `Chapter 1 <LangImpl01.html>`_, to build a full
`parser <http://en.wikipedia.org/wiki/Parsing>`_ for our Kaleidoscope
language. Once we have a parser, we'll define and build an `Abstract
Syntax Tree <http://en.wikipedia.org/wiki/Abstract_syntax_tree>`_ (AST).
diff --git a/docs/tutorial/LangImpl03.rst b/docs/tutorial/LangImpl03.rst
index 1dfe10175c74..fab2ddaf8829 100644
--- a/docs/tutorial/LangImpl03.rst
+++ b/docs/tutorial/LangImpl03.rst
@@ -10,7 +10,7 @@ Chapter 3 Introduction
Welcome to Chapter 3 of the "`Implementing a language with
LLVM <index.html>`_" tutorial. This chapter shows you how to transform
-the `Abstract Syntax Tree <LangImpl2.html>`_, built in Chapter 2, into
+the `Abstract Syntax Tree <LangImpl02.html>`_, built in Chapter 2, into
LLVM IR. This will teach you a little bit about how LLVM does things, as
well as demonstrate how easy it is to use. It's much more work to build
a lexer and parser than it is to generate LLVM IR code. :)
@@ -362,7 +362,7 @@ end of the new basic block. Basic blocks in LLVM are an important part
of functions that define the `Control Flow
Graph <http://en.wikipedia.org/wiki/Control_flow_graph>`_. Since we
don't have any control flow, our functions will only contain one block
-at this point. We'll fix this in `Chapter 5 <LangImpl5.html>`_ :).
+at this point. We'll fix this in `Chapter 5 <LangImpl05.html>`_ :).
Next we add the function arguments to the NamedValues map (after first clearing
it out) so that they're accessible to ``VariableExprAST`` nodes.
@@ -540,7 +540,7 @@ functions referencing each other.
This wraps up the third chapter of the Kaleidoscope tutorial. Up next,
we'll describe how to `add JIT codegen and optimizer
-support <LangImpl4.html>`_ to this so we can actually start running
+support <LangImpl04.html>`_ to this so we can actually start running
code!
Full Code Listing
diff --git a/docs/tutorial/LangImpl04.rst b/docs/tutorial/LangImpl04.rst
index 16d7164ae15e..921c4dcc21ad 100644
--- a/docs/tutorial/LangImpl04.rst
+++ b/docs/tutorial/LangImpl04.rst
@@ -622,7 +622,7 @@ This completes the JIT and optimizer chapter of the Kaleidoscope
tutorial. At this point, we can compile a non-Turing-complete
programming language, optimize and JIT compile it in a user-driven way.
Next up we'll look into `extending the language with control flow
-constructs <LangImpl5.html>`_, tackling some interesting LLVM IR issues
+constructs <LangImpl05.html>`_, tackling some interesting LLVM IR issues
along the way.
Full Code Listing
diff --git a/docs/tutorial/LangImpl05.rst b/docs/tutorial/LangImpl05.rst
index dcf45bcbf8d2..8650892e8f8b 100644
--- a/docs/tutorial/LangImpl05.rst
+++ b/docs/tutorial/LangImpl05.rst
@@ -269,7 +269,7 @@ Phi nodes:
#. Values that are implicit in the structure of your AST, such as the
Phi node in this case.
-In `Chapter 7 <LangImpl7.html>`_ of this tutorial ("mutable variables"),
+In `Chapter 7 <LangImpl07.html>`_ of this tutorial ("mutable variables"),
we'll talk about #1 in depth. For now, just believe me that you don't
need SSA construction to handle this case. For #2, you have the choice
of using the techniques that we will describe for #1, or you can insert
@@ -790,7 +790,7 @@ of the tutorial. In this chapter we added two control flow constructs,
and used them to motivate a couple of aspects of the LLVM IR that are
important for front-end implementors to know. In the next chapter of our
saga, we will get a bit crazier and add `user-defined
-operators <LangImpl6.html>`_ to our poor innocent language.
+operators <LangImpl06.html>`_ to our poor innocent language.
Full Code Listing
=================
diff --git a/docs/tutorial/LangImpl06.rst b/docs/tutorial/LangImpl06.rst
index c1035bce8559..cb8ec766bb26 100644
--- a/docs/tutorial/LangImpl06.rst
+++ b/docs/tutorial/LangImpl06.rst
@@ -41,7 +41,7 @@ The point of going into user-defined operators in a tutorial like this
is to show the power and flexibility of using a hand-written parser.
Thus far, the parser we have been implementing uses recursive descent
for most parts of the grammar and operator precedence parsing for the
-expressions. See `Chapter 2 <LangImpl2.html>`_ for details. By
+expressions. See `Chapter 2 <LangImpl02.html>`_ for details. By
using operator precedence parsing, it is very easy to allow
the programmer to introduce new operators into the grammar: the grammar
is dynamically extensible as the JIT runs.
@@ -734,7 +734,7 @@ side-effects, but it can't actually define and mutate a variable itself.
Strikingly, variable mutation is an important feature of some languages,
and it is not at all obvious how to `add support for mutable
-variables <LangImpl7.html>`_ without having to add an "SSA construction"
+variables <LangImpl07.html>`_ without having to add an "SSA construction"
phase to your front-end. In the next chapter, we will describe how you
can add variable mutation without building SSA in your front-end.
diff --git a/docs/tutorial/OCamlLangImpl5.rst b/docs/tutorial/OCamlLangImpl5.rst
index 6e17de4b2bde..d06bf6ec252a 100644
--- a/docs/tutorial/OCamlLangImpl5.rst
+++ b/docs/tutorial/OCamlLangImpl5.rst
@@ -258,7 +258,7 @@ a truth value as a 1-bit (bool) value.
let then_bb = append_block context "then" the_function in
position_at_end then_bb builder;
-As opposed to the `C++ tutorial <LangImpl5.html>`_, we have to build our
+As opposed to the `C++ tutorial <LangImpl05.html>`_, we have to build our
basic blocks bottom up since we can't have dangling BasicBlocks. We
start off by saving a pointer to the first block (which might not be the
entry block), which we'll need to build a conditional branch later. We
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter1/KaleidoscopeJIT.h b/examples/Kaleidoscope/BuildingAJIT/Chapter1/KaleidoscopeJIT.h
index f99722f60e91..5a2148a14a14 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter1/KaleidoscopeJIT.h
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter1/KaleidoscopeJIT.h
@@ -48,6 +48,7 @@ public:
KaleidoscopeJIT()
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
+ ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
CompileLayer(ObjectLayer, SimpleCompiler(*TM)) {
llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr);
}
@@ -74,9 +75,8 @@ public:
// Add the set to the JIT with the resolver we created above and a newly
// created SectionMemoryManager.
- return CompileLayer.addModule(std::move(M),
- make_unique<SectionMemoryManager>(),
- std::move(Resolver));
+ return cantFail(CompileLayer.addModule(std::move(M),
+ std::move(Resolver)));
}
JITSymbol findSymbol(const std::string Name) {
@@ -87,7 +87,7 @@ public:
}
void removeModule(ModuleHandle H) {
- CompileLayer.removeModule(H);
+ cantFail(CompileLayer.removeModule(H));
}
};
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp b/examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp
index 163caa6872d7..2471344c6d65 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp
@@ -1150,7 +1150,7 @@ static void HandleTopLevelExpression() {
// Get the symbol's address and cast it to the right type (takes no
// arguments, returns a double) so we can call it as a native function.
- double (*FP)() = (double (*)())(intptr_t)ExprSymbol.getAddress();
+ double (*FP)() = (double (*)())(intptr_t)cantFail(ExprSymbol.getAddress());
fprintf(stderr, "Evaluated to %f\n", FP());
// Delete the anonymous expression module from the JIT.
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter2/KaleidoscopeJIT.h b/examples/Kaleidoscope/BuildingAJIT/Chapter2/KaleidoscopeJIT.h
index 2cd4ed79aafa..9a295f1566cb 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter2/KaleidoscopeJIT.h
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter2/KaleidoscopeJIT.h
@@ -57,6 +57,7 @@ public:
KaleidoscopeJIT()
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
+ ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
CompileLayer(ObjectLayer, SimpleCompiler(*TM)),
OptimizeLayer(CompileLayer,
[this](std::shared_ptr<Module> M) {
@@ -87,9 +88,8 @@ public:
// Add the set to the JIT with the resolver we created above and a newly
// created SectionMemoryManager.
- return OptimizeLayer.addModule(std::move(M),
- make_unique<SectionMemoryManager>(),
- std::move(Resolver));
+ return cantFail(OptimizeLayer.addModule(std::move(M),
+ std::move(Resolver)));
}
JITSymbol findSymbol(const std::string Name) {
@@ -100,7 +100,7 @@ public:
}
void removeModule(ModuleHandle H) {
- OptimizeLayer.removeModule(H);
+ cantFail(OptimizeLayer.removeModule(H));
}
private:
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp b/examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp
index 163caa6872d7..2471344c6d65 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp
@@ -1150,7 +1150,7 @@ static void HandleTopLevelExpression() {
// Get the symbol's address and cast it to the right type (takes no
// arguments, returns a double) so we can call it as a native function.
- double (*FP)() = (double (*)())(intptr_t)ExprSymbol.getAddress();
+ double (*FP)() = (double (*)())(intptr_t)cantFail(ExprSymbol.getAddress());
fprintf(stderr, "Evaluated to %f\n", FP());
// Delete the anonymous expression module from the JIT.
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter3/KaleidoscopeJIT.h b/examples/Kaleidoscope/BuildingAJIT/Chapter3/KaleidoscopeJIT.h
index f6fb3071d526..a03f5ce5e238 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter3/KaleidoscopeJIT.h
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter3/KaleidoscopeJIT.h
@@ -63,6 +63,7 @@ public:
KaleidoscopeJIT()
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
+ ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
CompileLayer(ObjectLayer, SimpleCompiler(*TM)),
OptimizeLayer(CompileLayer,
[this](std::shared_ptr<Module> M) {
@@ -100,9 +101,7 @@ public:
// Add the set to the JIT with the resolver we created above and a newly
// created SectionMemoryManager.
- return CODLayer.addModule(std::move(M),
- make_unique<SectionMemoryManager>(),
- std::move(Resolver));
+ return cantFail(CODLayer.addModule(std::move(M), std::move(Resolver)));
}
JITSymbol findSymbol(const std::string Name) {
@@ -113,7 +112,7 @@ public:
}
void removeModule(ModuleHandle H) {
- CODLayer.removeModule(H);
+ cantFail(CODLayer.removeModule(H));
}
private:
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp b/examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp
index 163caa6872d7..2471344c6d65 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp
@@ -1150,7 +1150,7 @@ static void HandleTopLevelExpression() {
// Get the symbol's address and cast it to the right type (takes no
// arguments, returns a double) so we can call it as a native function.
- double (*FP)() = (double (*)())(intptr_t)ExprSymbol.getAddress();
+ double (*FP)() = (double (*)())(intptr_t)cantFail(ExprSymbol.getAddress());
fprintf(stderr, "Evaluated to %f\n", FP());
// Delete the anonymous expression module from the JIT.
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter4/KaleidoscopeJIT.h b/examples/Kaleidoscope/BuildingAJIT/Chapter4/KaleidoscopeJIT.h
index d45874e9a693..d10e4748f1a1 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter4/KaleidoscopeJIT.h
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter4/KaleidoscopeJIT.h
@@ -90,6 +90,7 @@ public:
KaleidoscopeJIT()
: TM(EngineBuilder().selectTarget()),
DL(TM->createDataLayout()),
+ ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
CompileLayer(ObjectLayer, SimpleCompiler(*TM)),
OptimizeLayer(CompileLayer,
[this](std::shared_ptr<Module> M) {
@@ -127,9 +128,8 @@ public:
// Add the set to the JIT with the resolver we created above and a newly
// created SectionMemoryManager.
- return OptimizeLayer.addModule(std::move(M),
- make_unique<SectionMemoryManager>(),
- std::move(Resolver));
+ return cantFail(OptimizeLayer.addModule(std::move(M),
+ std::move(Resolver)));
}
Error addFunctionAST(std::unique_ptr<FunctionAST> FnAST) {
@@ -175,7 +175,7 @@ public:
addModule(std::move(M));
auto Sym = findSymbol(SharedFnAST->getName() + "$impl");
assert(Sym && "Couldn't find compiled function?");
- JITTargetAddress SymAddr = Sym.getAddress();
+ JITTargetAddress SymAddr = cantFail(Sym.getAddress());
if (auto Err =
IndirectStubsMgr->updatePointer(mangle(SharedFnAST->getName()),
SymAddr)) {
@@ -195,7 +195,7 @@ public:
}
void removeModule(ModuleHandle H) {
- OptimizeLayer.removeModule(H);
+ cantFail(OptimizeLayer.removeModule(H));
}
private:
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter4/toy.cpp b/examples/Kaleidoscope/BuildingAJIT/Chapter4/toy.cpp
index ff4b5220105b..ed8ae31ba0fd 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter4/toy.cpp
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter4/toy.cpp
@@ -1153,7 +1153,7 @@ static void HandleTopLevelExpression() {
// Get the symbol's address and cast it to the right type (takes no
// arguments, returns a double) so we can call it as a native function.
- double (*FP)() = (double (*)())(intptr_t)ExprSymbol.getAddress();
+ double (*FP)() = (double (*)())(intptr_t)cantFail(ExprSymbol.getAddress());
fprintf(stderr, "Evaluated to %f\n", FP());
// Delete the anonymous expression module from the JIT.
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter5/KaleidoscopeJIT.h b/examples/Kaleidoscope/BuildingAJIT/Chapter5/KaleidoscopeJIT.h
index e889c6d34322..7ea535b3af53 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter5/KaleidoscopeJIT.h
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter5/KaleidoscopeJIT.h
@@ -97,6 +97,15 @@ public:
: TM(EngineBuilder().selectTarget(Triple(Remote.getTargetTriple()), "",
"", SmallVector<std::string, 0>())),
DL(TM->createDataLayout()),
+ ObjectLayer([&Remote]() {
+ std::unique_ptr<MyRemote::RCMemoryManager> MemMgr;
+ if (auto Err = Remote.createRemoteMemoryManager(MemMgr)) {
+ logAllUnhandledErrors(std::move(Err), errs(),
+ "Error creating remote memory manager:");
+ exit(1);
+ }
+ return MemMgr;
+ }),
CompileLayer(ObjectLayer, SimpleCompiler(*TM)),
OptimizeLayer(CompileLayer,
[this](std::shared_ptr<Module> M) {
@@ -146,18 +155,10 @@ public:
return JITSymbol(nullptr);
});
- std::unique_ptr<MyRemote::RCMemoryManager> MemMgr;
- if (auto Err = Remote.createRemoteMemoryManager(MemMgr)) {
- logAllUnhandledErrors(std::move(Err), errs(),
- "Error creating remote memory manager:");
- exit(1);
- }
-
// Add the set to the JIT with the resolver we created above and a newly
// created SectionMemoryManager.
- return OptimizeLayer.addModule(std::move(M),
- std::move(MemMgr),
- std::move(Resolver));
+ return cantFail(OptimizeLayer.addModule(std::move(M),
+ std::move(Resolver)));
}
Error addFunctionAST(std::unique_ptr<FunctionAST> FnAST) {
@@ -203,7 +204,7 @@ public:
addModule(std::move(M));
auto Sym = findSymbol(SharedFnAST->getName() + "$impl");
assert(Sym && "Couldn't find compiled function?");
- JITTargetAddress SymAddr = Sym.getAddress();
+ JITTargetAddress SymAddr = cantFail(Sym.getAddress());
if (auto Err =
IndirectStubsMgr->updatePointer(mangle(SharedFnAST->getName()),
SymAddr)) {
@@ -227,7 +228,7 @@ public:
}
void removeModule(ModuleHandle H) {
- OptimizeLayer.removeModule(H);
+ cantFail(OptimizeLayer.removeModule(H));
}
private:
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter5/toy.cpp b/examples/Kaleidoscope/BuildingAJIT/Chapter5/toy.cpp
index edd050959d6b..7bbc06a0958f 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter5/toy.cpp
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter5/toy.cpp
@@ -1177,7 +1177,7 @@ static void HandleTopLevelExpression() {
// Get the symbol's address and cast it to the right type (takes no
// arguments, returns a double) so we can call it as a native function.
- ExitOnErr(TheJIT->executeRemoteExpr(ExprSymbol.getAddress()));
+ ExitOnErr(TheJIT->executeRemoteExpr(cantFail(ExprSymbol.getAddress())));
// Delete the anonymous expression module from the JIT.
TheJIT->removeModule(H);
diff --git a/examples/Kaleidoscope/Chapter4/toy.cpp b/examples/Kaleidoscope/Chapter4/toy.cpp
index cf7d6c2bee04..921fa8908040 100644
--- a/examples/Kaleidoscope/Chapter4/toy.cpp
+++ b/examples/Kaleidoscope/Chapter4/toy.cpp
@@ -611,7 +611,7 @@ static void HandleTopLevelExpression() {
// Get the symbol's address and cast it to the right type (takes no
// arguments, returns a double) so we can call it as a native function.
- double (*FP)() = (double (*)())(intptr_t)ExprSymbol.getAddress();
+ double (*FP)() = (double (*)())(intptr_t)cantFail(ExprSymbol.getAddress());
fprintf(stderr, "Evaluated to %f\n", FP());
// Delete the anonymous expression module from the JIT.
diff --git a/examples/Kaleidoscope/Chapter5/toy.cpp b/examples/Kaleidoscope/Chapter5/toy.cpp
index 6852973bae40..2d23bdb26c21 100644
--- a/examples/Kaleidoscope/Chapter5/toy.cpp
+++ b/examples/Kaleidoscope/Chapter5/toy.cpp
@@ -885,7 +885,7 @@ static void HandleTopLevelExpression() {
// Get the symbol's address and cast it to the right type (takes no
// arguments, returns a double) so we can call it as a native function.
- double (*FP)() = (double (*)())(intptr_t)ExprSymbol.getAddress();
+ double (*FP)() = (double (*)())(intptr_t)cantFail(ExprSymbol.getAddress());
fprintf(stderr, "Evaluated to %f\n", FP());
// Delete the anonymous expression module from the JIT.
diff --git a/examples/Kaleidoscope/Chapter6/toy.cpp b/examples/Kaleidoscope/Chapter6/toy.cpp
index 0c2221735589..b5e4495539fc 100644
--- a/examples/Kaleidoscope/Chapter6/toy.cpp
+++ b/examples/Kaleidoscope/Chapter6/toy.cpp
@@ -1004,7 +1004,7 @@ static void HandleTopLevelExpression() {
// Get the symbol's address and cast it to the right type (takes no
// arguments, returns a double) so we can call it as a native function.
- double (*FP)() = (double (*)())(intptr_t)ExprSymbol.getAddress();
+ double (*FP)() = (double (*)())(intptr_t)cantFail(ExprSymbol.getAddress());
fprintf(stderr, "Evaluated to %f\n", FP());
// Delete the anonymous expression module from the JIT.
diff --git a/examples/Kaleidoscope/Chapter7/toy.cpp b/examples/Kaleidoscope/Chapter7/toy.cpp
index 79ac7b33d7a1..32f4a658c5d2 100644
--- a/examples/Kaleidoscope/Chapter7/toy.cpp
+++ b/examples/Kaleidoscope/Chapter7/toy.cpp
@@ -1173,7 +1173,7 @@ static void HandleTopLevelExpression() {
// Get the symbol's address and cast it to the right type (takes no
// arguments, returns a double) so we can call it as a native function.
- double (*FP)() = (double (*)())(intptr_t)ExprSymbol.getAddress();
+ double (*FP)() = (double (*)())(intptr_t)cantFail(ExprSymbol.getAddress());
fprintf(stderr, "Evaluated to %f\n", FP());
// Delete the anonymous expression module from the JIT.
diff --git a/examples/Kaleidoscope/include/KaleidoscopeJIT.h b/examples/Kaleidoscope/include/KaleidoscopeJIT.h
index fe73d717976d..215ce03af99b 100644
--- a/examples/Kaleidoscope/include/KaleidoscopeJIT.h
+++ b/examples/Kaleidoscope/include/KaleidoscopeJIT.h
@@ -45,6 +45,7 @@ public:
KaleidoscopeJIT()
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
+ ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
CompileLayer(ObjectLayer, SimpleCompiler(*TM)) {
llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr);
}
@@ -62,9 +63,8 @@ public:
return JITSymbol(nullptr);
},
[](const std::string &S) { return nullptr; });
- auto H = CompileLayer.addModule(std::move(M),
- make_unique<SectionMemoryManager>(),
- std::move(Resolver));
+ auto H = cantFail(CompileLayer.addModule(std::move(M),
+ std::move(Resolver)));
ModuleHandles.push_back(H);
return H;
@@ -72,7 +72,7 @@ public:
void removeModule(ModuleHandleT H) {
ModuleHandles.erase(find(ModuleHandles, H));
- CompileLayer.removeModule(H);
+ cantFail(CompileLayer.removeModule(H));
}
JITSymbol findSymbol(const std::string Name) {
@@ -115,7 +115,7 @@ private:
return JITSymbol(SymAddr, JITSymbolFlags::Exported);
#ifdef LLVM_ON_WIN32
- // For Windows retry without "_" at begining, as RTDyldMemoryManager uses
+ // For Windows retry without "_" at beginning, as RTDyldMemoryManager uses
// GetProcAddress and standard libraries like msvcrt.dll use names
// with and without "_" (for example "_itoa" but "sin").
if (Name.length() > 2 && Name[0] == '_')
diff --git a/include/llvm-c/OrcBindings.h b/include/llvm-c/OrcBindings.h
index d86ea8808889..7ee395431358 100644
--- a/include/llvm-c/OrcBindings.h
+++ b/include/llvm-c/OrcBindings.h
@@ -113,8 +113,9 @@ void LLVMOrcDisposeMangledSymbol(char *MangledSymbol);
/**
* Create a lazy compile callback.
*/
-LLVMOrcTargetAddress
+LLVMOrcErrorCode
LLVMOrcCreateLazyCompileCallback(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
LLVMOrcLazyCompileCallbackFn Callback,
void *CallbackCtx);
@@ -135,8 +136,9 @@ LLVMOrcErrorCode LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
/**
* Add module to be eagerly compiled.
*/
-LLVMOrcModuleHandle
+LLVMOrcErrorCode
LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
LLVMSharedModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx);
@@ -144,8 +146,9 @@ LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
/**
* Add module to be lazily compiled one function at a time.
*/
-LLVMOrcModuleHandle
+LLVMOrcErrorCode
LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
LLVMSharedModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx);
@@ -153,10 +156,11 @@ LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
/**
* Add an object file.
*/
-LLVMOrcModuleHandle LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
- LLVMSharedObjectBufferRef Obj,
- LLVMOrcSymbolResolverFn SymbolResolver,
- void *SymbolResolverCtx);
+LLVMOrcErrorCode LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
+ LLVMSharedObjectBufferRef Obj,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx);
/**
* Remove a module set from the JIT.
@@ -164,18 +168,20 @@ LLVMOrcModuleHandle LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
* This works for all modules that can be added via OrcAdd*, including object
* files.
*/
-void LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack, LLVMOrcModuleHandle H);
+LLVMOrcErrorCode LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle H);
/**
* Get symbol address from JIT instance.
*/
-LLVMOrcTargetAddress LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
- const char *SymbolName);
+LLVMOrcErrorCode LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
+ const char *SymbolName);
/**
* Dispose of an ORC JIT stack.
*/
-void LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
+LLVMOrcErrorCode LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
#ifdef __cplusplus
}
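Every ORC C binding that used to return a handle or address now returns an LLVMOrcErrorCode and hands the result back through an out-parameter. A hedged sketch of the updated calling convention (JITStack, Mod, SymbolResolver and SymbolResolverCtx are placeholders; the message helper is the existing LLVMOrcGetErrorMsg):

    LLVMOrcModuleHandle Handle;
    if (LLVMOrcAddEagerlyCompiledIR(JITStack, &Handle, Mod, SymbolResolver,
                                    SymbolResolverCtx) != LLVMOrcErrSuccess) {
      fprintf(stderr, "ORC error: %s\n", LLVMOrcGetErrorMsg(JITStack));
      return 1;
    }
    LLVMOrcTargetAddress MainAddr;
    if (LLVMOrcGetSymbolAddress(JITStack, &MainAddr, "main") != LLVMOrcErrSuccess)
      return 1;
    LLVMOrcRemoveModule(JITStack, Handle);
    LLVMOrcDisposeInstance(JITStack);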
diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h
index e5f0c35534ac..a1cce6e5fe17 100644
--- a/include/llvm/ADT/APInt.h
+++ b/include/llvm/ADT/APInt.h
@@ -401,7 +401,11 @@ public:
/// \brief Determine if this is a value of 1.
///
/// This checks to see if the value of this APInt is one.
- bool isOneValue() const { return getActiveBits() == 1; }
+ bool isOneValue() const {
+ if (isSingleWord())
+ return U.VAL == 1;
+ return countLeadingZerosSlowCase() == BitWidth - 1;
+ }
/// \brief Determine if this is the largest unsigned value.
///
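The rewritten isOneValue() keeps the same meaning for every bit width; it only splits the single-word fast path from the multi-word slow case. A tiny illustrative check (assumed usage, not part of the patch):

    APInt Narrow(8, 1), Wide(256, 1);
    assert(Narrow.isOneValue());   // single-word fast path
    assert(Wide.isOneValue());     // multi-word countLeadingZerosSlowCase() path
    assert(!APInt(256, 2).isOneValue());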
diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h
index 8c28412bb607..83f289c42a23 100644
--- a/include/llvm/ADT/STLExtras.h
+++ b/include/llvm/ADT/STLExtras.h
@@ -100,6 +100,8 @@ class function_ref<Ret(Params...)> {
}
public:
+ function_ref() : callback(nullptr) {}
+
template <typename Callable>
function_ref(Callable &&callable,
typename std::enable_if<
@@ -110,6 +112,8 @@ public:
Ret operator()(Params ...params) const {
return callback(callable, std::forward<Params>(params)...);
}
+
+ operator bool() const { return callback; }
};
// deleter - Very very very simple method that is used to invoke operator
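function_ref gains a null default constructor and a bool conversion, so it can serve as an optional callback parameter; the LazyCallGraph change further down relies on exactly this. A minimal sketch (the helper and its names are hypothetical):

    // Hypothetical helper with an optional visitor callback.
    void forEachValue(ArrayRef<int> Vals, function_ref<void(int)> OnVisit = {}) {
      for (int V : Vals)
        if (OnVisit)   // new operator bool(): skip when no callback was supplied
          OnVisit(V);
    }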
diff --git a/include/llvm/ADT/SmallPtrSet.h b/include/llvm/ADT/SmallPtrSet.h
index a2ad74b1e04a..4e8a2490ee3c 100644
--- a/include/llvm/ADT/SmallPtrSet.h
+++ b/include/llvm/ADT/SmallPtrSet.h
@@ -15,9 +15,9 @@
#ifndef LLVM_ADT_SMALLPTRSET_H
#define LLVM_ADT_SMALLPTRSET_H
-#include "llvm/Config/abi-breaking.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <cstddef>
@@ -29,15 +29,6 @@
namespace llvm {
-#if LLVM_ENABLE_ABI_BREAKING_CHECKS
-template <class T = void> struct ReverseIterate { static bool value; };
-#if LLVM_ENABLE_REVERSE_ITERATION
-template <class T> bool ReverseIterate<T>::value = true;
-#else
-template <class T> bool ReverseIterate<T>::value = false;
-#endif
-#endif
-
/// SmallPtrSetImplBase - This is the common code shared among all the
/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
/// for small and one for large sets.
diff --git a/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index 3e05e09900a5..5de3821242e0 100644
--- a/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -1353,4 +1353,4 @@ struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
#undef DEBUG_TYPE
-#endif
+#endif // LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
diff --git a/include/llvm/Analysis/CGSCCPassManager.h b/include/llvm/Analysis/CGSCCPassManager.h
index a15a9e18c815..32868cbecdcf 100644
--- a/include/llvm/Analysis/CGSCCPassManager.h
+++ b/include/llvm/Analysis/CGSCCPassManager.h
@@ -577,12 +577,17 @@ public:
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
- // Update the call graph based on this function pass. This may also
- // update the current SCC to point to a smaller, more refined SCC.
- CurrentC = &updateCGAndAnalysisManagerForFunctionPass(
- CG, *CurrentC, *N, AM, UR, DebugLogging);
- assert(CG.lookupSCC(*N) == CurrentC &&
- "Current SCC not updated to the SCC containing the current node!");
+ // If the call graph hasn't been preserved, update it based on this
+ // function pass. This may also update the current SCC to point to
+ // a smaller, more refined SCC.
+ auto PAC = PA.getChecker<LazyCallGraphAnalysis>();
+ if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Module>>()) {
+ CurrentC = &updateCGAndAnalysisManagerForFunctionPass(
+ CG, *CurrentC, *N, AM, UR, DebugLogging);
+ assert(
+ CG.lookupSCC(*N) == CurrentC &&
+ "Current SCC not updated to the SCC containing the current node!");
+ }
}
// By definition we preserve the proxy. And we preserve all analyses on
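The adaptor now skips the call graph update entirely when the function pass reports that it preserved the LazyCallGraphAnalysis. A hedged sketch of a pass taking advantage of this (the pass itself is hypothetical; it must genuinely leave the call graph untouched):

    struct NoCallGraphChangesPass : PassInfoMixin<NoCallGraphChangesPass> {
      PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
        // ... transform F without adding or removing any calls ...
        PreservedAnalyses PA;
        PA.preserve<LazyCallGraphAnalysis>(); // lets the CGSCC adaptor skip the update
        return PA;
      }
    };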
diff --git a/include/llvm/Analysis/InlineCost.h b/include/llvm/Analysis/InlineCost.h
index ce0b7895f253..f33a2de5a5f4 100644
--- a/include/llvm/Analysis/InlineCost.h
+++ b/include/llvm/Analysis/InlineCost.h
@@ -160,7 +160,7 @@ InlineParams getInlineParams(int Threshold);
/// the -Oz flag.
InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
-/// Return the cost associated with a callsite, including paramater passing
+/// Return the cost associated with a callsite, including parameter passing
/// and the call/return instruction.
int getCallsiteCost(CallSite CS, const DataLayout &DL);
diff --git a/include/llvm/Analysis/LazyCallGraph.h b/include/llvm/Analysis/LazyCallGraph.h
index ad7f5c80549f..3a052761ad7d 100644
--- a/include/llvm/Analysis/LazyCallGraph.h
+++ b/include/llvm/Analysis/LazyCallGraph.h
@@ -652,17 +652,23 @@ public:
/// Make an existing internal ref edge into a call edge.
///
/// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
- /// If that happens, the deleted SCC pointers are returned. These SCCs are
- /// not in a valid state any longer but the pointers will remain valid
- /// until destruction of the parent graph instance for the purpose of
- /// clearing cached information.
+ /// If that happens, the optional callback \p MergeCB will be invoked (if
+ /// provided) on the SCCs being merged away prior to actually performing
+ /// the merge. Note that this will never include the target SCC as that
+ /// will be the SCC that functions are merged into in order to resolve the
+ /// cycle. Once this function returns, these merged SCCs are not in a valid
+ /// state but the pointers will remain valid until destruction of the parent
+ /// graph instance for the purpose of clearing cached information. As a
+ /// convenience, this function also returns 'true' if a cycle was formed and
+ /// some SCCs were merged away.
///
/// After this operation, both SourceN's SCC and TargetN's SCC may move
/// position within this RefSCC's postorder list. Any SCCs merged are
/// merged into the TargetN's SCC in order to preserve reachability analyses
/// which took place on that SCC.
- SmallVector<SCC *, 1> switchInternalEdgeToCall(Node &SourceN,
- Node &TargetN);
+ bool switchInternalEdgeToCall(
+ Node &SourceN, Node &TargetN,
+ function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});
/// Make an existing internal call edge between separate SCCs into a ref
/// edge.
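switchInternalEdgeToCall() now reports the SCCs it merges away through an optional callback rather than returning them, and its bool result says whether a cycle was formed at all. A hedged usage sketch (RC is assumed to be the containing LazyCallGraph::RefSCC; the cleanup hook is hypothetical):

    bool FormedCycle = RC.switchInternalEdgeToCall(
        SourceN, TargetN, [&](ArrayRef<LazyCallGraph::SCC *> MergedSCCs) {
          for (LazyCallGraph::SCC *C : MergedSCCs)
            invalidateCachedInfoFor(*C); // hypothetical per-SCC cleanup
        });
    if (FormedCycle) {
      // TargetN's SCC absorbed the merged SCCs; re-query it if it was cached.
    }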
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index 60dafccd84bd..23ab372703ee 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -224,6 +224,9 @@ public:
SizeOffsetType visitSelectInst(SelectInst &I);
SizeOffsetType visitUndefValue(UndefValue&);
SizeOffsetType visitInstruction(Instruction &I);
+
+private:
+ bool CheckedZextOrTrunc(APInt &I);
};
typedef std::pair<Value*, Value*> SizeOffsetEvalType;
diff --git a/include/llvm/Analysis/RegionInfoImpl.h b/include/llvm/Analysis/RegionInfoImpl.h
index c0337b6daf37..cd4ec0a03a9e 100644
--- a/include/llvm/Analysis/RegionInfoImpl.h
+++ b/include/llvm/Analysis/RegionInfoImpl.h
@@ -34,10 +34,10 @@
#include <type_traits>
#include <vector>
-namespace llvm {
-
#define DEBUG_TYPE "region"
+namespace llvm {
+
//===----------------------------------------------------------------------===//
/// RegionBase Implementation
template <class Tr>
@@ -901,8 +901,8 @@ void RegionInfoBase<Tr>::calculate(FuncT &F) {
buildRegionsTree(DT->getNode(BB), TopLevelRegion);
}
-#undef DEBUG_TYPE
-
} // end namespace llvm
+#undef DEBUG_TYPE
+
#endif // LLVM_ANALYSIS_REGIONINFOIMPL_H
diff --git a/include/llvm/Analysis/TargetTransformInfo.h b/include/llvm/Analysis/TargetTransformInfo.h
index 68fbf640994c..dfb525e3de7a 100644
--- a/include/llvm/Analysis/TargetTransformInfo.h
+++ b/include/llvm/Analysis/TargetTransformInfo.h
@@ -753,6 +753,28 @@ public:
Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) const;
+ /// \returns The type to use in a loop expansion of a memcpy call.
+ Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+ unsigned SrcAlign, unsigned DestAlign) const;
+
+ /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
+ /// \param RemainingBytes The number of bytes to copy.
+ ///
+ /// Calculates the operand types to use when copying \p RemainingBytes of
+ /// memory, where source and destination alignments are \p SrcAlign and
+ /// \p DestAlign respectively.
+ void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
+ LLVMContext &Context,
+ unsigned RemainingBytes,
+ unsigned SrcAlign,
+ unsigned DestAlign) const;
+
+ /// \returns True if we want to test the new memcpy lowering functionality in
+ /// Transform/Utils.
+ /// Temporary. Will be removed once we move to the new functionality and
+ /// remove the old.
+ bool useWideIRMemcpyLoopLowering() const;
+
/// \returns True if the two functions have compatible attributes for inlining
/// purposes.
bool areInlineCompatible(const Function *Caller,
@@ -953,6 +975,12 @@ public:
virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) = 0;
+ virtual Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+ unsigned SrcAlign,
+ unsigned DestAlign) const = 0;
+ virtual void getMemcpyLoopResidualLoweringType(
+ SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
+ unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const = 0;
virtual bool areInlineCompatible(const Function *Caller,
const Function *Callee) const = 0;
virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
@@ -1266,6 +1294,19 @@ public:
Type *ExpectedType) override {
return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}
+ Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+ unsigned SrcAlign,
+ unsigned DestAlign) const override {
+ return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAlign, DestAlign);
+ }
+ void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
+ LLVMContext &Context,
+ unsigned RemainingBytes,
+ unsigned SrcAlign,
+ unsigned DestAlign) const override {
+ Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
+ SrcAlign, DestAlign);
+ }
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const override {
return Impl.areInlineCompatible(Caller, Callee);
diff --git a/include/llvm/Analysis/TargetTransformInfoImpl.h b/include/llvm/Analysis/TargetTransformInfoImpl.h
index 0246fc1c02cc..8740ee92eed5 100644
--- a/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -444,6 +444,20 @@ public:
return nullptr;
}
+ Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+ unsigned SrcAlign, unsigned DestAlign) const {
+ return Type::getInt8Ty(Context);
+ }
+
+ void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
+ LLVMContext &Context,
+ unsigned RemainingBytes,
+ unsigned SrcAlign,
+ unsigned DestAlign) const {
+ for (unsigned i = 0; i != RemainingBytes; ++i)
+ OpsOut.push_back(Type::getInt8Ty(Context));
+ }
+
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const {
return (Caller->getFnAttribute("target-cpu") ==
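The default TTI implementation lowers memcpy loops one byte at a time: the loop body uses i8, and the residual expansion pushes one i8 per remaining byte; targets override both hooks to use wider types. A small sketch of what the default residual hook produces (TTI and Ctx are assumed to be an existing TargetTransformInfo and LLVMContext):

    SmallVector<Type *, 8> OpTypes;
    TTI.getMemcpyLoopResidualLoweringType(OpTypes, Ctx, /*RemainingBytes=*/3,
                                          /*SrcAlign=*/1, /*DestAlign=*/1);
    // With the default implementation above, OpTypes now holds {i8, i8, i8}.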
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index e953ec8ab6ab..f4c57d4289fc 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -523,8 +523,7 @@ template <typename T> class ArrayRef;
/// (A)
Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
const DataLayout &DL,
- bool InvertAPred = false,
- unsigned Depth = 0,
+ bool LHSIsFalse = false, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
diff --git a/include/llvm/BinaryFormat/Wasm.h b/include/llvm/BinaryFormat/Wasm.h
index eef473b20dde..23e30b7a868d 100644
--- a/include/llvm/BinaryFormat/Wasm.h
+++ b/include/llvm/BinaryFormat/Wasm.h
@@ -94,7 +94,7 @@ struct WasmFunction {
};
struct WasmDataSegment {
- uint32_t Index;
+ uint32_t MemoryIndex;
WasmInitExpr Offset;
ArrayRef<uint8_t> Content;
};
@@ -107,7 +107,7 @@ struct WasmElemSegment {
struct WasmRelocation {
uint32_t Type; // The type of the relocation.
- int32_t Index; // Index into function to global index space.
+ uint32_t Index; // Index into function to global index space.
uint64_t Offset; // Offset from the start of the section.
int64_t Addend; // A value to add to the symbol.
};
diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h
index 5435e48ff424..3777f956cf27 100644
--- a/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/include/llvm/Bitcode/LLVMBitCodes.h
@@ -59,6 +59,8 @@ enum BlockIDs {
FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID,
SYMTAB_BLOCK_ID,
+
+ SYNC_SCOPE_NAMES_BLOCK_ID,
};
/// Identification block contains a string that describes the producer details,
@@ -172,6 +174,10 @@ enum OperandBundleTagCode {
OPERAND_BUNDLE_TAG = 1, // TAG: [strchr x N]
};
+enum SyncScopeNameCode {
+ SYNC_SCOPE_NAME = 1,
+};
+
// Value symbol table codes.
enum ValueSymtabCodes {
VST_CODE_ENTRY = 1, // VST_ENTRY: [valueid, namechar x N]
@@ -404,12 +410,6 @@ enum AtomicOrderingCodes {
ORDERING_SEQCST = 6
};
-/// Encoded SynchronizationScope values.
-enum AtomicSynchScopeCodes {
- SYNCHSCOPE_SINGLETHREAD = 0,
- SYNCHSCOPE_CROSSTHREAD = 1
-};
-
/// Markers and flags for call instruction.
enum CallMarkersFlags {
CALL_TAIL = 0,
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index c898667f1474..60bbc9aaa5bd 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -608,8 +608,8 @@ private:
// Internal Implementation Details
//===------------------------------------------------------------------===//
- /// This emits visibility information about symbol, if this is suported by the
- /// target.
+ /// This emits visibility information about symbol, if this is supported by
+ /// the target.
void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
bool IsDefinition = true) const;
diff --git a/include/llvm/CodeGen/BasicTTIImpl.h b/include/llvm/CodeGen/BasicTTIImpl.h
index a740df96899d..b59fd60e8aed 100644
--- a/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/include/llvm/CodeGen/BasicTTIImpl.h
@@ -428,7 +428,7 @@ public:
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
- bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
+ bool IsFloat = Ty->isFPOrFPVectorTy();
// Assume that floating point arithmetic operations cost twice as much as
// integer operations.
unsigned OpCost = (IsFloat ? 2 : 1);
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index ec60123e54b1..59a4073646eb 100644
--- a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -16,14 +16,17 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+#include "llvm/ADT/SmallVector.h"
#include <bitset>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <initializer_list>
+#include <vector>
namespace llvm {
+class LLT;
class MachineInstr;
class MachineInstrBuilder;
class MachineOperand;
@@ -58,6 +61,131 @@ public:
}
};
+enum {
+ /// Record the specified instruction
+ /// - NewInsnID - Instruction ID to define
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ GIM_RecordInsn,
+
+ /// Check the feature bits
+ /// - Expected features
+ GIM_CheckFeatures,
+
+ /// Check the opcode on the specified instruction
+ /// - InsnID - Instruction ID
+ /// - Expected opcode
+ GIM_CheckOpcode,
+ /// Check the instruction has the right number of operands
+ /// - InsnID - Instruction ID
+ /// - Expected number of operands
+ GIM_CheckNumOperands,
+
+ /// Check the type for the specified operand
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected type
+ GIM_CheckType,
+ /// Check the register bank for the specified operand
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected register bank (specified as a register class)
+ GIM_CheckRegBankForClass,
+ /// Check the operand matches a complex predicate
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - RendererID - The renderer to hold the result
+ /// - Complex predicate ID
+ GIM_CheckComplexPattern,
+ /// Check the operand is a specific integer
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected integer
+ GIM_CheckConstantInt,
+ /// Check the operand is a specific literal integer (i.e. MO.isImm() or MO.isCImm() is true).
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected integer
+ GIM_CheckLiteralInt,
+ /// Check the operand is a specific intrinsic ID
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected Intrinsic ID
+ GIM_CheckIntrinsicID,
+ /// Check the specified operand is an MBB
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ GIM_CheckIsMBB,
+
+ /// Check if the specified operand is safe to fold into the current
+ /// instruction.
+ /// - InsnID - Instruction ID
+ GIM_CheckIsSafeToFold,
+
+ //=== Renderers ===
+
+ /// Mutate an instruction
+ /// - NewInsnID - Instruction ID to define
+ /// - OldInsnID - Instruction ID to mutate
+ /// - NewOpcode - The new opcode to use
+ GIR_MutateOpcode,
+ /// Build a new instruction
+ /// - InsnID - Instruction ID to define
+ /// - Opcode - The new opcode to use
+ GIR_BuildMI,
+
+ /// Copy an operand to the specified instruction
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// - OpIdx - The operand to copy
+ GIR_Copy,
+ /// Copy an operand to the specified instruction
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// - OpIdx - The operand to copy
+ /// - SubRegIdx - The subregister to copy
+ GIR_CopySubReg,
+ /// Add an implicit register def to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RegNum - The register to add
+ GIR_AddImplicitDef,
+ /// Add an implicit register use to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RegNum - The register to add
+ GIR_AddImplicitUse,
+ /// Add a register to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RegNum - The register to add
+ GIR_AddRegister,
+ /// Add an immediate to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - Imm - The immediate to add
+ GIR_AddImm,
+ /// Render complex operands to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RendererID - The renderer to call
+ GIR_ComplexRenderer,
+
+ /// Constrain an instruction operand to a register class.
+ /// - InsnID - Instruction ID to modify
+ /// - OpIdx - Operand index
+ /// - RCEnum - Register class enumeration value
+ GIR_ConstrainOperandRC,
+ /// Constrain an instruction's operands according to the instruction
+ /// description.
+ /// - InsnID - Instruction ID to modify
+ GIR_ConstrainSelectedInstOperands,
+ /// Merge all memory operands into instruction.
+ /// - InsnID - Instruction ID to modify
+ GIR_MergeMemOperands,
+ /// Erase from parent.
+ /// - InsnID - Instruction ID to erase
+ GIR_EraseFromParent,
+
+ /// A successful emission
+ GIR_Done,
+};
+
/// Provides the logic to select generic machine instructions.
class InstructionSelector {
public:
@@ -78,9 +206,39 @@ public:
protected:
using ComplexRendererFn = std::function<void(MachineInstrBuilder &)>;
+ using RecordedMIVector = SmallVector<MachineInstr *, 4>;
+ using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
+
+ struct MatcherState {
+ std::vector<ComplexRendererFn> Renderers;
+ RecordedMIVector MIs;
+
+ MatcherState(unsigned MaxRenderers);
+ };
+public:
+ template <class PredicateBitset, class ComplexMatcherMemFn>
+ struct MatcherInfoTy {
+ const LLT *TypeObjects;
+ const PredicateBitset *FeatureBitsets;
+ const std::vector<ComplexMatcherMemFn> ComplexPredicates;
+ };
+
+protected:
InstructionSelector();
+ /// Execute a given matcher table and return true if the match was successful
+ /// and false otherwise.
+ template <class TgtInstructionSelector, class PredicateBitset,
+ class ComplexMatcherMemFn>
+ bool executeMatchTable(
+ TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+ const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> &MatcherInfo,
+ const int64_t *MatchTable, const TargetInstrInfo &TII,
+ MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI,
+ const PredicateBitset &AvailableFeatures) const;
+
/// Constrain a register operand of an instruction \p I to a specified
/// register class. This could involve inserting COPYs before (for uses) or
/// after (for defs) and may replace the operand of \p I.
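The GIM_*/GIR_* values form a small bytecode that TableGen-erated selectors hand to executeMatchTable() (added in the new InstructionSelectorImpl.h below). A hand-written, purely illustrative table for "match a G_ADD with two s32 operands and mutate it into a target add"; the type ID and the MYTGT::ADDrr opcode are placeholders, and real tables are emitted by TableGen:

    // Illustrative only; operand counts follow executeMatchTable()'s decoding.
    const int64_t MatchTable[] = {
        GIM_CheckOpcode,      /*InsnID*/ 0, TargetOpcode::G_ADD,
        GIM_CheckNumOperands, /*InsnID*/ 0, /*Expected*/ 3,
        GIM_CheckType,        /*InsnID*/ 0, /*OpIdx*/ 1, /*TypeID (s32)*/ 0,
        GIM_CheckType,        /*InsnID*/ 0, /*OpIdx*/ 2, /*TypeID (s32)*/ 0,
        GIR_MutateOpcode,     /*OldInsnID*/ 0, /*NewInsnID*/ 0, MYTGT::ADDrr,
        GIR_ConstrainSelectedInstOperands, /*InsnID*/ 0,
        GIR_Done,
    };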
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
new file mode 100644
index 000000000000..98b6b859b9e2
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -0,0 +1,337 @@
+//==-- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h ---------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector.
+/// This class is responsible for selecting machine instructions.
+/// It's implemented by the target. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+
+namespace llvm {
+template <class TgtInstructionSelector, class PredicateBitset,
+ class ComplexMatcherMemFn>
+bool InstructionSelector::executeMatchTable(
+ TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+ const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> &MatcherInfo,
+ const int64_t *MatchTable, const TargetInstrInfo &TII,
+ MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI,
+ const PredicateBitset &AvailableFeatures) const {
+ const int64_t *Command = MatchTable;
+ while (true) {
+ switch (*Command++) {
+ case GIM_RecordInsn: {
+ int64_t NewInsnID = *Command++;
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+
+ // As an optimisation we require that MIs[0] is always the root. Refuse
+ // any attempt to modify it.
+ assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
+ (void)NewInsnID;
+
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg()) {
+ DEBUG(dbgs() << "Rejected (not a register)\n");
+ return false;
+ }
+ if (TRI.isPhysicalRegister(MO.getReg())) {
+ DEBUG(dbgs() << "Rejected (is a physical register)\n");
+ return false;
+ }
+
+ assert((size_t)NewInsnID == State.MIs.size() &&
+ "Expected to store MIs in order");
+ State.MIs.push_back(MRI.getVRegDef(MO.getReg()));
+ DEBUG(dbgs() << "MIs[" << NewInsnID << "] = GIM_RecordInsn(" << InsnID
+ << ", " << OpIdx << ")\n");
+ break;
+ }
+
+ case GIM_CheckFeatures: {
+ int64_t ExpectedBitsetID = *Command++;
+ DEBUG(dbgs() << "GIM_CheckFeatures(ExpectedBitsetID=" << ExpectedBitsetID
+ << ")\n");
+ if ((AvailableFeatures & MatcherInfo.FeatureBitsets[ExpectedBitsetID]) !=
+ MatcherInfo.FeatureBitsets[ExpectedBitsetID]) {
+ DEBUG(dbgs() << "Rejected\n");
+ return false;
+ }
+ break;
+ }
+
+ case GIM_CheckOpcode: {
+ int64_t InsnID = *Command++;
+ int64_t Expected = *Command++;
+
+ unsigned Opcode = State.MIs[InsnID]->getOpcode();
+ DEBUG(dbgs() << "GIM_CheckOpcode(MIs[" << InsnID << "], ExpectedOpcode="
+ << Expected << ") // Got=" << Opcode << "\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (Opcode != Expected)
+ return false;
+ break;
+ }
+ case GIM_CheckNumOperands: {
+ int64_t InsnID = *Command++;
+ int64_t Expected = *Command++;
+ DEBUG(dbgs() << "GIM_CheckNumOperands(MIs[" << InsnID
+ << "], Expected=" << Expected << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (State.MIs[InsnID]->getNumOperands() != Expected)
+ return false;
+ break;
+ }
+
+ case GIM_CheckType: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t TypeID = *Command++;
+ DEBUG(dbgs() << "GIM_CheckType(MIs[" << InsnID << "]->getOperand("
+ << OpIdx << "), TypeID=" << TypeID << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (MRI.getType(State.MIs[InsnID]->getOperand(OpIdx).getReg()) !=
+ MatcherInfo.TypeObjects[TypeID])
+ return false;
+ break;
+ }
+ case GIM_CheckRegBankForClass: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t RCEnum = *Command++;
+ DEBUG(dbgs() << "GIM_CheckRegBankForClass(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "), RCEnum=" << RCEnum
+ << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (&RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum)) !=
+ RBI.getRegBank(State.MIs[InsnID]->getOperand(OpIdx).getReg(), MRI, TRI))
+ return false;
+ break;
+ }
+ case GIM_CheckComplexPattern: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t RendererID = *Command++;
+ int64_t ComplexPredicateID = *Command++;
+ DEBUG(dbgs() << "State.Renderers[" << RendererID
+ << "] = GIM_CheckComplexPattern(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx
+ << "), ComplexPredicateID=" << ComplexPredicateID << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ // FIXME: Use std::invoke() when it's available.
+ if (!(State.Renderers[RendererID] =
+ (ISel.*MatcherInfo.ComplexPredicates[ComplexPredicateID])(
+ State.MIs[InsnID]->getOperand(OpIdx))))
+ return false;
+ break;
+ }
+ case GIM_CheckConstantInt: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t Value = *Command++;
+ DEBUG(dbgs() << "GIM_CheckConstantInt(MIs[" << InsnID << "]->getOperand("
+ << OpIdx << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!isOperandImmEqual(State.MIs[InsnID]->getOperand(OpIdx), Value, MRI))
+ return false;
+ break;
+ }
+ case GIM_CheckLiteralInt: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t Value = *Command++;
+ DEBUG(dbgs() << "GIM_CheckLiteralInt(MIs[" << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!OM.isCImm() || !OM.getCImm()->equalsInt(Value))
+ return false;
+ break;
+ }
+ case GIM_CheckIntrinsicID: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t Value = *Command++;
+ DEBUG(dbgs() << "GIM_CheckIntrinsicID(MIs[" << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!OM.isIntrinsicID() || OM.getIntrinsicID() != Value)
+ return false;
+ break;
+ }
+ case GIM_CheckIsMBB: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ DEBUG(dbgs() << "GIM_CheckIsMBB(MIs[" << InsnID << "]->getOperand("
+ << OpIdx << "))\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB())
+ return false;
+ break;
+ }
+
+ case GIM_CheckIsSafeToFold: {
+ int64_t InsnID = *Command++;
+ DEBUG(dbgs() << "GIM_CheckIsSafeToFold(MIs[" << InsnID << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!isObviouslySafeToFold(*State.MIs[InsnID]))
+ return false;
+ break;
+ }
+
+ case GIR_MutateOpcode: {
+ int64_t OldInsnID = *Command++;
+ int64_t NewInsnID = *Command++;
+ int64_t NewOpcode = *Command++;
+ assert((size_t)NewInsnID == OutMIs.size() &&
+ "Expected to store MIs in order");
+ OutMIs.push_back(
+ MachineInstrBuilder(*State.MIs[OldInsnID]->getParent()->getParent(),
+ State.MIs[OldInsnID]));
+ OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
+ DEBUG(dbgs() << "GIR_MutateOpcode(OutMIs[" << NewInsnID << "], MIs["
+ << OldInsnID << "], " << NewOpcode << ")\n");
+ break;
+ }
+ case GIR_BuildMI: {
+ int64_t InsnID = *Command++;
+ int64_t Opcode = *Command++;
+ assert((size_t)InsnID == OutMIs.size() &&
+ "Expected to store MIs in order");
+ (void)InsnID;
+ OutMIs.push_back(BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
+ State.MIs[0]->getDebugLoc(), TII.get(Opcode)));
+ DEBUG(dbgs() << "GIR_BuildMI(OutMIs[" << InsnID << "], " << Opcode
+ << ")\n");
+ break;
+ }
+
+ case GIR_Copy: {
+ int64_t NewInsnID = *Command++;
+ int64_t OldInsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
+ DEBUG(dbgs() << "GIR_Copy(OutMIs[" << NewInsnID << "], MIs[" << OldInsnID
+ << "], " << OpIdx << ")\n");
+ break;
+ }
+ case GIR_CopySubReg: {
+ int64_t NewInsnID = *Command++;
+ int64_t OldInsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t SubRegIdx = *Command++;
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
+ 0, SubRegIdx);
+ DEBUG(dbgs() << "GIR_CopySubReg(OutMIs[" << NewInsnID << "], MIs["
+ << OldInsnID << "], " << OpIdx << ", " << SubRegIdx
+ << ")\n");
+ break;
+ }
+ case GIR_AddImplicitDef: {
+ int64_t InsnID = *Command++;
+ int64_t RegNum = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
+ DEBUG(dbgs() << "GIR_AddImplicitDef(OutMIs[" << InsnID << "], " << RegNum
+ << ")\n");
+ break;
+ }
+ case GIR_AddImplicitUse: {
+ int64_t InsnID = *Command++;
+ int64_t RegNum = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
+ DEBUG(dbgs() << "GIR_AddImplicitUse(OutMIs[" << InsnID << "], " << RegNum
+ << ")\n");
+ break;
+ }
+ case GIR_AddRegister: {
+ int64_t InsnID = *Command++;
+ int64_t RegNum = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addReg(RegNum);
+ DEBUG(dbgs() << "GIR_AddRegister(OutMIs[" << InsnID << "], " << RegNum
+ << ")\n");
+ break;
+ }
+ case GIR_AddImm: {
+ int64_t InsnID = *Command++;
+ int64_t Imm = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addImm(Imm);
+ DEBUG(dbgs() << "GIR_AddImm(OutMIs[" << InsnID << "], " << Imm << ")\n");
+ break;
+ }
+ case GIR_ComplexRenderer: {
+ int64_t InsnID = *Command++;
+ int64_t RendererID = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ State.Renderers[RendererID](OutMIs[InsnID]);
+ DEBUG(dbgs() << "GIR_ComplexRenderer(OutMIs[" << InsnID << "], "
+ << RendererID << ")\n");
+ break;
+ }
+
+ case GIR_ConstrainOperandRC: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t RCEnum = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ constrainOperandRegToRegClass(*OutMIs[InsnID].getInstr(), OpIdx,
+ *TRI.getRegClass(RCEnum), TII, TRI, RBI);
+ DEBUG(dbgs() << "GIR_ConstrainOperandRC(OutMIs[" << InsnID << "], "
+ << OpIdx << ", " << RCEnum << ")\n");
+ break;
+ }
+ case GIR_ConstrainSelectedInstOperands: {
+ int64_t InsnID = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
+ RBI);
+ DEBUG(dbgs() << "GIR_ConstrainSelectedInstOperands(OutMIs[" << InsnID
+ << "])\n");
+ break;
+ }
+ case GIR_MergeMemOperands: {
+ int64_t InsnID = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ for (const auto *FromMI : State.MIs)
+ for (const auto &MMO : FromMI->memoperands())
+ OutMIs[InsnID].addMemOperand(MMO);
+ DEBUG(dbgs() << "GIR_MergeMemOperands(OutMIs[" << InsnID << "])\n");
+ break;
+ }
+ case GIR_EraseFromParent: {
+ int64_t InsnID = *Command++;
+ assert(State.MIs[InsnID] &&
+ "Attempted to erase an undefined instruction");
+ State.MIs[InsnID]->eraseFromParent();
+ DEBUG(dbgs() << "GIR_EraseFromParent(MIs[" << InsnID << "])\n");
+ break;
+ }
+
+ case GIR_Done:
+ DEBUG(dbgs() << "GIR_Done");
+ return true;
+
+ default:
+ llvm_unreachable("Unexpected command");
+ }
+ }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 5197ba869c0a..1fd45b52e3ac 100644
--- a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -101,11 +101,11 @@ private:
const LegalizerInfo &LI;
};
-/// Helper function that replaces \p MI with a libcall.
+/// Helper function that creates the given libcall.
LegalizerHelper::LegalizeResult
-replaceWithLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
- RTLIB::Libcall Libcall, const CallLowering::ArgInfo &Result,
- ArrayRef<CallLowering::ArgInfo> Args);
+createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+ const CallLowering::ArgInfo &Result,
+ ArrayRef<CallLowering::ArgInfo> Args);
} // End namespace llvm.
diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index c9327d50432e..85e6fef1f3c2 100644
--- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
@@ -59,6 +60,21 @@ class MachineIRBuilder {
}
void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
+ MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Res, unsigned Op0, unsigned Op1);
+
+ unsigned getDestFromArg(unsigned Reg) { return Reg; }
+ unsigned getDestFromArg(LLT Ty) {
+ return getMF().getRegInfo().createGenericVirtualRegister(Ty);
+ }
+ unsigned getDestFromArg(const TargetRegisterClass *RC) {
+ return getMF().getRegInfo().createVirtualRegister(RC);
+ }
+
+ unsigned getRegFromArg(unsigned Reg) { return Reg; }
+
+ unsigned getRegFromArg(const MachineInstrBuilder &MIB) {
+ return MIB->getOperand(0).getReg();
+ }
public:
/// Getter for the function we currently build.
@@ -120,6 +136,22 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildInstr(unsigned Opcode);
+ /// DAG-like generic method for building arbitrary instructions as above.
+ /// \p Opc Opcode for the instruction.
+ /// \p Ty Either an LLT, a TargetRegisterClass, or an unsigned (register) for Dst.
+ /// \p Args Variadic list of uses, each an unsigned or a MachineInstrBuilder.
+ /// Uses of type MachineInstrBuilder will have getOperand(0).getReg() called
+ /// to convert them to registers.
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+ UseArgsTy &&... Args) {
+ auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
+ unsigned It[] = {(getRegFromArg(Args))...};
+ for (const auto &i : It)
+ MIB.addUse(i);
+ return MIB;
+ }
+
/// Build but don't insert <empty> = \p Opcode <empty>.
///
/// \pre setMF, setBasicBlock or setMI must have been called.
@@ -188,6 +220,11 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
unsigned Op1);
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+ unsigned Res = getDestFromArg(Ty);
+ return buildAdd(Res, (getRegFromArg(UseArgs))...);
+ }
/// Build and insert \p Res<def> = G_SUB \p Op0, \p Op1
///
@@ -295,6 +332,18 @@ public:
MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
unsigned Op1);
+ /// Build and insert \p Res<def> = G_OR \p Op0, \p Op1
+ ///
+ /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
+ /// Op1.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
+
/// Build and insert \p Res<def> = G_ANYEXT \p Op0
///
/// G_ANYEXT produces a register of the specified width, with bits 0 to
@@ -416,6 +465,10 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildConstant(unsigned Res, int64_t Val);
+ template <typename DstType>
+ MachineInstrBuilder buildConstant(DstType &&Res, int64_t Val) {
+ return buildConstant(getDestFromArg(Res), Val);
+ }
/// Build and insert \p Res = G_FCONSTANT \p Val
///
/// G_FCONSTANT is a floating-point constant with the specified size and
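buildInstr() and buildAdd() gain DAG-style variadic overloads: the destination may be given as an LLT (a fresh generic virtual register is created via getDestFromArg), and uses may be registers or previously built instructions. A hedged sketch (MIRBuilder, Op0Reg and Op1Reg are assumed to be an already initialized MachineIRBuilder and existing virtual registers):

    LLT S32 = LLT::scalar(32);
    // The LLT destination creates a new generic vreg; a MachineInstrBuilder use
    // is converted to its def register via getRegFromArg().
    auto Add = MIRBuilder.buildAdd(S32, Op0Reg, Op1Reg);
    auto C1  = MIRBuilder.buildConstant(S32, 1);
    auto Or  = MIRBuilder.buildInstr(TargetOpcode::G_OR, S32, Add, C1);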
diff --git a/include/llvm/CodeGen/LiveRegUnits.h b/include/llvm/CodeGen/LiveRegUnits.h
index fa1ec867ea3d..c28b1a06854f 100644
--- a/include/llvm/CodeGen/LiveRegUnits.h
+++ b/include/llvm/CodeGen/LiveRegUnits.h
@@ -93,12 +93,14 @@ public:
}
/// Updates liveness when stepping backwards over the instruction \p MI.
+ /// This removes all register units defined or clobbered in \p MI and then
+ /// adds the units used (as in use operands) in \p MI.
void stepBackward(const MachineInstr &MI);
- /// Mark all register units live during instruction \p MI.
- /// This can be used to accumulate live/unoccupied registers over a range of
- /// instructions.
- void accumulateBackward(const MachineInstr &MI);
+ /// Adds all register units used, defined or clobbered in \p MI.
+ /// This is useful when walking over a range of instructions to find registers
+ /// unused over the whole range.
+ void accumulate(const MachineInstr &MI);
/// Adds registers living out of block \p MBB.
/// Live out registers are the union of the live-in registers of the successor
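The rename from accumulateBackward() to accumulate() reflects that the function is direction-agnostic: it only ever adds units, so it can mark everything touched by a range of instructions. A hedged sketch that finds units unused across a whole block (MBB and TRI are assumed to be an existing MachineBasicBlock and TargetRegisterInfo):

    LiveRegUnits Units(*TRI);
    Units.addLiveOuts(MBB);
    for (const MachineInstr &MI : MBB)
      Units.accumulate(MI);       // uses + defs + clobbers of every instruction
    // Any physical register whose units are still clear in Units is unused
    // (and not live-out) across the entire block.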
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
index f67da7b01c54..19173fa39bdc 100644
--- a/include/llvm/CodeGen/MachineFunction.h
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -650,7 +650,7 @@ public:
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
diff --git a/include/llvm/CodeGen/MachineMemOperand.h b/include/llvm/CodeGen/MachineMemOperand.h
index 78adce507b8c..a9de0db05d72 100644
--- a/include/llvm/CodeGen/MachineMemOperand.h
+++ b/include/llvm/CodeGen/MachineMemOperand.h
@@ -114,6 +114,9 @@ public:
MOInvariant = 1u << 5,
// Reserved for use by target-specific passes.
+ // Targets may override getSerializableMachineMemOperandTargetFlags() to
+ // enable MIR serialization/parsing of these flags. If more of these flags
+ // are added, the MIR printing/parsing code will need to be updated as well.
MOTargetFlag1 = 1u << 6,
MOTargetFlag2 = 1u << 7,
MOTargetFlag3 = 1u << 8,
@@ -124,8 +127,8 @@ public:
private:
/// Atomic information for this memory operation.
struct MachineAtomicInfo {
- /// Synchronization scope for this memory operation.
- unsigned SynchScope : 1; // enum SynchronizationScope
+ /// Synchronization scope ID for this memory operation.
+ unsigned SSID : 8; // SyncScope::ID
/// Atomic ordering requirements for this memory operation. For cmpxchg
/// atomic operations, atomic ordering requirements when store occurs.
unsigned Ordering : 4; // enum AtomicOrdering
@@ -152,7 +155,7 @@ public:
unsigned base_alignment,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
@@ -202,9 +205,9 @@ public:
/// Return the range tag for the memory reference.
const MDNode *getRanges() const { return Ranges; }
- /// Return the synchronization scope for this memory operation.
- SynchronizationScope getSynchScope() const {
- return static_cast<SynchronizationScope>(AtomicInfo.SynchScope);
+ /// Returns the synchronization scope ID for this memory operation.
+ SyncScope::ID getSyncScopeID() const {
+ return static_cast<SyncScope::ID>(AtomicInfo.SSID);
}
/// Return the atomic ordering requirements for this memory operation. For
diff --git a/include/llvm/CodeGen/RuntimeLibcalls.h b/include/llvm/CodeGen/RuntimeLibcalls.h
index 8c3aacaa8efc..08151be11083 100644
--- a/include/llvm/CodeGen/RuntimeLibcalls.h
+++ b/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -340,6 +340,18 @@ namespace RTLIB {
MEMCPY_ELEMENT_UNORDERED_ATOMIC_8,
MEMCPY_ELEMENT_UNORDERED_ATOMIC_16,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16,
+
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_1,
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_2,
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_4,
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_8,
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_16,
+
// EXCEPTION HANDLING
UNWIND_RESUME,
@@ -515,6 +527,17 @@ namespace RTLIB {
/// MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
/// UNKNOW_LIBCALL if there is none.
Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+ /// getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return
+ /// MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
+ /// UNKNOW_LIBCALL if there is none.
+ Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+ /// getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return
+ /// MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
+ /// UNKNOW_LIBCALL if there is none.
+ Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
}
}
diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h
index 4d72eda5c71a..25afc5b506df 100644
--- a/include/llvm/CodeGen/ScheduleDAG.h
+++ b/include/llvm/CodeGen/ScheduleDAG.h
@@ -235,6 +235,9 @@ class TargetRegisterInfo;
"SDep::Output edge cannot use the zero register!");
Contents.Reg = Reg;
}
+
+ raw_ostream &print(raw_ostream &O,
+ const TargetRegisterInfo *TRI = nullptr) const;
};
template <>
@@ -458,7 +461,10 @@ class TargetRegisterInfo;
void dump(const ScheduleDAG *G) const;
void dumpAll(const ScheduleDAG *G) const;
- void print(raw_ostream &O, const ScheduleDAG *G) const;
+ raw_ostream &print(raw_ostream &O,
+ const SUnit *N = nullptr,
+ const SUnit *X = nullptr) const;
+ raw_ostream &print(raw_ostream &O, const ScheduleDAG *G) const;
private:
void ComputeDepth();
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index f3f3003b7e20..55a23c3cca9b 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -927,7 +927,7 @@ public:
SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
unsigned Alignment, AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTs, SDValue Chain, SDValue Ptr,
SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
@@ -937,7 +937,7 @@ public:
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, const Value *PtrVal,
unsigned Alignment, AtomicOrdering Ordering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index d9f8af0e21d1..db42fb6c170c 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1213,8 +1213,8 @@ public:
/// Returns the Ranges that describes the dereference.
const MDNode *getRanges() const { return MMO->getRanges(); }
- /// Return the synchronization scope for this memory operation.
- SynchronizationScope getSynchScope() const { return MMO->getSynchScope(); }
+ /// Returns the synchronization scope ID for this memory operation.
+ SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
/// Return the atomic ordering requirements for this memory operation. For
/// cmpxchg atomic operations, return the atomic ordering requirements when
@@ -1432,8 +1432,8 @@ public:
int64_t getSExtValue() const { return Value->getSExtValue(); }
bool isOne() const { return Value->isOne(); }
- bool isNullValue() const { return Value->isNullValue(); }
- bool isAllOnesValue() const { return Value->isAllOnesValue(); }
+ bool isNullValue() const { return Value->isZero(); }
+ bool isAllOnesValue() const { return Value->isMinusOne(); }
bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
diff --git a/include/llvm/DebugInfo/CodeView/SymbolRecord.h b/include/llvm/DebugInfo/CodeView/SymbolRecord.h
index 7941af8be8af..cdfc1745cea5 100644
--- a/include/llvm/DebugInfo/CodeView/SymbolRecord.h
+++ b/include/llvm/DebugInfo/CodeView/SymbolRecord.h
@@ -735,6 +735,10 @@ public:
uint16_t VersionBackendQFE;
StringRef Version;
+ void setLanguage(SourceLanguage Lang) {
+ Flags = CompileSym3Flags((uint32_t(Flags) & 0xFFFFFF00) | uint32_t(Lang));
+ }
+
uint8_t getLanguage() const { return static_cast<uint32_t>(Flags) & 0xFF; }
uint32_t getFlags() const { return static_cast<uint32_t>(Flags) & ~0xFF; }
diff --git a/include/llvm/DebugInfo/CodeView/TypeIndex.h b/include/llvm/DebugInfo/CodeView/TypeIndex.h
index 10d51c2d6244..e0c2226bdbd7 100644
--- a/include/llvm/DebugInfo/CodeView/TypeIndex.h
+++ b/include/llvm/DebugInfo/CodeView/TypeIndex.h
@@ -10,9 +10,11 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Endian.h"
#include <cassert>
#include <cinttypes>
+#include <functional>
namespace llvm {
@@ -265,6 +267,23 @@ struct TypeIndexOffset {
void printTypeIndex(ScopedPrinter &Printer, StringRef FieldName, TypeIndex TI,
TypeCollection &Types);
}
-}
+
+template <> struct DenseMapInfo<codeview::TypeIndex> {
+ static inline codeview::TypeIndex getEmptyKey() {
+ return codeview::TypeIndex{DenseMapInfo<uint32_t>::getEmptyKey()};
+ }
+ static inline codeview::TypeIndex getTombstoneKey() {
+ return codeview::TypeIndex{DenseMapInfo<uint32_t>::getTombstoneKey()};
+ }
+ static unsigned getHashValue(const codeview::TypeIndex &TI) {
+ return DenseMapInfo<uint32_t>::getHashValue(TI.getIndex());
+ }
+ static bool isEqual(const codeview::TypeIndex &LHS,
+ const codeview::TypeIndex &RHS) {
+ return LHS == RHS;
+ }
+};
+
+} // namespace llvm
#endif
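The new DenseMapInfo specialization lets codeview::TypeIndex key a DenseMap directly, hashing on the underlying 32-bit index. A small hedged sketch (the map and the index value are illustrative):

    llvm::DenseMap<llvm::codeview::TypeIndex, unsigned> TypeUseCounts;
    llvm::codeview::TypeIndex TI(0x1004);   // some record index
    ++TypeUseCounts[TI];                    // hashes via DenseMapInfo<TypeIndex>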
diff --git a/include/llvm/DebugInfo/DIContext.h b/include/llvm/DebugInfo/DIContext.h
index 4126e245ff13..936813dc6abc 100644
--- a/include/llvm/DebugInfo/DIContext.h
+++ b/include/llvm/DebugInfo/DIContext.h
@@ -204,7 +204,9 @@ public:
/// need to be consistent with the addresses used to query the DIContext and
/// the output of this function should be deterministic, i.e. repeated calls with
/// the same Sec should give the same address.
- virtual uint64_t getSectionLoadAddress(const object::SectionRef &Sec) const = 0;
+ virtual uint64_t getSectionLoadAddress(const object::SectionRef &Sec) const {
+ return 0;
+ }
/// If conveniently available, return the content of the given Section.
///
@@ -221,12 +223,28 @@ public:
return false;
}
+ // FIXME: This is untested and unused anywhere in the LLVM project; it's
+ // used/needed by Julia (an external project). It should have some coverage
+ // (at least tests, but ideally example functionality).
/// Obtain a copy of this LoadedObjectInfo.
- ///
- /// The caller is responsible for deallocation once the copy is no longer required.
virtual std::unique_ptr<LoadedObjectInfo> clone() const = 0;
};
+template <typename Derived, typename Base = LoadedObjectInfo>
+struct LoadedObjectInfoHelper : Base {
+protected:
+ LoadedObjectInfoHelper(const LoadedObjectInfoHelper &) = default;
+ LoadedObjectInfoHelper() = default;
+
+public:
+ template <typename... Ts>
+ LoadedObjectInfoHelper(Ts &&... Args) : Base(std::forward<Ts>(Args)...) {}
+
+ std::unique_ptr<llvm::LoadedObjectInfo> clone() const override {
+ return llvm::make_unique<Derived>(static_cast<const Derived &>(*this));
+ }
+};
+
} // end namespace llvm
#endif // LLVM_DEBUGINFO_DICONTEXT_H
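LoadedObjectInfoHelper is a CRTP convenience base: it supplies the clone() override, and with getSectionLoadAddress() no longer pure, a client only overrides what it actually needs. A hedged sketch of a JIT-side subclass (the section-address map is an assumption, not part of this interface):

    struct MyLoadedObjectInfo
        : llvm::LoadedObjectInfoHelper<MyLoadedObjectInfo> {
      std::map<llvm::object::SectionRef, uint64_t> Addrs;

      uint64_t
      getSectionLoadAddress(const llvm::object::SectionRef &Sec) const override {
        auto It = Addrs.find(Sec);
        return It == Addrs.end() ? 0 : It->second;
      }
      // clone() is provided by LoadedObjectInfoHelper.
    };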
diff --git a/include/llvm/DebugInfo/DWARF/DWARFContext.h b/include/llvm/DebugInfo/DWARF/DWARFContext.h
index 739aa1f9ee74..ee2e805050c0 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -226,11 +226,7 @@ public:
virtual bool isLittleEndian() const = 0;
virtual uint8_t getAddressSize() const = 0;
virtual const DWARFSection &getInfoSection() = 0;
-
- using TypeSectionMap = MapVector<object::SectionRef, DWARFSection,
- std::map<object::SectionRef, unsigned>>;
-
- virtual const TypeSectionMap &getTypesSections() = 0;
+ virtual void forEachTypesSections(function_ref<void(DWARFSection &)> F) = 0;
virtual StringRef getAbbrevSection() = 0;
virtual const DWARFSection &getLocSection() = 0;
virtual StringRef getARangeSection() = 0;
@@ -252,7 +248,8 @@ public:
// Sections for DWARF5 split dwarf proposal.
virtual const DWARFSection &getInfoDWOSection() = 0;
- virtual const TypeSectionMap &getTypesDWOSections() = 0;
+ virtual void
+ forEachTypesDWOSections(function_ref<void(DWARFSection &)> F) = 0;
virtual StringRef getAbbrevDWOSection() = 0;
virtual const DWARFSection &getLineDWOSection() = 0;
virtual const DWARFSection &getLocDWOSection() = 0;
@@ -294,6 +291,9 @@ enum class ErrorPolicy { Halt, Continue };
class DWARFContextInMemory : public DWARFContext {
virtual void anchor();
+ using TypeSectionMap = MapVector<object::SectionRef, DWARFSection,
+ std::map<object::SectionRef, unsigned>>;
+
StringRef FileName;
bool IsLittleEndian;
uint8_t AddressSize;
@@ -338,7 +338,8 @@ class DWARFContextInMemory : public DWARFContext {
SmallVector<SmallString<32>, 4> UncompressedSections;
- StringRef *MapSectionToMember(StringRef Name);
+ DWARFSection *mapNameToDWARFSection(StringRef Name);
+ StringRef *mapSectionToMember(StringRef Name);
/// If Sec is compressed section, decompresses and updates its contents
/// provided by Data. Otherwise leaves it unchanged.
@@ -362,7 +363,10 @@ public:
bool isLittleEndian() const override { return IsLittleEndian; }
uint8_t getAddressSize() const override { return AddressSize; }
const DWARFSection &getInfoSection() override { return InfoSection; }
- const TypeSectionMap &getTypesSections() override { return TypesSections; }
+ void forEachTypesSections(function_ref<void(DWARFSection &)> F) override {
+ for (auto &P : TypesSections)
+ F(P.second);
+ }
StringRef getAbbrevSection() override { return AbbrevSection; }
const DWARFSection &getLocSection() override { return LocSection; }
StringRef getARangeSection() override { return ARangeSection; }
@@ -389,8 +393,9 @@ public:
// Sections for DWARF5 split dwarf proposal.
const DWARFSection &getInfoDWOSection() override { return InfoDWOSection; }
- const TypeSectionMap &getTypesDWOSections() override {
- return TypesDWOSections;
+ void forEachTypesDWOSections(function_ref<void(DWARFSection &)> F) override {
+ for (auto &P : TypesDWOSections)
+ F(P.second);
}
StringRef getAbbrevDWOSection() override { return AbbrevDWOSection; }
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
index e4cb1b24e30d..c918a5d5e976 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
@@ -47,6 +47,7 @@ public:
DbiModuleDescriptorBuilder &
operator=(const DbiModuleDescriptorBuilder &) = delete;
+ void setPdbFilePathNI(uint32_t NI);
void setObjFileName(StringRef Name);
void addSymbol(codeview::CVSymbol Symbol);
@@ -68,6 +69,10 @@ public:
uint32_t calculateSerializedLength() const;
+ /// Return the offset within the module symbol stream of the next symbol
+ /// record passed to addSymbol. Add four to account for the signature.
+ uint32_t getNextSymbolOffset() const { return SymbolByteSize + 4; }
+
void finalize();
Error finalizeMsfLayout();
@@ -81,6 +86,7 @@ private:
msf::MSFBuilder &MSF;
uint32_t SymbolByteSize = 0;
+ uint32_t PdbFilePathNI = 0;
std::string ModuleName;
std::string ObjFileName;
std::vector<std::string> SourceFiles;
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiStream.h b/include/llvm/DebugInfo/PDB/Native/DbiStream.h
index 3bf790726656..4be113f28d6f 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiStream.h
@@ -83,6 +83,8 @@ public:
FixedStreamArray<SecMapEntry> getSectionMap() const;
void visitSectionContributions(ISectionContribVisitor &Visitor) const;
+ Expected<StringRef> getECName(uint32_t NI) const;
+
private:
Error initializeSectionContributionData();
Error initializeSectionHeadersData();
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
index 744411854181..63eb34f0326a 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
@@ -15,6 +15,7 @@
#include "llvm/Support/Error.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
+#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/BinaryByteStream.h"
@@ -54,8 +55,13 @@ public:
// Add given bytes as a new stream.
Error addDbgStream(pdb::DbgHeaderType Type, ArrayRef<uint8_t> Data);
+ uint32_t addECName(StringRef Name);
+
uint32_t calculateSerializedLength() const;
+ void setPublicsStreamIndex(uint32_t Index);
+ void setSymbolRecordStreamIndex(uint32_t Index);
+
Expected<DbiModuleDescriptorBuilder &> addModuleInfo(StringRef ModuleName);
Error addModuleSourceFile(StringRef Module, StringRef File);
Error addModuleSourceFile(DbiModuleDescriptorBuilder &Module, StringRef File);
@@ -75,7 +81,7 @@ public:
private:
struct DebugStream {
ArrayRef<uint8_t> Data;
- uint16_t StreamNumber = 0;
+ uint16_t StreamNumber = kInvalidStreamIndex;
};
Error finalize();
@@ -87,7 +93,6 @@ private:
uint32_t calculateNamesBufferSize() const;
uint32_t calculateDbgStreamsSize() const;
- Error generateModiSubstream();
Error generateFileInfoSubstream();
msf::MSFBuilder &Msf;
@@ -100,6 +105,8 @@ private:
uint16_t PdbDllRbld;
uint16_t Flags;
PDB_Machine MachineType;
+ uint32_t PublicsStreamIndex = kInvalidStreamIndex;
+ uint32_t SymRecordStreamIndex = kInvalidStreamIndex;
const DbiStreamHeader *Header;
@@ -108,6 +115,7 @@ private:
StringMap<uint32_t> SourceFileNames;
+ PDBStringTableBuilder ECNamesBuilder;
WritableBinaryStreamRef NamesBuffer;
MutableBinaryByteStream FileInfoBuffer;
std::vector<SectionContrib> SectionContribs;
diff --git a/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h b/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
index 25f66240a6a2..17a82b7ce12d 100644
--- a/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
+++ b/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
@@ -44,7 +44,7 @@ public:
bool get(StringRef Stream, uint32_t &StreamNo) const;
void set(StringRef Stream, uint32_t StreamNo);
void remove(StringRef Stream);
-
+ const StringMap<uint32_t> &getStringMap() const { return Mapping; }
iterator_range<StringMapConstIterator<uint32_t>> entries() const;
private:
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h
new file mode 100644
index 000000000000..4f532c6e3829
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h
@@ -0,0 +1,49 @@
+//===- NativeBuiltinSymbol.h -------------------------------------- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEBUILTINSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEBUILTINSYMBOL_H
+
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeSession;
+
+class NativeBuiltinSymbol : public NativeRawSymbol {
+public:
+ NativeBuiltinSymbol(NativeSession &PDBSession, SymIndexId Id,
+ PDB_BuiltinType T, uint64_t L);
+ ~NativeBuiltinSymbol() override;
+
+ virtual std::unique_ptr<NativeRawSymbol> clone() const override;
+
+ void dump(raw_ostream &OS, int Indent) const override;
+
+ PDB_SymType getSymTag() const override;
+
+ PDB_BuiltinType getBuiltinType() const override;
+ bool isConstType() const override;
+ uint64_t getLength() const override;
+ bool isUnalignedType() const override;
+ bool isVolatileType() const override;
+
+protected:
+ NativeSession &Session;
+ PDB_BuiltinType Type;
+ uint64_t Length;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
index 1687737f0e7f..bd5c09e5ff76 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
@@ -18,7 +18,7 @@ namespace pdb {
class NativeCompilandSymbol : public NativeRawSymbol {
public:
- NativeCompilandSymbol(NativeSession &Session, uint32_t SymbolId,
+ NativeCompilandSymbol(NativeSession &Session, SymIndexId SymbolId,
DbiModuleDescriptor MI);
std::unique_ptr<NativeRawSymbol> clone() const override;
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
index 15bac78df191..ddb7f811da38 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
@@ -18,7 +18,7 @@ namespace pdb {
class NativeExeSymbol : public NativeRawSymbol {
public:
- NativeExeSymbol(NativeSession &Session, uint32_t SymbolId);
+ NativeExeSymbol(NativeSession &Session, SymIndexId SymbolId);
std::unique_ptr<NativeRawSymbol> clone() const override;
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
index a24a972879d2..66a9eae28e23 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
@@ -19,9 +19,11 @@ namespace pdb {
class NativeSession;
+typedef uint32_t SymIndexId;
+
class NativeRawSymbol : public IPDBRawSymbol {
public:
- NativeRawSymbol(NativeSession &PDBSession, uint32_t SymbolId);
+ NativeRawSymbol(NativeSession &PDBSession, SymIndexId SymbolId);
virtual std::unique_ptr<NativeRawSymbol> clone() const = 0;
@@ -205,7 +207,7 @@ public:
protected:
NativeSession &Session;
- uint32_t SymbolId;
+ SymIndexId SymbolId;
};
} // end namespace pdb
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeSession.h b/include/llvm/DebugInfo/PDB/Native/NativeSession.h
index dd40874dc5f2..b16ce231c349 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeSession.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeSession.h
@@ -10,9 +10,13 @@
#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVESESSION_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVESESSION_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/IPDBSession.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
@@ -35,6 +39,8 @@ public:
std::unique_ptr<PDBSymbolCompiland>
createCompilandSymbol(DbiModuleDescriptor MI);
+ SymIndexId findSymbolByTypeIndex(codeview::TypeIndex TI);
+
uint64_t getLoadAddress() const override;
void setLoadAddress(uint64_t Address) override;
std::unique_ptr<PDBSymbolExe> getGlobalScope() override;
@@ -77,6 +83,7 @@ private:
std::unique_ptr<PDBFile> Pdb;
std::unique_ptr<BumpPtrAllocator> Allocator;
std::vector<std::unique_ptr<NativeRawSymbol>> SymbolCache;
+ DenseMap<codeview::TypeIndex, SymIndexId> TypeIndexToSymbolId;
};
}
}
diff --git a/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h b/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
index cd7d3b063793..2dc23f819d3b 100644
--- a/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
@@ -31,11 +31,13 @@ class MSFBuilder;
namespace pdb {
class DbiStreamBuilder;
class InfoStreamBuilder;
+class PublicsStreamBuilder;
class TpiStreamBuilder;
class PDBFileBuilder {
public:
explicit PDBFileBuilder(BumpPtrAllocator &Allocator);
+ ~PDBFileBuilder();
PDBFileBuilder(const PDBFileBuilder &) = delete;
PDBFileBuilder &operator=(const PDBFileBuilder &) = delete;
@@ -47,6 +49,7 @@ public:
TpiStreamBuilder &getTpiBuilder();
TpiStreamBuilder &getIpiBuilder();
PDBStringTableBuilder &getStringTableBuilder();
+ PublicsStreamBuilder &getPublicsBuilder();
Error commit(StringRef Filename);
@@ -61,6 +64,7 @@ private:
std::unique_ptr<msf::MSFBuilder> Msf;
std::unique_ptr<InfoStreamBuilder> Info;
std::unique_ptr<DbiStreamBuilder> Dbi;
+ std::unique_ptr<PublicsStreamBuilder> Publics;
std::unique_ptr<TpiStreamBuilder> Tpi;
std::unique_ptr<TpiStreamBuilder> Ipi;
diff --git a/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h b/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
index 86ef1136b41d..29167c966d42 100644
--- a/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
+++ b/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
@@ -56,7 +56,6 @@ private:
const PDBStringTableHeader *Header = nullptr;
codeview::DebugStringTableSubsectionRef Strings;
FixedStreamArray<support::ulittle32_t> IDs;
- uint32_t ByteSize = 0;
uint32_t NameCount = 0;
};
diff --git a/include/llvm/DebugInfo/PDB/Native/PublicsStream.h b/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
index 4570c80c76d7..9ace826bd8f7 100644
--- a/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
@@ -25,8 +25,6 @@ struct GSIHashHeader;
class PDBFile;
class PublicsStream {
- struct HeaderInfo;
-
public:
PublicsStream(PDBFile &File, std::unique_ptr<msf::MappedBlockStream> Stream);
~PublicsStream();
@@ -65,7 +63,7 @@ private:
FixedStreamArray<support::ulittle32_t> ThunkMap;
FixedStreamArray<SectionOffset> SectionOffsets;
- const HeaderInfo *Header;
+ const PublicsStreamHeader *Header;
const GSIHashHeader *HashHdr;
};
}
diff --git a/include/llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h b/include/llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h
new file mode 100644
index 000000000000..5ab57ebef53d
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h
@@ -0,0 +1,54 @@
+//===- PublicsStreamBuilder.h - PDB Publics Stream Creation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBPUBLICSTREAMBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBPUBLICSTREAMBUILDER_H
+
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace msf {
+class MSFBuilder;
+}
+namespace pdb {
+class PublicsStream;
+struct PublicsStreamHeader;
+
+class PublicsStreamBuilder {
+public:
+ explicit PublicsStreamBuilder(msf::MSFBuilder &Msf);
+ ~PublicsStreamBuilder();
+
+ PublicsStreamBuilder(const PublicsStreamBuilder &) = delete;
+ PublicsStreamBuilder &operator=(const PublicsStreamBuilder &) = delete;
+
+ Error finalizeMsfLayout();
+ uint32_t calculateSerializedLength() const;
+
+ Error commit(BinaryStreamWriter &PublicsWriter);
+
+ uint32_t getStreamIndex() const { return StreamIdx; }
+ uint32_t getRecordStreamIdx() const { return RecordStreamIdx; }
+
+private:
+ uint32_t StreamIdx = kInvalidStreamIndex;
+ uint32_t RecordStreamIdx = kInvalidStreamIndex;
+ std::vector<PSHashRecord> HashRecords;
+ msf::MSFBuilder &Msf;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif
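The new builder exposes the stream indices it is assigned, which the setters added to DbiStreamBuilder earlier in this patch can consume. A plausible wiring sketch (not shown in the patch itself), assuming a PublicsStreamBuilder named Publics on which finalizeMsfLayout() has already run, and a DbiStreamBuilder named Dbi:

    Dbi.setPublicsStreamIndex(Publics.getStreamIndex());
    Dbi.setSymbolRecordStreamIndex(Publics.getRecordStreamIdx());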
diff --git a/include/llvm/DebugInfo/PDB/Native/RawTypes.h b/include/llvm/DebugInfo/PDB/Native/RawTypes.h
index 771272d6a47d..a3cdd3f09a44 100644
--- a/include/llvm/DebugInfo/PDB/Native/RawTypes.h
+++ b/include/llvm/DebugInfo/PDB/Native/RawTypes.h
@@ -255,6 +255,19 @@ struct ModuleInfoHeader {
/// char ObjFileName[];
};
+// This is PSGSIHDR struct defined in
+// https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/dbi/gsi.h
+struct PublicsStreamHeader {
+ support::ulittle32_t SymHash;
+ support::ulittle32_t AddrMap;
+ support::ulittle32_t NumThunks;
+ support::ulittle32_t SizeOfThunk;
+ support::ulittle16_t ISectThunkTable;
+ char Padding[2];
+ support::ulittle32_t OffThunkTable;
+ support::ulittle32_t NumSections;
+};
+
/// Defines a 128-bit unique identifier. This maps to a GUID on Windows, but
/// is abstracted here for the purposes of non-Windows platforms that don't have
/// the GUID structure defined.
diff --git a/include/llvm/ExecutionEngine/JITSymbol.h b/include/llvm/ExecutionEngine/JITSymbol.h
index f09e95fddb97..4172f240ba39 100644
--- a/include/llvm/ExecutionEngine/JITSymbol.h
+++ b/include/llvm/ExecutionEngine/JITSymbol.h
@@ -21,6 +21,8 @@
#include <functional>
#include <string>
+#include "llvm/Support/Error.h"
+
namespace llvm {
class GlobalValue;
@@ -41,10 +43,11 @@ public:
enum FlagNames : UnderlyingType {
None = 0,
- Weak = 1U << 0,
- Common = 1U << 1,
- Absolute = 1U << 2,
- Exported = 1U << 3
+ HasError = 1U << 0,
+ Weak = 1U << 1,
+ Common = 1U << 2,
+ Absolute = 1U << 3,
+ Exported = 1U << 4
};
/// @brief Default-construct a JITSymbolFlags instance.
@@ -53,6 +56,11 @@ public:
/// @brief Construct a JITSymbolFlags instance from the given flags.
JITSymbolFlags(FlagNames Flags) : Flags(Flags) {}
+ /// @brief Return true if there was an error retrieving this symbol.
+ bool hasError() const {
+ return (Flags & HasError) == HasError;
+ }
+
/// @brief Returns true if the Weak flag is set.
bool isWeak() const {
return (Flags & Weak) == Weak;
@@ -113,11 +121,17 @@ private:
/// @brief Represents a symbol in the JIT.
class JITSymbol {
public:
- using GetAddressFtor = std::function<JITTargetAddress()>;
+ using GetAddressFtor = std::function<Expected<JITTargetAddress>()>;
+
+ /// @brief Create a 'null' symbol, used to represent a "symbol not found"
+ /// result from a successful (non-erroneous) lookup.
+ JITSymbol(std::nullptr_t)
+ : CachedAddr(0) {}
- /// @brief Create a 'null' symbol that represents failure to find a symbol
- /// definition.
- JITSymbol(std::nullptr_t) {}
+ /// @brief Create a JITSymbol representing an error in the symbol lookup
+ /// process (e.g. a network failure during a remote lookup).
+ JITSymbol(Error Err)
+ : Err(std::move(Err)), Flags(JITSymbolFlags::HasError) {}
/// @brief Create a symbol for a definition with a known address.
JITSymbol(JITTargetAddress Addr, JITSymbolFlags Flags)
@@ -137,18 +151,59 @@ public:
/// user can materialize the definition at any time by calling the getAddress
/// method.
JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
- : GetAddress(std::move(GetAddress)), Flags(Flags) {}
+ : GetAddress(std::move(GetAddress)), CachedAddr(0), Flags(Flags) {}
+
+ JITSymbol(const JITSymbol&) = delete;
+ JITSymbol& operator=(const JITSymbol&) = delete;
+
+ JITSymbol(JITSymbol &&Other)
+ : GetAddress(std::move(Other.GetAddress)), Flags(std::move(Other.Flags)) {
+ if (Flags.hasError())
+ Err = std::move(Other.Err);
+ else
+ CachedAddr = std::move(Other.CachedAddr);
+ }
+
+ JITSymbol& operator=(JITSymbol &&Other) {
+ GetAddress = std::move(Other.GetAddress);
+ Flags = std::move(Other.Flags);
+ if (Flags.hasError())
+ Err = std::move(Other.Err);
+ else
+ CachedAddr = std::move(Other.CachedAddr);
+ return *this;
+ }
+
+ ~JITSymbol() {
+ if (Flags.hasError())
+ Err.~Error();
+ else
+ CachedAddr.~JITTargetAddress();
+ }
/// @brief Returns true if the symbol exists, false otherwise.
- explicit operator bool() const { return CachedAddr || GetAddress; }
+ explicit operator bool() const {
+ return !Flags.hasError() && (CachedAddr || GetAddress);
+ }
+
+ /// @brief Move the error field value out of this JITSymbol.
+ Error takeError() {
+ if (Flags.hasError())
+ return std::move(Err);
+ return Error::success();
+ }
/// @brief Get the address of the symbol in the target address space. Returns
/// '0' if the symbol does not exist.
- JITTargetAddress getAddress() {
+ Expected<JITTargetAddress> getAddress() {
+ assert(!Flags.hasError() && "getAddress called on error value");
if (GetAddress) {
- CachedAddr = GetAddress();
- assert(CachedAddr && "Symbol could not be materialized.");
- GetAddress = nullptr;
+ if (auto CachedAddrOrErr = GetAddress()) {
+ GetAddress = nullptr;
+ CachedAddr = *CachedAddrOrErr;
+ assert(CachedAddr && "Symbol could not be materialized.");
+ } else
+ return CachedAddrOrErr.takeError();
}
return CachedAddr;
}
@@ -157,7 +212,10 @@ public:
private:
GetAddressFtor GetAddress;
- JITTargetAddress CachedAddr = 0;
+ union {
+ JITTargetAddress CachedAddr;
+ Error Err;
+ };
JITSymbolFlags Flags;
};
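The JITSymbol changes above make lookup and materialization error-aware. A hedged usage sketch that mirrors the patterns used elsewhere in this patch; Layer, the symbol name, and run() are placeholders, not part of the change:

    if (auto Sym = Layer.findSymbol("entry", /*ExportedSymbolsOnly=*/true)) {
      if (auto AddrOrErr = Sym.getAddress())
        run(*AddrOrErr);               // materialization succeeded
      else
        return AddrOrErr.takeError();  // materialization failed
    } else if (auto Err = Sym.takeError())
      return std::move(Err);           // the lookup itself reported an error
    // a falsy symbol with no error simply means "not found"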
diff --git a/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
index 8ac1b6bca0a7..c1acca386820 100644
--- a/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -146,7 +146,7 @@ private:
std::unique_ptr<JITSymbolResolver>)>;
struct SourceModuleEntry {
- std::unique_ptr<ResourceOwner<Module>> SourceMod;
+ std::shared_ptr<Module> SourceMod;
std::set<Function*> StubsToClone;
};
@@ -154,7 +154,7 @@ private:
using SourceModuleHandle = typename SourceModulesList::size_type;
SourceModuleHandle
- addSourceModule(std::unique_ptr<ResourceOwner<Module>> M) {
+ addSourceModule(std::shared_ptr<Module> M) {
SourceModuleHandle H = SourceModules.size();
SourceModules.push_back(SourceModuleEntry());
SourceModules.back().SourceMod = std::move(M);
@@ -162,7 +162,7 @@ private:
}
Module& getSourceModule(SourceModuleHandle H) {
- return SourceModules[H].SourceMod->getResource();
+ return *SourceModules[H].SourceMod;
}
std::set<Function*>& getStubsToClone(SourceModuleHandle H) {
@@ -176,19 +176,21 @@ private:
for (auto BLH : BaseLayerHandles)
if (auto Sym = BaseLayer.findSymbolIn(BLH, Name, ExportedSymbolsOnly))
return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
return nullptr;
}
- void removeModulesFromBaseLayer(BaseLayerT &BaseLayer) {
+ Error removeModulesFromBaseLayer(BaseLayerT &BaseLayer) {
for (auto &BLH : BaseLayerHandles)
- BaseLayer.removeModule(BLH);
+ if (auto Err = BaseLayer.removeModule(BLH))
+ return Err;
+ return Error::success();
}
- std::unique_ptr<JITSymbolResolver> ExternalSymbolResolver;
- std::unique_ptr<ResourceOwner<RuntimeDyld::MemoryManager>> MemMgr;
+ std::shared_ptr<JITSymbolResolver> ExternalSymbolResolver;
std::unique_ptr<IndirectStubsMgrT> StubsMgr;
StaticGlobalRenamer StaticRenamer;
- ModuleAdderFtor ModuleAdder;
SourceModulesList SourceModules;
std::vector<BaseLayerModuleHandleT> BaseLayerHandles;
};
@@ -196,6 +198,7 @@ private:
using LogicalDylibList = std::list<LogicalDylib>;
public:
+
/// @brief Handle to loaded module.
using ModuleHandleT = typename LogicalDylibList::iterator;
@@ -217,48 +220,41 @@ public:
CloneStubsIntoPartitions(CloneStubsIntoPartitions) {}
~CompileOnDemandLayer() {
+ // FIXME: Report error on log.
while (!LogicalDylibs.empty())
- removeModule(LogicalDylibs.begin());
+ consumeError(removeModule(LogicalDylibs.begin()));
}
/// @brief Add a module to the compile-on-demand layer.
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- ModuleHandleT addModule(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
+ Expected<ModuleHandleT>
+ addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
LogicalDylibs.push_back(LogicalDylib());
auto &LD = LogicalDylibs.back();
LD.ExternalSymbolResolver = std::move(Resolver);
LD.StubsMgr = CreateIndirectStubsManager();
- auto &MemMgrRef = *MemMgr;
- LD.MemMgr = wrapOwnership<RuntimeDyld::MemoryManager>(std::move(MemMgr));
-
- LD.ModuleAdder =
- [&MemMgrRef](BaseLayerT &B, std::unique_ptr<Module> M,
- std::unique_ptr<JITSymbolResolver> R) {
- return B.addModule(std::move(M), &MemMgrRef, std::move(R));
- };
-
// Process each of the modules in this module set.
- addLogicalModule(LogicalDylibs.back(), std::move(M));
+ if (auto Err = addLogicalModule(LD, std::move(M)))
+ return std::move(Err);
return std::prev(LogicalDylibs.end());
}
/// @brief Add extra modules to an existing logical module.
- void addExtraModule(ModuleHandleT H, std::shared_ptr<Module> M) {
- addLogicalModule(*H, std::move(M));
+ Error addExtraModule(ModuleHandleT H, std::shared_ptr<Module> M) {
+ return addLogicalModule(*H, std::move(M));
}
/// @brief Remove the module represented by the given handle.
///
/// This will remove all modules in the layers below that were derived from
/// the module represented by H.
- void removeModule(ModuleHandleT H) {
- H->removeModulesFromBaseLayer(BaseLayer);
+ Error removeModule(ModuleHandleT H) {
+ auto Err = H->removeModulesFromBaseLayer(BaseLayer);
LogicalDylibs.erase(H);
+ return Err;
}
/// @brief Search for the given named symbol.
@@ -272,6 +268,8 @@ public:
return Sym;
if (auto Sym = findSymbolIn(LDI, Name, ExportedSymbolsOnly))
return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
}
return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
}
@@ -309,8 +307,9 @@ public:
}
private:
- template <typename ModulePtrT>
- void addLogicalModule(LogicalDylib &LD, ModulePtrT SrcMPtr) {
+
+ Error addLogicalModule(LogicalDylib &LD, std::shared_ptr<Module> SrcMPtr) {
+
// Rename all static functions / globals to $static.X :
// This will unique the names across all modules in the logical dylib,
// simplifying symbol lookup.
@@ -322,7 +321,7 @@ private:
// Create a logical module handle for SrcM within the logical dylib.
Module &SrcM = *SrcMPtr;
- auto LMId = LD.addSourceModule(wrapOwnership<Module>(std::move(SrcMPtr)));
+ auto LMId = LD.addSourceModule(std::move(SrcMPtr));
// Create stub functions.
const DataLayout &DL = SrcM.getDataLayout();
@@ -335,9 +334,12 @@ private:
// Skip weak functions for which we already have definitions.
auto MangledName = mangle(F.getName(), DL);
- if (F.hasWeakLinkage() || F.hasLinkOnceLinkage())
+ if (F.hasWeakLinkage() || F.hasLinkOnceLinkage()) {
if (auto Sym = LD.findSymbol(BaseLayer, MangledName, false))
continue;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ }
// Record all functions defined by this module.
if (CloneStubsIntoPartitions)
@@ -350,9 +352,15 @@ private:
StubInits[MangledName] =
std::make_pair(CCInfo.getAddress(),
JITSymbolFlags::fromGlobalValue(F));
- CCInfo.setCompileAction([this, &LD, LMId, &F]() {
- return this->extractAndCompile(LD, LMId, F);
- });
+ CCInfo.setCompileAction([this, &LD, LMId, &F]() -> JITTargetAddress {
+ if (auto FnImplAddrOrErr = this->extractAndCompile(LD, LMId, F))
+ return *FnImplAddrOrErr;
+ else {
+ // FIXME: Report error, return to 'abort' or something similar.
+ consumeError(FnImplAddrOrErr.takeError());
+ return 0;
+ }
+ });
}
auto EC = LD.StubsMgr->createStubs(StubInits);
@@ -367,7 +375,7 @@ private:
// empty globals module.
if (SrcM.global_empty() && SrcM.alias_empty() &&
!SrcM.getModuleFlagsMetadata())
- return;
+ return Error::success();
// Create the GlobalValues module.
auto GVsM = llvm::make_unique<Module>((SrcM.getName() + ".globals").str(),
@@ -393,8 +401,9 @@ private:
// Initializers may refer to functions declared (but not defined) in this
// module. Build a materializer to clone decls on demand.
+ Error MaterializerErrors = Error::success();
auto Materializer = createLambdaMaterializer(
- [&LD, &GVsM](Value *V) -> Value* {
+ [&LD, &GVsM, &MaterializerErrors](Value *V) -> Value* {
if (auto *F = dyn_cast<Function>(V)) {
// Decls in the original module just get cloned.
if (F->isDeclaration())
@@ -405,13 +414,24 @@ private:
// instead.
const DataLayout &DL = GVsM->getDataLayout();
std::string FName = mangle(F->getName(), DL);
- auto StubSym = LD.StubsMgr->findStub(FName, false);
unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(F->getType());
- ConstantInt *StubAddr =
- ConstantInt::get(GVsM->getContext(),
- APInt(PtrBitWidth, StubSym.getAddress()));
+ JITTargetAddress StubAddr = 0;
+
+ // Get the address for the stub. If we encounter an error while
+ // doing so, stash it in the MaterializerErrors variable and use a
+ // null address as a placeholder.
+ if (auto StubSym = LD.StubsMgr->findStub(FName, false)) {
+ if (auto StubAddrOrErr = StubSym.getAddress())
+ StubAddr = *StubAddrOrErr;
+ else
+ MaterializerErrors = joinErrors(std::move(MaterializerErrors),
+ StubAddrOrErr.takeError());
+ }
+
+ ConstantInt *StubAddrCI =
+ ConstantInt::get(GVsM->getContext(), APInt(PtrBitWidth, StubAddr));
Constant *Init = ConstantExpr::getCast(Instruction::IntToPtr,
- StubAddr, F->getType());
+ StubAddrCI, F->getType());
return GlobalAlias::create(F->getFunctionType(),
F->getType()->getAddressSpace(),
F->getLinkage(), F->getName(),
@@ -435,22 +455,31 @@ private:
NewA->setAliasee(cast<Constant>(Init));
}
+ if (MaterializerErrors)
+ return MaterializerErrors;
+
// Build a resolver for the globals module and add it to the base layer.
auto GVsResolver = createLambdaResolver(
- [this, &LD](const std::string &Name) {
+ [this, &LD](const std::string &Name) -> JITSymbol {
if (auto Sym = LD.StubsMgr->findStub(Name, false))
return Sym;
if (auto Sym = LD.findSymbol(BaseLayer, Name, false))
return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
return LD.ExternalSymbolResolver->findSymbolInLogicalDylib(Name);
},
[&LD](const std::string &Name) {
return LD.ExternalSymbolResolver->findSymbol(Name);
});
- auto GVsH = LD.ModuleAdder(BaseLayer, std::move(GVsM),
- std::move(GVsResolver));
- LD.BaseLayerHandles.push_back(GVsH);
+ if (auto GVsHOrErr =
+ BaseLayer.addModule(std::move(GVsM), std::move(GVsResolver)))
+ LD.BaseLayerHandles.push_back(*GVsHOrErr);
+ else
+ return GVsHOrErr.takeError();
+
+ return Error::success();
}
static std::string mangle(StringRef Name, const DataLayout &DL) {
@@ -462,7 +491,7 @@ private:
return MangledName;
}
- JITTargetAddress
+ Expected<JITTargetAddress>
extractAndCompile(LogicalDylib &LD,
typename LogicalDylib::SourceModuleHandle LMId,
Function &F) {
@@ -475,34 +504,42 @@ private:
// Grab the name of the function being called here.
std::string CalledFnName = mangle(F.getName(), SrcM.getDataLayout());
- auto Part = Partition(F);
- auto PartH = emitPartition(LD, LMId, Part);
-
JITTargetAddress CalledAddr = 0;
- for (auto *SubF : Part) {
- std::string FnName = mangle(SubF->getName(), SrcM.getDataLayout());
- auto FnBodySym = BaseLayer.findSymbolIn(PartH, FnName, false);
- assert(FnBodySym && "Couldn't find function body.");
-
- JITTargetAddress FnBodyAddr = FnBodySym.getAddress();
-
- // If this is the function we're calling record the address so we can
- // return it from this function.
- if (SubF == &F)
- CalledAddr = FnBodyAddr;
-
- // Update the function body pointer for the stub.
- if (auto EC = LD.StubsMgr->updatePointer(FnName, FnBodyAddr))
- return 0;
- }
+ auto Part = Partition(F);
+ if (auto PartHOrErr = emitPartition(LD, LMId, Part)) {
+ auto &PartH = *PartHOrErr;
+ for (auto *SubF : Part) {
+ std::string FnName = mangle(SubF->getName(), SrcM.getDataLayout());
+ if (auto FnBodySym = BaseLayer.findSymbolIn(PartH, FnName, false)) {
+ if (auto FnBodyAddrOrErr = FnBodySym.getAddress()) {
+ JITTargetAddress FnBodyAddr = *FnBodyAddrOrErr;
+
+ // If this is the function we're calling record the address so we can
+ // return it from this function.
+ if (SubF == &F)
+ CalledAddr = FnBodyAddr;
+
+ // Update the function body pointer for the stub.
+ if (auto EC = LD.StubsMgr->updatePointer(FnName, FnBodyAddr))
+ return 0;
+
+ } else
+ return FnBodyAddrOrErr.takeError();
+ } else if (auto Err = FnBodySym.takeError())
+ return std::move(Err);
+ else
+ llvm_unreachable("Function not emitted for partition");
+ }
- LD.BaseLayerHandles.push_back(PartH);
+ LD.BaseLayerHandles.push_back(PartH);
+ } else
+ return PartHOrErr.takeError();
return CalledAddr;
}
template <typename PartitionT>
- BaseLayerModuleHandleT
+ Expected<BaseLayerModuleHandleT>
emitPartition(LogicalDylib &LD,
typename LogicalDylib::SourceModuleHandle LMId,
const PartitionT &Part) {
@@ -566,16 +603,18 @@ private:
// Create memory manager and symbol resolver.
auto Resolver = createLambdaResolver(
- [this, &LD](const std::string &Name) {
+ [this, &LD](const std::string &Name) -> JITSymbol {
if (auto Sym = LD.findSymbol(BaseLayer, Name, false))
return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
return LD.ExternalSymbolResolver->findSymbolInLogicalDylib(Name);
},
[&LD](const std::string &Name) {
return LD.ExternalSymbolResolver->findSymbol(Name);
});
- return LD.ModuleAdder(BaseLayer, std::move(M), std::move(Resolver));
+ return BaseLayer.addModule(std::move(M), std::move(Resolver));
}
BaseLayerT &BaseLayer;
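As the revised CompileOnDemandLayer signatures above imply, adding and removing modules can now fail. A brief caller-side sketch, assuming CODLayer, M, and Resolver are defined elsewhere:

    auto HandleOrErr = CODLayer.addModule(std::move(M), std::move(Resolver));
    if (!HandleOrErr)
      return HandleOrErr.takeError();   // the module could not be added
    auto H = *HandleOrErr;
    // ... later, removal may also report an error:
    if (auto Err = CODLayer.removeModule(H))
      return Err;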
diff --git a/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
index bf8cca406844..d9b45c6a1e29 100644
--- a/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
+++ b/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -17,6 +17,8 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
#include <algorithm>
#include <cstdint>
#include <string>
@@ -99,19 +101,24 @@ public:
/// @brief Run the recorded constructors/destructors through the given JIT
/// layer.
- bool runViaLayer(JITLayerT &JITLayer) const {
+ Error runViaLayer(JITLayerT &JITLayer) const {
using CtorDtorTy = void (*)();
- bool Error = false;
for (const auto &CtorDtorName : CtorDtorNames)
if (auto CtorDtorSym = JITLayer.findSymbolIn(H, CtorDtorName, false)) {
- CtorDtorTy CtorDtor =
- reinterpret_cast<CtorDtorTy>(
- static_cast<uintptr_t>(CtorDtorSym.getAddress()));
- CtorDtor();
- } else
- Error = true;
- return !Error;
+ if (auto AddrOrErr = CtorDtorSym.getAddress()) {
+ CtorDtorTy CtorDtor =
+ reinterpret_cast<CtorDtorTy>(static_cast<uintptr_t>(*AddrOrErr));
+ CtorDtor();
+ } else
+ return AddrOrErr.takeError();
+ } else {
+ if (auto Err = CtorDtorSym.takeError())
+ return Err;
+ else
+ return make_error<JITSymbolNotFound>(CtorDtorName);
+ }
+ return Error::success();
}
private:
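runViaLayer() above now reports failures as llvm::Error rather than a bool. A small sketch of the adjusted call site, assuming a recorded-constructor runner named CtorRunner and a JIT layer named CompileLayer:

    if (auto Err = CtorRunner.runViaLayer(CompileLayer))
      return Err; // may carry, e.g., a JITSymbolNotFound for a missing ctor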
diff --git a/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h b/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
index d582e9a33241..ff54ef625ebb 100644
--- a/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
@@ -17,9 +17,14 @@
#include "llvm/ExecutionEngine/JITSymbol.h"
#include <map>
+#include <memory>
#include <string>
namespace llvm {
+
+class Module;
+class JITSymbolResolver;
+
namespace orc {
/// @brief Global mapping layer.
@@ -32,25 +37,22 @@ namespace orc {
template <typename BaseLayerT>
class GlobalMappingLayer {
public:
- /// @brief Handle to a set of added modules.
- using ModuleSetHandleT = typename BaseLayerT::ModuleSetHandleT;
+
+ /// @brief Handle to an added module.
+ using ModuleHandleT = typename BaseLayerT::ModuleHandleT;
/// @brief Construct an GlobalMappingLayer with the given BaseLayer
GlobalMappingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
- /// @brief Add the given module set to the JIT.
+ /// @brief Add the given module to the JIT.
/// @return A handle for the added modules.
- template <typename ModuleSetT, typename MemoryManagerPtrT,
- typename SymbolResolverPtrT>
- ModuleSetHandleT addModuleSet(ModuleSetT Ms,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
- return BaseLayer.addModuleSet(std::move(Ms), std::move(MemMgr),
- std::move(Resolver));
+ ModuleHandleT addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
+ return BaseLayer.addModule(std::move(M), std::move(Resolver));
}
/// @brief Remove the module set associated with the handle H.
- void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeModuleSet(H); }
+ void removeModule(ModuleHandleT H) { BaseLayer.removeModule(H); }
/// @brief Manually set the address to return for the given symbol.
void setGlobalMapping(const std::string &Name, JITTargetAddress Addr) {
@@ -78,15 +80,15 @@ public:
return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
}
- /// @brief Get the address of the given symbol in the context of the set of
- /// modules represented by the handle H. This call is forwarded to the
+ /// @brief Get the address of the given symbol in the context of the
+ /// module represented by the handle H. This call is forwarded to the
/// base layer's implementation.
- /// @param H The handle for the module set to search in.
+ /// @param H The handle for the module to search in.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it is found in the
- /// given module set.
- JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ /// given module.
+ JITSymbol findSymbolIn(ModuleHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
}
@@ -94,7 +96,7 @@ public:
/// @brief Immediately emit and finalize the module set represented by the
/// given handle.
/// @param H Handle for module set to emit/finalize.
- void emitAndFinalize(ModuleSetHandleT H) {
+ void emitAndFinalize(ModuleHandleT H) {
BaseLayer.emitAndFinalize(H);
}
diff --git a/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h b/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
index 99ccd4d221a5..fadd334bed0f 100644
--- a/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
@@ -50,18 +50,18 @@ public:
/// along with the given memory manager and symbol resolver.
///
/// @return A handle for the added module.
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- ModuleHandleT addModule(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
+ Expected<ModuleHandleT>
+ addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
using CompileResult = decltype(Compile(*M));
auto Obj = std::make_shared<CompileResult>(Compile(*M));
- return BaseLayer.addObject(std::move(Obj), std::move(MemMgr),
- std::move(Resolver));
+ return BaseLayer.addObject(std::move(Obj), std::move(Resolver));
}
/// @brief Remove the module associated with the handle H.
- void removeModule(ModuleHandleT H) { BaseLayer.removeObject(H); }
+ Error removeModule(ModuleHandleT H) {
+ return BaseLayer.removeObject(H);
+ }
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
@@ -87,8 +87,8 @@ public:
/// @brief Immediately emit and finalize the module represented by the given
/// handle.
/// @param H Handle for module to emit/finalize.
- void emitAndFinalize(ModuleHandleT H) {
- BaseLayer.emitAndFinalize(H);
+ Error emitAndFinalize(ModuleHandleT H) {
+ return BaseLayer.emitAndFinalize(H);
}
private:
diff --git a/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h b/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
index cf6556a33bbd..476061afda59 100644
--- a/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
@@ -42,16 +42,14 @@ public:
/// the layer below, along with the memory manager and symbol resolver.
///
/// @return A handle for the added modules.
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- ModuleHandleT addModule(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
- return BaseLayer.addModule(Transform(std::move(M)), std::move(MemMgr),
- std::move(Resolver));
+ Expected<ModuleHandleT>
+ addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
+ return BaseLayer.addModule(Transform(std::move(M)), std::move(Resolver));
}
/// @brief Remove the module associated with the handle H.
- void removeModule(ModuleHandleT H) { BaseLayer.removeModule(H); }
+ Error removeModule(ModuleHandleT H) { return BaseLayer.removeModule(H); }
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
@@ -77,8 +75,8 @@ public:
/// @brief Immediately emit and finalize the module represented by the given
/// handle.
/// @param H Handle for module to emit/finalize.
- void emitAndFinalize(ModuleHandleT H) {
- BaseLayer.emitAndFinalize(H);
+ Error emitAndFinalize(ModuleHandleT H) {
+ return BaseLayer.emitAndFinalize(H);
}
/// @brief Access the transform functor directly.
diff --git a/include/llvm/ExecutionEngine/Orc/LambdaResolver.h b/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
index 6868640d38e8..228392ae0d4a 100644
--- a/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
+++ b/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
@@ -45,7 +45,7 @@ private:
template <typename DylibLookupFtorT,
typename ExternalLookupFtorT>
-std::unique_ptr<LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>>
+std::shared_ptr<LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>>
createLambdaResolver(DylibLookupFtorT DylibLookupFtor,
ExternalLookupFtorT ExternalLookupFtor) {
using LR = LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>;
diff --git a/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h b/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
index 38769aac12af..6c951fab6185 100644
--- a/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
@@ -46,8 +46,9 @@ public:
private:
class EmissionDeferredModule {
public:
- EmissionDeferredModule() = default;
- virtual ~EmissionDeferredModule() = default;
+ EmissionDeferredModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver)
+ : M(std::move(M)), Resolver(std::move(Resolver)) {}
JITSymbol find(StringRef Name, bool ExportedSymbolsOnly, BaseLayerT &B) {
switch (EmitState) {
@@ -59,16 +60,24 @@ private:
std::string PName = Name;
JITSymbolFlags Flags = JITSymbolFlags::fromGlobalValue(*GV);
auto GetAddress =
- [this, ExportedSymbolsOnly, PName, &B]() -> JITTargetAddress {
+ [this, ExportedSymbolsOnly, PName, &B]() -> Expected<JITTargetAddress> {
if (this->EmitState == Emitting)
return 0;
else if (this->EmitState == NotEmitted) {
this->EmitState = Emitting;
- Handle = this->emitToBaseLayer(B);
+ if (auto HandleOrErr = this->emitToBaseLayer(B))
+ Handle = std::move(*HandleOrErr);
+ else
+ return HandleOrErr.takeError();
this->EmitState = Emitted;
}
- auto Sym = B.findSymbolIn(Handle, PName, ExportedSymbolsOnly);
- return Sym.getAddress();
+ if (auto Sym = B.findSymbolIn(Handle, PName, ExportedSymbolsOnly))
+ return Sym.getAddress();
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ else
+ llvm_unreachable("Successful symbol lookup should return "
+ "definition address here");
};
return JITSymbol(std::move(GetAddress), Flags);
} else
@@ -101,33 +110,10 @@ private:
BaseLayer.emitAndFinalize(Handle);
}
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- static std::unique_ptr<EmissionDeferredModule>
- create(BaseLayerT &B, std::shared_ptr<Module> M, MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver);
-
- protected:
- virtual const GlobalValue* searchGVs(StringRef Name,
- bool ExportedSymbolsOnly) const = 0;
- virtual BaseLayerHandleT emitToBaseLayer(BaseLayerT &BaseLayer) = 0;
-
private:
- enum { NotEmitted, Emitting, Emitted } EmitState = NotEmitted;
- BaseLayerHandleT Handle;
- };
-
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- class EmissionDeferredModuleImpl : public EmissionDeferredModule {
- public:
- EmissionDeferredModuleImpl(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver)
- : M(std::move(M)), MemMgr(std::move(MemMgr)),
- Resolver(std::move(Resolver)) {}
- protected:
const GlobalValue* searchGVs(StringRef Name,
- bool ExportedSymbolsOnly) const override {
+ bool ExportedSymbolsOnly) const {
// FIXME: We could clean all this up if we had a way to reliably demangle
// names: We could just demangle name and search, rather than
// mangling everything else.
@@ -149,15 +135,13 @@ private:
return buildMangledSymbols(Name, ExportedSymbolsOnly);
}
- BaseLayerHandleT emitToBaseLayer(BaseLayerT &BaseLayer) override {
+ Expected<BaseLayerHandleT> emitToBaseLayer(BaseLayerT &BaseLayer) {
// We don't need the mangled names set any more: Once we've emitted this
// to the base layer we'll just look for symbols there.
MangledSymbols.reset();
- return BaseLayer.addModule(std::move(M), std::move(MemMgr),
- std::move(Resolver));
+ return BaseLayer.addModule(std::move(M), std::move(Resolver));
}
- private:
// If the mangled name of the given GlobalValue matches the given search
// name (and its visibility conforms to the ExportedSymbolsOnly flag) then
// return the symbol. Otherwise, add the mangled name to the Names map and
@@ -207,9 +191,10 @@ private:
return nullptr;
}
+ enum { NotEmitted, Emitting, Emitted } EmitState = NotEmitted;
+ BaseLayerHandleT Handle;
std::shared_ptr<Module> M;
- MemoryManagerPtrT MemMgr;
- SymbolResolverPtrT Resolver;
+ std::shared_ptr<JITSymbolResolver> Resolver;
mutable std::unique_ptr<StringMap<const GlobalValue*>> MangledSymbols;
};
@@ -219,6 +204,7 @@ private:
ModuleListT ModuleList;
public:
+
/// @brief Handle to a loaded module.
using ModuleHandleT = typename ModuleListT::iterator;
@@ -226,24 +212,23 @@ public:
LazyEmittingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
/// @brief Add the given module to the lazy emitting layer.
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- ModuleHandleT addModule(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
+ Expected<ModuleHandleT>
+ addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
return ModuleList.insert(
ModuleList.end(),
- EmissionDeferredModule::create(BaseLayer, std::move(M),
- std::move(MemMgr),
- std::move(Resolver)));
+ llvm::make_unique<EmissionDeferredModule>(std::move(M),
+ std::move(Resolver)));
}
/// @brief Remove the module represented by the given handle.
///
/// This method will free the memory associated with the given module, both
/// in this layer, and the base layer.
- void removeModule(ModuleHandleT H) {
+ Error removeModule(ModuleHandleT H) {
(*H)->removeModuleFromBaseLayer(BaseLayer);
ModuleList.erase(H);
+ return Error::success();
}
/// @brief Search for the given named symbol.
@@ -276,22 +261,11 @@ public:
/// @brief Immediately emit and finalize the module represented by the given
/// handle.
/// @param H Handle for module to emit/finalize.
- void emitAndFinalize(ModuleHandleT H) {
- (*H)->emitAndFinalize(BaseLayer);
+ Error emitAndFinalize(ModuleHandleT H) {
+ return (*H)->emitAndFinalize(BaseLayer);
}
};
-template <typename BaseLayerT>
-template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
-std::unique_ptr<typename LazyEmittingLayer<BaseLayerT>::EmissionDeferredModule>
-LazyEmittingLayer<BaseLayerT>::EmissionDeferredModule::create(
- BaseLayerT &B, std::shared_ptr<Module> M, MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
- using EDS = EmissionDeferredModuleImpl<MemoryManagerPtrT, SymbolResolverPtrT>;
- return llvm::make_unique<EDS>(std::move(M), std::move(MemMgr),
- std::move(Resolver));
-}
-
} // end namespace orc
} // end namespace llvm
diff --git a/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h b/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
index c41c1233c0d9..cb47e7520b1a 100644
--- a/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
@@ -16,6 +16,7 @@
#include "llvm/ExecutionEngine/JITSymbol.h"
#include <algorithm>
+#include <memory>
#include <string>
namespace llvm {
@@ -42,16 +43,14 @@ public:
/// memory manager and symbol resolver.
///
/// @return A handle for the added objects.
- template <typename ObjPtrT, typename MemoryManagerPtrT,
- typename SymbolResolverPtrT>
- ObjHandleT addObject(ObjPtrT Obj, MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
- return BaseLayer.addObject(Transform(std::move(Obj)), std::move(MemMgr),
- std::move(Resolver));
+ template <typename ObjectPtr>
+ Expected<ObjHandleT> addObject(ObjectPtr Obj,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
+ return BaseLayer.addObject(Transform(std::move(Obj)), std::move(Resolver));
}
/// @brief Remove the object set associated with the handle H.
- void removeObject(ObjHandleT H) { BaseLayer.removeObject(H); }
+ Error removeObject(ObjHandleT H) { return BaseLayer.removeObject(H); }
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
@@ -77,7 +76,9 @@ public:
/// @brief Immediately emit and finalize the object set represented by the
/// given handle.
/// @param H Handle for object set to emit/finalize.
- void emitAndFinalize(ObjHandleT H) { BaseLayer.emitAndFinalize(H); }
+ Error emitAndFinalize(ObjHandleT H) {
+ return BaseLayer.emitAndFinalize(H);
+ }
/// @brief Map section addresses for the objects associated with the handle H.
void mapSectionAddress(ObjHandleT H, const void *LocalAddress,
diff --git a/include/llvm/ExecutionEngine/Orc/OrcError.h b/include/llvm/ExecutionEngine/Orc/OrcError.h
index cbb40fad0223..e6374b70967a 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcError.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcError.h
@@ -22,7 +22,8 @@ namespace orc {
enum class OrcErrorCode : int {
// RPC Errors
- RemoteAllocatorDoesNotExist = 1,
+ JITSymbolNotFound = 1,
+ RemoteAllocatorDoesNotExist,
RemoteAllocatorIdAlreadyInUse,
RemoteMProtectAddrUnrecognized,
RemoteIndirectStubsOwnerDoesNotExist,
@@ -37,6 +38,18 @@ enum class OrcErrorCode : int {
std::error_code orcError(OrcErrorCode ErrCode);
+class JITSymbolNotFound : public ErrorInfo<JITSymbolNotFound> {
+public:
+ static char ID;
+
+ JITSymbolNotFound(std::string SymbolName);
+ std::error_code convertToErrorCode() const override;
+ void log(raw_ostream &OS) const override;
+ const std::string &getSymbolName() const;
+private:
+ std::string SymbolName;
+};
+
} // End namespace orc.
} // End namespace llvm.
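The new JITSymbolNotFound error class can be matched by type when handling lookup failures. A sketch, with namespace qualifiers omitted and an Error value Err assumed to have come from one of the layers above:

    handleAllErrors(std::move(Err), [](const JITSymbolNotFound &E) {
      errs() << "Missing JIT symbol: " << E.getSymbolName() << "\n";
    });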
diff --git a/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h b/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
index 66ad36be01c8..e1016ef95f0c 100644
--- a/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
@@ -228,13 +228,20 @@ private:
public:
+ /// @brief Functor for creating memory managers.
+ using MemoryManagerGetter =
+ std::function<std::shared_ptr<RuntimeDyld::MemoryManager>()>;
+
/// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded,
/// and NotifyFinalized functors.
RTDyldObjectLinkingLayer(
+ MemoryManagerGetter GetMemMgr,
NotifyLoadedFtor NotifyLoaded = NotifyLoadedFtor(),
NotifyFinalizedFtor NotifyFinalized = NotifyFinalizedFtor())
- : NotifyLoaded(std::move(NotifyLoaded)),
- NotifyFinalized(std::move(NotifyFinalized)) {}
+ : GetMemMgr(GetMemMgr),
+ NotifyLoaded(std::move(NotifyLoaded)),
+ NotifyFinalized(std::move(NotifyFinalized)),
+ ProcessAllSections(false) {}
/// @brief Set the 'ProcessAllSections' flag.
///
@@ -251,12 +258,8 @@ public:
///
/// @return A handle that can be used to refer to the loaded objects (for
/// symbol searching, finalization, freeing memory, etc.).
- template <typename MemoryManagerPtrT,
- typename SymbolResolverPtrT>
- ObjHandleT addObject(ObjectPtr Obj,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
-
+ Expected<ObjHandleT> addObject(ObjectPtr Obj,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
auto Finalizer = [&](ObjHandleT H, RuntimeDyld &RTDyld,
const ObjectPtr &ObjToLoad,
std::function<void()> LOSHandleLoad) {
@@ -275,8 +278,9 @@ public:
};
auto LO =
- createLinkedObject(std::move(Obj), std::move(MemMgr), std::move(Resolver),
- std::move(Finalizer), ProcessAllSections);
+ createLinkedObject(std::move(Obj), GetMemMgr(),
+ std::move(Resolver), std::move(Finalizer),
+ ProcessAllSections);
// LOS is an owning-ptr. Keep a non-owning one so that we can set the handle
// below.
auto *LOPtr = LO.get();
@@ -295,9 +299,10 @@ public:
/// indirectly) will result in undefined behavior. If dependence tracking is
/// required to detect or resolve such issues it should be added at a higher
/// layer.
- void removeObject(ObjHandleT H) {
+ Error removeObject(ObjHandleT H) {
// How do we invalidate the symbols in H?
LinkedObjList.erase(H);
+ return Error::success();
}
/// @brief Search for the given named symbol.
@@ -334,13 +339,15 @@ public:
/// @brief Immediately emit and finalize the object set represented by the
/// given handle.
/// @param H Handle for object set to emit/finalize.
- void emitAndFinalize(ObjHandleT H) {
+ Error emitAndFinalize(ObjHandleT H) {
(*H)->finalize();
+ return Error::success();
}
private:
LinkedObjectListT LinkedObjList;
+ MemoryManagerGetter GetMemMgr;
NotifyLoadedFtor NotifyLoaded;
NotifyFinalizedFtor NotifyFinalized;
bool ProcessAllSections = false;
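With the constructor above, the memory-manager factory is supplied once when the layer is built. A hedged sketch; SectionMemoryManager is assumed to come from llvm/ExecutionEngine/SectionMemoryManager.h and is not touched by this patch:

    RTDyldObjectLinkingLayer ObjLayer(
        []() { return std::make_shared<SectionMemoryManager>(); });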
diff --git a/include/llvm/ExecutionEngine/RuntimeDyld.h b/include/llvm/ExecutionEngine/RuntimeDyld.h
index 1925489f7952..56aa04ce694a 100644
--- a/include/llvm/ExecutionEngine/RuntimeDyld.h
+++ b/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -88,21 +88,6 @@ public:
ObjSectionToIDMap ObjSecToIDMap;
};
- template <typename Derived> struct LoadedObjectInfoHelper : LoadedObjectInfo {
- protected:
- LoadedObjectInfoHelper(const LoadedObjectInfoHelper &) = default;
- LoadedObjectInfoHelper() = default;
-
- public:
- LoadedObjectInfoHelper(RuntimeDyldImpl &RTDyld,
- LoadedObjectInfo::ObjSectionToIDMap ObjSecToIDMap)
- : LoadedObjectInfo(RTDyld, std::move(ObjSecToIDMap)) {}
-
- std::unique_ptr<llvm::LoadedObjectInfo> clone() const override {
- return llvm::make_unique<Derived>(static_cast<const Derived &>(*this));
- }
- };
-
/// \brief Memory Management.
class MemoryManager {
friend class RuntimeDyld;
diff --git a/include/llvm/IR/Constants.h b/include/llvm/IR/Constants.h
index 8b3a90fa065b..2e72c41ccee3 100644
--- a/include/llvm/IR/Constants.h
+++ b/include/llvm/IR/Constants.h
@@ -680,11 +680,6 @@ class ConstantDataArray final : public ConstantDataSequential {
explicit ConstantDataArray(Type *ty, const char *Data)
: ConstantDataSequential(ty, ConstantDataArrayVal, Data) {}
- /// Allocate space for exactly zero operands.
- void *operator new(size_t s) {
- return User::operator new(s, 0);
- }
-
public:
ConstantDataArray(const ConstantDataArray &) = delete;
@@ -739,11 +734,6 @@ class ConstantDataVector final : public ConstantDataSequential {
explicit ConstantDataVector(Type *ty, const char *Data)
: ConstantDataSequential(ty, ConstantDataVectorVal, Data) {}
- // allocate space for exactly zero operands.
- void *operator new(size_t s) {
- return User::operator new(s, 0);
- }
-
public:
ConstantDataVector(const ConstantDataVector &) = delete;
diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index ec33f82f7022..5344a93efb33 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -1062,7 +1062,7 @@ public:
Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *RC = dyn_cast<Constant>(RHS)) {
- if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isAllOnesValue())
+ if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
return LHS; // LHS & -1 -> LHS
if (Constant *LC = dyn_cast<Constant>(LHS))
return Insert(Folder.CreateAnd(LC, RC), Name);
@@ -1203,22 +1203,22 @@ public:
return SI;
}
FenceInst *CreateFence(AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
const Twine &Name = "") {
- return Insert(new FenceInst(Context, Ordering, SynchScope), Name);
+ return Insert(new FenceInst(Context, Ordering, SSID), Name);
}
AtomicCmpXchgInst *
CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope = CrossThread) {
+ SyncScope::ID SSID = SyncScope::System) {
return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
- FailureOrdering, SynchScope));
+ FailureOrdering, SSID));
}
AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread) {
- return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SynchScope));
+ SyncScope::ID SSID = SyncScope::System) {
+ return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SSID));
}
Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &Name = "") {
@@ -1517,11 +1517,9 @@ public:
const Twine &Name = "") {
if (V->getType() == DestTy)
return V;
- if (V->getType()->getScalarType()->isPointerTy() &&
- DestTy->getScalarType()->isIntegerTy())
+ if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
return CreatePtrToInt(V, DestTy, Name);
- if (V->getType()->getScalarType()->isIntegerTy() &&
- DestTy->getScalarType()->isPointerTy())
+ if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
return CreateIntToPtr(V, DestTy, Name);
return CreateBitCast(V, DestTy, Name);
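The fence and atomic helpers above now take a SyncScope::ID in place of SynchronizationScope. A short sketch, assuming an IRBuilder<> named Builder and the SyncScope::System / SyncScope::SingleThread IDs predefined in LLVMContext:

    // Cross-thread fence (the default scope):
    Builder.CreateFence(AtomicOrdering::SequentiallyConsistent);
    // Fence ordered only against the same thread's signal handlers:
    Builder.CreateFence(AtomicOrdering::SequentiallyConsistent,
                        SyncScope::SingleThread);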
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index dc5f37450b48..60ae98869e55 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -52,11 +52,6 @@ class ConstantInt;
class DataLayout;
class LLVMContext;
-enum SynchronizationScope {
- SingleThread = 0,
- CrossThread = 1
-};
-
//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//
@@ -195,17 +190,16 @@ public:
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
- AtomicOrdering Order, SynchronizationScope SynchScope = CrossThread,
+ AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr)
: LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
- NameStr, isVolatile, Align, Order, SynchScope, InsertBefore) {}
+ NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
@@ -235,34 +229,34 @@ public:
void setAlignment(unsigned Align);
- /// Returns the ordering effect of this fence.
+ /// Returns the ordering constraint of this load instruction.
AtomicOrdering getOrdering() const {
return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
}
- /// Set the ordering constraint on this load. May not be Release or
- /// AcquireRelease.
+ /// Sets the ordering constraint of this load instruction. May not be Release
+ /// or AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
((unsigned)Ordering << 7));
}
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
+ /// Returns the synchronization scope ID of this load instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Specify whether this load is ordered with respect to all
- /// concurrently executing threads, or only with respect to signal handlers
- /// executing in the same thread.
- void setSynchScope(SynchronizationScope xthread) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
- (xthread << 6));
+ /// Sets the synchronization scope ID of this load instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
+ /// Sets the ordering constraint and the synchronization scope ID of this load
+ /// instruction.
void setAtomic(AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread) {
+ SyncScope::ID SSID = SyncScope::System) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
bool isSimple() const { return !isAtomic() && !isVolatile(); }
@@ -297,6 +291,11 @@ private:
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this load instruction. Not quite enough
+ /// room in SubClassData for everything, so synchronization scope ID gets its
+ /// own field.
+ SyncScope::ID SSID;
};
//===----------------------------------------------------------------------===//
@@ -325,11 +324,10 @@ public:
unsigned Align, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// allocate space for exactly two operands
@@ -356,34 +354,34 @@ public:
void setAlignment(unsigned Align);
- /// Returns the ordering effect of this store.
+ /// Returns the ordering constraint of this store instruction.
AtomicOrdering getOrdering() const {
return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
}
- /// Set the ordering constraint on this store. May not be Acquire or
- /// AcquireRelease.
+ /// Sets the ordering constraint of this store instruction. May not be
+ /// Acquire or AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
((unsigned)Ordering << 7));
}
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
+ /// Returns the synchronization scope ID of this store instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Specify whether this store instruction is ordered with respect to all
- /// concurrently executing threads, or only with respect to signal handlers
- /// executing in the same thread.
- void setSynchScope(SynchronizationScope xthread) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
- (xthread << 6));
+ /// Sets the synchronization scope ID of this store instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
+ /// Sets the ordering constraint and the synchronization scope ID of this
+ /// store instruction.
void setAtomic(AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread) {
+ SyncScope::ID SSID = SyncScope::System) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
bool isSimple() const { return !isAtomic() && !isVolatile(); }
@@ -421,6 +419,11 @@ private:
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this store instruction. Not quite enough
+ /// room in SubClassData for everything, so synchronization scope ID gets its
+ /// own field.
+ SyncScope::ID SSID;
};
template <>
@@ -435,7 +438,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
- void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
@@ -447,10 +450,9 @@ public:
// Ordering may only be Acquire, Release, AcquireRelease, or
// SequentiallyConsistent.
FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
- FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// allocate space for exactly zero operands
@@ -458,28 +460,26 @@ public:
return User::operator new(s, 0);
}
- /// Returns the ordering effect of this fence.
+ /// Returns the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const {
return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
}
- /// Set the ordering constraint on this fence. May only be Acquire, Release,
- /// AcquireRelease, or SequentiallyConsistent.
+ /// Sets the ordering constraint of this fence instruction. May only be
+ /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
((unsigned)Ordering << 1));
}
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope(getSubclassDataFromInstruction() & 1);
+ /// Returns the synchronization scope ID of this fence instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Specify whether this fence orders other operations with respect to all
- /// concurrently executing threads, or only with respect to signal handlers
- /// executing in the same thread.
- void setSynchScope(SynchronizationScope xthread) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
- xthread);
+ /// Sets the synchronization scope ID of this fence instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -496,6 +496,11 @@ private:
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this fence instruction. Not quite enough
+ /// room in SubClassData for everything, so synchronization scope ID gets its
+ /// own field.
+ SyncScope::ID SSID;
};
//===----------------------------------------------------------------------===//
@@ -509,7 +514,7 @@ private:
class AtomicCmpXchgInst : public Instruction {
void Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
@@ -521,13 +526,11 @@ public:
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
- Instruction *InsertBefore = nullptr);
+ SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
- BasicBlock *InsertAtEnd);
+ SyncScope::ID SSID, BasicBlock *InsertAtEnd);
// allocate space for exactly three operands
void *operator new(size_t s) {
@@ -561,7 +564,12 @@ public:
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- /// Set the ordering constraint on this cmpxchg.
+ /// Returns the success ordering constraint of this cmpxchg instruction.
+ AtomicOrdering getSuccessOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+ }
+
+ /// Sets the success ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"CmpXchg instructions can only be atomic.");
@@ -569,6 +577,12 @@ public:
((unsigned)Ordering << 2));
}
+ /// Returns the failure ordering constraint of this cmpxchg instruction.
+ AtomicOrdering getFailureOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
+ }
+
+ /// Sets the failure ordering constraint of this cmpxchg instruction.
void setFailureOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"CmpXchg instructions can only be atomic.");
@@ -576,28 +590,14 @@ public:
((unsigned)Ordering << 5));
}
- /// Specify whether this cmpxchg is atomic and orders other operations with
- /// respect to all concurrently executing threads, or only with respect to
- /// signal handlers executing in the same thread.
- void setSynchScope(SynchronizationScope SynchScope) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) |
- (SynchScope << 1));
- }
-
- /// Returns the ordering constraint on this cmpxchg.
- AtomicOrdering getSuccessOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
- }
-
- /// Returns the ordering constraint on this cmpxchg.
- AtomicOrdering getFailureOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
+ /// Returns the synchronization scope ID of this cmpxchg instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Returns whether this cmpxchg is atomic between threads or only within a
- /// single thread.
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1);
+ /// Sets the synchronization scope ID of this cmpxchg instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
Value *getPointerOperand() { return getOperand(0); }
@@ -652,6 +652,11 @@ private:
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this cmpxchg instruction. Not quite
+ /// enough room in SubClassData for everything, so synchronization scope ID
+ /// gets its own field.
+ SyncScope::ID SSID;
};
template <>
@@ -711,10 +716,10 @@ public:
};
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
- AtomicOrdering Ordering, SynchronizationScope SynchScope,
+ AtomicOrdering Ordering, SyncScope::ID SSID,
Instruction *InsertBefore = nullptr);
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
- AtomicOrdering Ordering, SynchronizationScope SynchScope,
+ AtomicOrdering Ordering, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// allocate space for exactly two operands
@@ -748,7 +753,12 @@ public:
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- /// Set the ordering constraint on this RMW.
+ /// Returns the ordering constraint of this rmw instruction.
+ AtomicOrdering getOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+ }
+
+ /// Sets the ordering constraint of this rmw instruction.
void setOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"atomicrmw instructions can only be atomic.");
@@ -756,23 +766,14 @@ public:
((unsigned)Ordering << 2));
}
- /// Specify whether this RMW orders other operations with respect to all
- /// concurrently executing threads, or only with respect to signal handlers
- /// executing in the same thread.
- void setSynchScope(SynchronizationScope SynchScope) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) |
- (SynchScope << 1));
+ /// Returns the synchronization scope ID of this rmw instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Returns the ordering constraint on this RMW.
- AtomicOrdering getOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
- }
-
- /// Returns whether this RMW is atomic between threads or only within a
- /// single thread.
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1);
+ /// Sets the synchronization scope ID of this rmw instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
Value *getPointerOperand() { return getOperand(0); }
@@ -797,13 +798,18 @@ public:
private:
void Init(BinOp Operation, Value *Ptr, Value *Val,
- AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ AtomicOrdering Ordering, SyncScope::ID SSID);
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this rmw instruction. Not quite enough
+ /// room in SubClassData for everything, so synchronization scope ID gets its
+ /// own field.
+ SyncScope::ID SSID;
};
template <>
@@ -1101,8 +1107,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
void AssertOK() {
- assert(getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
- getPredicate() <= CmpInst::LAST_ICMP_PREDICATE &&
+ assert(isIntPredicate() &&
"Invalid ICmp predicate value");
assert(getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to ICmp instruction are not of the same type!");
@@ -1244,8 +1249,7 @@ public:
/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
void AssertOK() {
- assert(getPredicate() <= FCmpInst::LAST_FCMP_PREDICATE &&
- "Invalid FCmp predicate value");
+ assert(isFPPredicate() && "Invalid FCmp predicate value");
assert(getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to FCmp instruction are not of the same type!");
// Check that the operands are the right type
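As a quick illustration of the renamed atomic API above, here is a minimal sketch (the surrounding pass or builder context is assumed) that marks a load as an acquire operation scoped to the current thread:

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/LLVMContext.h"
    #include <cassert>
    using namespace llvm;

    // Mark an existing load as an acquire that only synchronizes with signal
    // handlers running in the same thread.
    static void makeSingleThreadAcquire(LoadInst *LI) {
      LI->setAtomic(AtomicOrdering::Acquire, SyncScope::SingleThread);
      assert(LI->getSyncScopeID() == SyncScope::SingleThread);
    }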
diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index 944af57a7800..f55d17ec72c8 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -296,6 +296,175 @@ namespace llvm {
}
};
+ class ElementUnorderedAtomicMemMoveInst : public IntrinsicInst {
+ private:
+ enum { ARG_DEST = 0, ARG_SOURCE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 };
+
+ public:
+ Value *getRawDest() const {
+ return const_cast<Value *>(getArgOperand(ARG_DEST));
+ }
+ const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
+ Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
+
+    /// Return the raw source pointer argument of the instruction.
+ Value *getRawSource() const {
+ return const_cast<Value *>(getArgOperand(ARG_SOURCE));
+ }
+ const Use &getRawSourceUse() const { return getArgOperandUse(ARG_SOURCE); }
+ Use &getRawSourceUse() { return getArgOperandUse(ARG_SOURCE); }
+
+ Value *getLength() const {
+ return const_cast<Value *>(getArgOperand(ARG_LENGTH));
+ }
+ const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
+ Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
+
+ bool isVolatile() const { return false; }
+
+ Value *getRawElementSizeInBytes() const {
+ return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+ }
+
+ ConstantInt *getElementSizeInBytesCst() const {
+ return cast<ConstantInt>(getRawElementSizeInBytes());
+ }
+
+ uint32_t getElementSizeInBytes() const {
+ return getElementSizeInBytesCst()->getZExtValue();
+ }
+
+ /// This is just like getRawDest, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+
+ /// This is just like getRawSource, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+
+ unsigned getDestAddressSpace() const {
+ return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+ }
+
+ unsigned getSourceAddressSpace() const {
+ return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+ }
+
+ /// Set the specified arguments of the instruction.
+ void setDest(Value *Ptr) {
+ assert(getRawDest()->getType() == Ptr->getType() &&
+ "setDest called with pointer of wrong type!");
+ setArgOperand(ARG_DEST, Ptr);
+ }
+
+ void setSource(Value *Ptr) {
+ assert(getRawSource()->getType() == Ptr->getType() &&
+ "setSource called with pointer of wrong type!");
+ setArgOperand(ARG_SOURCE, Ptr);
+ }
+
+ void setLength(Value *L) {
+ assert(getLength()->getType() == L->getType() &&
+ "setLength called with value of wrong type!");
+ setArgOperand(ARG_LENGTH, L);
+ }
+
+ void setElementSizeInBytes(Constant *V) {
+ assert(V->getType() == Type::getInt8Ty(getContext()) &&
+ "setElementSizeInBytes called with value of wrong type!");
+ setArgOperand(ARG_ELEMENTSIZE, V);
+ }
+
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
+  /// This class represents the atomic memset intrinsic.
+  /// TODO: Integrate this class into the MemIntrinsic hierarchy; for now it is
+  /// a copy of all methods from that hierarchy.
+ class ElementUnorderedAtomicMemSetInst : public IntrinsicInst {
+ private:
+ enum { ARG_DEST = 0, ARG_VALUE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 };
+
+ public:
+ Value *getRawDest() const {
+ return const_cast<Value *>(getArgOperand(ARG_DEST));
+ }
+ const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
+ Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
+
+ Value *getValue() const { return const_cast<Value*>(getArgOperand(ARG_VALUE)); }
+ const Use &getValueUse() const { return getArgOperandUse(ARG_VALUE); }
+ Use &getValueUse() { return getArgOperandUse(ARG_VALUE); }
+
+ Value *getLength() const {
+ return const_cast<Value *>(getArgOperand(ARG_LENGTH));
+ }
+ const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
+ Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
+
+ bool isVolatile() const { return false; }
+
+ Value *getRawElementSizeInBytes() const {
+ return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+ }
+
+ ConstantInt *getElementSizeInBytesCst() const {
+ return cast<ConstantInt>(getRawElementSizeInBytes());
+ }
+
+ uint32_t getElementSizeInBytes() const {
+ return getElementSizeInBytesCst()->getZExtValue();
+ }
+
+ /// This is just like getRawDest, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+
+ unsigned getDestAddressSpace() const {
+ return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+ }
+
+ /// Set the specified arguments of the instruction.
+ void setDest(Value *Ptr) {
+ assert(getRawDest()->getType() == Ptr->getType() &&
+ "setDest called with pointer of wrong type!");
+ setArgOperand(ARG_DEST, Ptr);
+ }
+
+ void setValue(Value *Val) {
+ assert(getValue()->getType() == Val->getType() &&
+ "setValue called with value of wrong type!");
+ setArgOperand(ARG_VALUE, Val);
+ }
+
+ void setLength(Value *L) {
+ assert(getLength()->getType() == L->getType() &&
+ "setLength called with value of wrong type!");
+ setArgOperand(ARG_LENGTH, L);
+ }
+
+ void setElementSizeInBytes(Constant *V) {
+ assert(V->getType() == Type::getInt8Ty(getContext()) &&
+ "setElementSizeInBytes called with value of wrong type!");
+ setArgOperand(ARG_ELEMENTSIZE, V);
+ }
+
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
/// This is the common base class for memset/memcpy/memmove.
class MemIntrinsic : public IntrinsicInst {
public:
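The new element-wise atomic memmove/memset classes expose the same accessors as the regular memory intrinsics; a hedged sketch of a pass inspecting one (the surrounding instruction visitor is assumed):

    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    // Pull out the length and the per-element size of an
    // llvm.memmove.element.unordered.atomic call, using only the accessors
    // declared above.
    static void inspectAtomicMemMove(Instruction &I) {
      if (auto *AMM = dyn_cast<ElementUnorderedAtomicMemMoveInst>(&I)) {
        Value *Len = AMM->getLength();
        uint32_t ElemSize = AMM->getElementSizeInBytes();
        (void)Len;
        (void)ElemSize;
      }
    }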
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index 45936a6e9b66..14c88e519435 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -873,6 +873,22 @@ def int_memcpy_element_unordered_atomic
ReadOnly<1>
]>;
+// @llvm.memmove.element.unordered.atomic.*(dest, src, length, elementsize)
+def int_memmove_element_unordered_atomic
+ : Intrinsic<[],
+ [
+ llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
+ ],
+ [
+ IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
+ ReadOnly<1>
+ ]>;
+
+// @llvm.memset.element.unordered.atomic.*(dest, value, length, elementsize)
+def int_memset_element_unordered_atomic
+ : Intrinsic<[], [ llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty ],
+ [ IntrArgMemOnly, NoCapture<0>, WriteOnly<0> ]>;
+
//===------------------------ Reduction Intrinsics ------------------------===//
//
def int_experimental_vector_reduce_fadd : Intrinsic<[llvm_anyfloat_ty],
diff --git a/include/llvm/IR/LLVMContext.h b/include/llvm/IR/LLVMContext.h
index b27abad618c9..4cb77701f762 100644
--- a/include/llvm/IR/LLVMContext.h
+++ b/include/llvm/IR/LLVMContext.h
@@ -42,6 +42,24 @@ class Output;
} // end namespace yaml
+namespace SyncScope {
+
+typedef uint8_t ID;
+
+/// Known synchronization scope IDs, which always have the same value. All
+/// synchronization scope IDs that LLVM has special knowledge of are listed
+/// here. Additionally, this scheme allows LLVM to efficiently check for
+/// a specific synchronization scope ID without comparing strings.
+enum {
+ /// Synchronized with respect to signal handlers executing in the same thread.
+ SingleThread = 0,
+
+ /// Synchronized with respect to all concurrently executing threads.
+ System = 1
+};
+
+} // end namespace SyncScope
+
/// This is an important class for using LLVM in a threaded context. It
/// (opaquely) owns and manages the core "global" data of LLVM's core
/// infrastructure, including the type and constant uniquing tables.
@@ -111,6 +129,16 @@ public:
  /// tag registered with an LLVMContext has a unique ID.
uint32_t getOperandBundleTagID(StringRef Tag) const;
+  /// getOrInsertSyncScopeID - Maps a synchronization scope name to a
+  /// synchronization scope ID. Every synchronization scope registered with
+  /// LLVMContext has a unique ID, except for the pre-defined ones.
+ SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
+
+  /// getSyncScopeNames - Populates the client-supplied SmallVector with the
+  /// synchronization scope names registered with LLVMContext. Synchronization
+  /// scope names are ordered by increasing synchronization scope IDs.
+ void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;
+
/// Define the GC for a function
void setGC(const Function &Fn, std::string GCName);
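A sketch of how a frontend might use the new scope-naming API; the scope name "agent" and the surrounding IRBuilder are illustrative assumptions only:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    static LoadInst *emitAgentScopedLoad(IRBuilder<> &Builder, Value *Ptr,
                                         LLVMContext &Ctx) {
      // New scope names are registered on first use and keep a stable ID
      // within the context.
      SyncScope::ID AgentSSID = Ctx.getOrInsertSyncScopeID("agent");
      LoadInst *LI = Builder.CreateLoad(Ptr);
      LI->setAtomic(AtomicOrdering::Acquire, AgentSSID);
      return LI;
    }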
diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h
index d47d82a57bff..196e32e3615c 100644
--- a/include/llvm/IR/Module.h
+++ b/include/llvm/IR/Module.h
@@ -249,7 +249,7 @@ public:
/// when other randomness consuming passes are added or removed. In
/// addition, the random stream will be reproducible across LLVM
/// versions when the pass does not change.
- RandomNumberGenerator *createRNG(const Pass* P) const;
+ std::unique_ptr<RandomNumberGenerator> createRNG(const Pass* P) const;
/// @}
/// @name Module Level Mutators
diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h
index b43d58865862..4aa8a0199ab1 100644
--- a/include/llvm/IR/ModuleSummaryIndex.h
+++ b/include/llvm/IR/ModuleSummaryIndex.h
@@ -47,7 +47,13 @@ template <typename T> struct MappingTraits;
/// \brief Class to accumulate and hold information about a callee.
struct CalleeInfo {
- enum class HotnessType : uint8_t { Unknown = 0, Cold = 1, None = 2, Hot = 3 };
+ enum class HotnessType : uint8_t {
+ Unknown = 0,
+ Cold = 1,
+ None = 2,
+ Hot = 3,
+ Critical = 4
+ };
HotnessType Hotness = HotnessType::Unknown;
CalleeInfo() = default;
@@ -516,7 +522,7 @@ using ModulePathStringTableTy = StringMap<std::pair<uint64_t, ModuleHash>>;
/// Map of global value GUID to its summary, used to identify values defined in
/// a particular module, and provide efficient access to their summary.
-using GVSummaryMapTy = std::map<GlobalValue::GUID, GlobalValueSummary *>;
+using GVSummaryMapTy = DenseMap<GlobalValue::GUID, GlobalValueSummary *>;
/// Class to hold module path string table and global value map,
/// and encapsulate methods for operating on them.
diff --git a/include/llvm/IR/PassManager.h b/include/llvm/IR/PassManager.h
index d03b7b65f81e..393175675034 100644
--- a/include/llvm/IR/PassManager.h
+++ b/include/llvm/IR/PassManager.h
@@ -162,6 +162,14 @@ public:
return PA;
}
+ /// \brief Construct a preserved analyses object with a single preserved set.
+ template <typename AnalysisSetT>
+ static PreservedAnalyses allInSet() {
+ PreservedAnalyses PA;
+ PA.preserveSet<AnalysisSetT>();
+ return PA;
+ }
+
/// Mark an analysis as preserved.
template <typename AnalysisT> void preserve() { preserve(AnalysisT::ID()); }
@@ -1062,10 +1070,27 @@ public:
const AnalysisManagerT &getManager() const { return *AM; }
- /// \brief Handle invalidation by ignoring it; this pass is immutable.
+ /// When invalidation occurs, remove any registered invalidation events.
bool invalidate(
- IRUnitT &, const PreservedAnalyses &,
- typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &) {
+ IRUnitT &IRUnit, const PreservedAnalyses &PA,
+ typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &Inv) {
+ // Loop over the set of registered outer invalidation mappings and if any
+ // of them map to an analysis that is now invalid, clear it out.
+ SmallVector<AnalysisKey *, 4> DeadKeys;
+ for (auto &KeyValuePair : OuterAnalysisInvalidationMap) {
+ AnalysisKey *OuterID = KeyValuePair.first;
+ auto &InnerIDs = KeyValuePair.second;
+ InnerIDs.erase(llvm::remove_if(InnerIDs, [&](AnalysisKey *InnerID) {
+ return Inv.invalidate(InnerID, IRUnit, PA); }),
+ InnerIDs.end());
+ if (InnerIDs.empty())
+ DeadKeys.push_back(OuterID);
+ }
+
+ for (auto OuterID : DeadKeys)
+ OuterAnalysisInvalidationMap.erase(OuterID);
+
+ // The proxy itself remains valid regardless of anything else.
return false;
}
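To illustrate the new allInSet<> helper, a minimal sketch of a function pass that claims to preserve exactly the CFG-centric analysis set (the pass body itself is assumed):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/PassManager.h"
    using namespace llvm;

    // Hypothetical pass: rewrites instructions but never changes the CFG, so
    // it can report the whole CFGAnalyses set as preserved in one call.
    struct NoCFGChangePass : PassInfoMixin<NoCFGChangePass> {
      PreservedAnalyses run(Function &F, FunctionAnalysisManager &) {
        // ... transform straight-line code without touching the CFG ...
        return PreservedAnalyses::allInSet<CFGAnalyses>();
      }
    };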
diff --git a/include/llvm/IR/PatternMatch.h b/include/llvm/IR/PatternMatch.h
index 5b69e7855cc7..acb895211644 100644
--- a/include/llvm/IR/PatternMatch.h
+++ b/include/llvm/IR/PatternMatch.h
@@ -158,12 +158,18 @@ struct match_neg_zero {
/// zero
inline match_neg_zero m_NegZero() { return match_neg_zero(); }
+struct match_any_zero {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isZeroValue();
+ return false;
+ }
+};
+
/// \brief Match an arbitrary zero/null constant. This includes
/// zeroinitializer for vectors and ConstantPointerNull for pointers. For
/// floating point constants, this will match negative zero and positive zero.
-inline match_combine_or<match_zero, match_neg_zero> m_AnyZero() {
- return m_CombineOr(m_Zero(), m_NegZero());
-}
+inline match_any_zero m_AnyZero() { return match_any_zero(); }
struct match_nan {
template <typename ITy> bool match(ITy *V) {
@@ -176,6 +182,39 @@ struct match_nan {
/// Match an arbitrary NaN constant. This includes quiet and signalling nans.
inline match_nan m_NaN() { return match_nan(); }
+struct match_one {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isOneValue();
+ return false;
+ }
+};
+
+/// \brief Match an integer 1 or a vector with all elements equal to 1.
+inline match_one m_One() { return match_one(); }
+
+struct match_all_ones {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isAllOnesValue();
+ return false;
+ }
+};
+
+/// \brief Match an integer or vector with all bits set to true.
+inline match_all_ones m_AllOnes() { return match_all_ones(); }
+
+struct match_sign_mask {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isMinSignedValue();
+ return false;
+ }
+};
+
+/// \brief Match an integer or vector with only the sign bit(s) set.
+inline match_sign_mask m_SignMask() { return match_sign_mask(); }
+
struct apint_match {
const APInt *&Res;
@@ -259,34 +298,6 @@ template <typename Predicate> struct api_pred_ty : public Predicate {
}
};
-struct is_one {
- bool isValue(const APInt &C) { return C.isOneValue(); }
-};
-
-/// \brief Match an integer 1 or a vector with all elements equal to 1.
-inline cst_pred_ty<is_one> m_One() { return cst_pred_ty<is_one>(); }
-inline api_pred_ty<is_one> m_One(const APInt *&V) { return V; }
-
-struct is_all_ones {
- bool isValue(const APInt &C) { return C.isAllOnesValue(); }
-};
-
-/// \brief Match an integer or vector with all bits set to true.
-inline cst_pred_ty<is_all_ones> m_AllOnes() {
- return cst_pred_ty<is_all_ones>();
-}
-inline api_pred_ty<is_all_ones> m_AllOnes(const APInt *&V) { return V; }
-
-struct is_sign_mask {
- bool isValue(const APInt &C) { return C.isSignMask(); }
-};
-
-/// \brief Match an integer or vector with only the sign bit(s) set.
-inline cst_pred_ty<is_sign_mask> m_SignMask() {
- return cst_pred_ty<is_sign_mask>();
-}
-inline api_pred_ty<is_sign_mask> m_SignMask(const APInt *&V) { return V; }
-
struct is_power2 {
bool isValue(const APInt &C) { return C.isPowerOf2(); }
};
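A minimal usage sketch of the reworked matchers above, outside any particular transform:

    #include "llvm/IR/PatternMatch.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;
    using namespace llvm::PatternMatch;

    // True for any zero constant: integer 0, null pointer, or floating-point
    // +0.0 / -0.0, now matched through a single Constant-based predicate.
    static bool isAnyZeroConstant(Value *V) {
      return match(V, m_AnyZero());
    }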
diff --git a/include/llvm/IR/SafepointIRVerifier.h b/include/llvm/IR/SafepointIRVerifier.h
new file mode 100644
index 000000000000..092050d1d207
--- /dev/null
+++ b/include/llvm/IR/SafepointIRVerifier.h
@@ -0,0 +1,35 @@
+//===- SafepointIRVerifier.h - Checks for GC relocation problems *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a verifier which is useful for enforcing the relocation
+// properties required by a relocating GC. Specifically, it looks for uses of
+// the unrelocated value of pointer SSA values after a possible safepoint. It
+// attempts to report no false negatives, but may end up reporting false
+// positives in rare cases (see the note at the top of the corresponding cpp
+// file.)
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_SAFEPOINT_IR_VERIFIER
+#define LLVM_IR_SAFEPOINT_IR_VERIFIER
+
+namespace llvm {
+
+class Function;
+class FunctionPass;
+
+/// Run the safepoint verifier over a single function. Crashes on failure.
+void verifySafepointIR(Function &F);
+
+/// Create an instance of the safepoint verifier pass which can be added to
+/// a pass pipeline to check for relocation bugs.
+FunctionPass *createSafepointIRVerifierPass();
+}
+
+#endif // LLVM_IR_SAFEPOINT_IR_VERIFIER
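A hedged sketch of wiring the new verifier into a tool; the Module and Function here are assumed to come from elsewhere:

    #include "llvm/IR/Function.h"
    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/Module.h"
    #include "llvm/IR/SafepointIRVerifier.h"
    using namespace llvm;

    static void checkRelocations(Module &M, Function &F) {
      // Direct, one-off check; reports problems and aborts on failure.
      verifySafepointIR(F);

      // Or run it as a pass so it can be interleaved with other passes.
      legacy::FunctionPassManager FPM(&M);
      FPM.add(createSafepointIRVerifierPass());
      FPM.doInitialization();
      FPM.run(F);
      FPM.doFinalization();
    }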
diff --git a/include/llvm/IR/Type.h b/include/llvm/IR/Type.h
index b37b59288e3f..ef7801266777 100644
--- a/include/llvm/IR/Type.h
+++ b/include/llvm/IR/Type.h
@@ -202,6 +202,12 @@ public:
/// Return true if this is an integer type or a vector of integer types.
bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
+ /// Return true if this is an integer type or a vector of integer types of
+ /// the given width.
+ bool isIntOrIntVectorTy(unsigned BitWidth) const {
+ return getScalarType()->isIntegerTy(BitWidth);
+ }
+
/// True if this is an instance of FunctionType.
bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
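The new overload keeps width checks to one call; a tiny sketch:

    #include "llvm/IR/Type.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;

    // True for an i1 or a vector of i1, e.g. the result of an icmp or fcmp.
    static bool isBoolOrBoolVector(const Value *V) {
      return V->getType()->isIntOrIntVectorTy(1);
    }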
diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h
index aab14070dbda..39ac4649b70d 100644
--- a/include/llvm/InitializePasses.h
+++ b/include/llvm/InitializePasses.h
@@ -303,6 +303,7 @@ void initializeProfileSummaryInfoWrapperPassPass(PassRegistry&);
void initializePromoteLegacyPassPass(PassRegistry&);
void initializePruneEHPass(PassRegistry&);
void initializeRABasicPass(PassRegistry&);
+void initializeRAFastPass(PassRegistry&);
void initializeRAGreedyPass(PassRegistry&);
void initializeReassociateLegacyPassPass(PassRegistry&);
void initializeRegBankSelectPass(PassRegistry&);
@@ -318,6 +319,7 @@ void initializeResetMachineFunctionPass(PassRegistry&);
void initializeReversePostOrderFunctionAttrsLegacyPassPass(PassRegistry&);
void initializeRewriteStatepointsForGCPass(PassRegistry&);
void initializeRewriteSymbolsLegacyPassPass(PassRegistry&);
+void initializeSafepointIRVerifierPass(PassRegistry&);
void initializeSCCPLegacyPassPass(PassRegistry&);
void initializeSCEVAAWrapperPassPass(PassRegistry&);
void initializeSLPVectorizerPass(PassRegistry&);
diff --git a/include/llvm/MC/MCAsmBackend.h b/include/llvm/MC/MCAsmBackend.h
index c9c43a22da5d..5a8e29d08ad2 100644
--- a/include/llvm/MC/MCAsmBackend.h
+++ b/include/llvm/MC/MCAsmBackend.h
@@ -73,7 +73,7 @@ public:
/// reported via \p Ctx.
virtual void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const = 0;
+ uint64_t Value, bool IsResolved) const = 0;
/// @}
diff --git a/include/llvm/MC/MCDisassembler/MCDisassembler.h b/include/llvm/MC/MCDisassembler/MCDisassembler.h
index 5e626f186986..7f09c05ccf2a 100644
--- a/include/llvm/MC/MCDisassembler/MCDisassembler.h
+++ b/include/llvm/MC/MCDisassembler/MCDisassembler.h
@@ -68,6 +68,7 @@ public:
/// an invalid instruction.
/// \param Address - The address, in the memory space of region, of the first
/// byte of the instruction.
+ /// \param Bytes - A reference to the actual bytes of the instruction.
/// \param VStream - The stream to print warnings and diagnostic messages on.
/// \param CStream - The stream to print comments and annotations on.
/// \return - MCDisassembler::Success if the instruction is valid,
diff --git a/include/llvm/MC/MCMachObjectWriter.h b/include/llvm/MC/MCMachObjectWriter.h
index 2d2480a27223..42dc90da3049 100644
--- a/include/llvm/MC/MCMachObjectWriter.h
+++ b/include/llvm/MC/MCMachObjectWriter.h
@@ -233,8 +233,7 @@ public:
void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, bool &IsPCRel,
- uint64_t &FixedValue) override;
+ MCValue Target, uint64_t &FixedValue) override;
void bindIndirectSymbols(MCAssembler &Asm);
diff --git a/include/llvm/MC/MCObjectWriter.h b/include/llvm/MC/MCObjectWriter.h
index 86bcbb6861d7..cd90690fb186 100644
--- a/include/llvm/MC/MCObjectWriter.h
+++ b/include/llvm/MC/MCObjectWriter.h
@@ -86,7 +86,7 @@ public:
virtual void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
- bool &IsPCRel, uint64_t &FixedValue) = 0;
+ uint64_t &FixedValue) = 0;
/// Check whether the difference (A - B) between two symbol references is
/// fully resolved.
diff --git a/include/llvm/MC/MCSymbolWasm.h b/include/llvm/MC/MCSymbolWasm.h
index 7ea89629efda..9bae6c582faa 100644
--- a/include/llvm/MC/MCSymbolWasm.h
+++ b/include/llvm/MC/MCSymbolWasm.h
@@ -21,6 +21,8 @@ private:
std::string ModuleName;
SmallVector<wasm::ValType, 1> Returns;
SmallVector<wasm::ValType, 4> Params;
+ bool ParamsSet = false;
+ bool ReturnsSet = false;
/// An expression describing how to calculate the size of a symbol. If a
/// symbol has no size this field will be NULL.
@@ -45,15 +47,23 @@ public:
const StringRef getModuleName() const { return ModuleName; }
- const SmallVector<wasm::ValType, 1> &getReturns() const { return Returns; }
+ const SmallVector<wasm::ValType, 1> &getReturns() const {
+ assert(ReturnsSet);
+ return Returns;
+ }
void setReturns(SmallVectorImpl<wasm::ValType> &&Rets) {
+ ReturnsSet = true;
Returns = std::move(Rets);
}
- const SmallVector<wasm::ValType, 4> &getParams() const { return Params; }
+ const SmallVector<wasm::ValType, 4> &getParams() const {
+ assert(ParamsSet);
+ return Params;
+ }
void setParams(SmallVectorImpl<wasm::ValType> &&Pars) {
+ ParamsSet = true;
Params = std::move(Pars);
}
};
diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h
index 78e0b5f6ed30..89c1ba6be35f 100644
--- a/include/llvm/Object/COFF.h
+++ b/include/llvm/Object/COFF.h
@@ -698,6 +698,9 @@ struct coff_resource_dir_entry {
uint32_t getNameOffset() const {
return maskTrailingOnes<uint32_t>(31) & NameOffset;
}
+ // Even though the PE/COFF spec doesn't mention this, the high bit of a name
+ // offset is set.
+ void setNameOffset(uint32_t Offset) { NameOffset = Offset | (1 << 31); }
} Identifier;
union {
support::ulittle32_t DataEntryOffset;
diff --git a/include/llvm/Object/Wasm.h b/include/llvm/Object/Wasm.h
index 5c8445f10f44..07ee4a4d6c4d 100644
--- a/include/llvm/Object/Wasm.h
+++ b/include/llvm/Object/Wasm.h
@@ -61,7 +61,7 @@ public:
void print(raw_ostream &Out) const {
Out << "Name=" << Name << ", Type=" << static_cast<int>(Type)
- << ", Flags=" << Flags;
+ << ", Flags=" << Flags << " ElemIndex=" << ElementIndex;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -69,8 +69,7 @@ public:
#endif
};
-class WasmSection {
-public:
+struct WasmSection {
WasmSection() = default;
uint32_t Type = 0; // Section type (See below)
@@ -80,6 +79,11 @@ public:
std::vector<wasm::WasmRelocation> Relocations; // Relocations for this section
};
+struct WasmSegment {
+ uint32_t SectionOffset;
+ wasm::WasmDataSegment Data;
+};
+
class WasmObjectFile : public ObjectFile {
public:
@@ -110,7 +114,7 @@ public:
return ElemSegments;
}
- const std::vector<wasm::WasmDataSegment>& dataSegments() const {
+ const std::vector<WasmSegment>& dataSegments() const {
return DataSegments;
}
@@ -210,7 +214,7 @@ private:
std::vector<wasm::WasmImport> Imports;
std::vector<wasm::WasmExport> Exports;
std::vector<wasm::WasmElemSegment> ElemSegments;
- std::vector<wasm::WasmDataSegment> DataSegments;
+ std::vector<WasmSegment> DataSegments;
std::vector<wasm::WasmFunction> Functions;
std::vector<WasmSymbol> Symbols;
ArrayRef<uint8_t> CodeSection;
diff --git a/include/llvm/ObjectYAML/WasmYAML.h b/include/llvm/ObjectYAML/WasmYAML.h
index 6bf08d340eeb..709ad8ec3b77 100644
--- a/include/llvm/ObjectYAML/WasmYAML.h
+++ b/include/llvm/ObjectYAML/WasmYAML.h
@@ -98,7 +98,8 @@ struct Relocation {
};
struct DataSegment {
- uint32_t Index;
+ uint32_t MemoryIndex;
+ uint32_t SectionOffset;
wasm::WasmInitExpr Offset;
yaml::BinaryRef Content;
};
diff --git a/include/llvm/Option/OptTable.h b/include/llvm/Option/OptTable.h
index 3e7b019a0d4e..a35e182f00e5 100644
--- a/include/llvm/Option/OptTable.h
+++ b/include/llvm/Option/OptTable.h
@@ -140,7 +140,8 @@ public:
// to start with.
///
/// \return The vector of flags which start with Cur.
- std::vector<std::string> findByPrefix(StringRef Cur) const;
+ std::vector<std::string> findByPrefix(StringRef Cur,
+ unsigned short DisableFlags) const;
/// \brief Parse a single argument; returning the new argument and
/// updating Index.
diff --git a/include/llvm/Passes/PassBuilder.h b/include/llvm/Passes/PassBuilder.h
index ff1958397331..33433f6b4a10 100644
--- a/include/llvm/Passes/PassBuilder.h
+++ b/include/llvm/Passes/PassBuilder.h
@@ -46,6 +46,19 @@ class PassBuilder {
Optional<PGOOptions> PGOOpt;
public:
+ /// \brief A struct to capture parsed pass pipeline names.
+ ///
+ /// A pipeline is defined as a series of names, each of which may in itself
+ /// recursively contain a nested pipeline. A name is either the name of a pass
+ /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
+ /// name is the name of a pass, the InnerPipeline is empty, since passes
+ /// cannot contain inner pipelines. See parsePassPipeline() for a more
+ /// detailed description of the textual pipeline format.
+ struct PipelineElement {
+ StringRef Name;
+ std::vector<PipelineElement> InnerPipeline;
+ };
+
/// \brief LLVM-provided high-level optimization levels.
///
/// This enumerates the LLVM-provided high-level optimization levels. Each
@@ -188,9 +201,14 @@ public:
/// only intended for use when attempting to optimize code. If frontends
/// require some transformations for semantic reasons, they should explicitly
/// build them.
+ ///
+  /// \p PrepareForThinLTO indicates whether this is invoked in the
+  /// PrepareForThinLTO phase. Special handling is needed for sample PGO to
+  /// keep the profile accurate during the backend profile annotation phase.
FunctionPassManager
buildFunctionSimplificationPipeline(OptimizationLevel Level,
- bool DebugLogging = false);
+ bool DebugLogging = false,
+ bool PrepareForThinLTO = false);
/// Construct the core LLVM module canonicalization and simplification
/// pipeline.
@@ -205,9 +223,14 @@ public:
/// only intended for use when attempting to optimize code. If frontends
/// require some transformations for semantic reasons, they should explicitly
/// build them.
+ ///
+  /// \p PrepareForThinLTO indicates whether this is invoked in the
+  /// PrepareForThinLTO phase. Special handling is needed for sample PGO to
+  /// keep the profile accurate during the backend profile annotation phase.
ModulePassManager
buildModuleSimplificationPipeline(OptimizationLevel Level,
- bool DebugLogging = false);
+ bool DebugLogging = false,
+ bool PrepareForThinLTO = false);
/// Construct the core LLVM module optimization pipeline.
///
@@ -302,7 +325,8 @@ public:
/// registered.
AAManager buildDefaultAAPipeline();
- /// \brief Parse a textual pass pipeline description into a \c ModulePassManager.
+ /// \brief Parse a textual pass pipeline description into a \c
+ /// ModulePassManager.
///
/// The format of the textual pass pipeline description looks something like:
///
@@ -312,8 +336,8 @@ public:
/// are comma separated. As a special shortcut, if the very first pass is not
/// a module pass (as a module pass manager is), this will automatically form
/// the shortest stack of pass managers that allow inserting that first pass.
- /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop passes
- /// 'lpassN', all of these are valid:
+ /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
+ /// passes 'lpassN', all of these are valid:
///
/// fpass1,fpass2,fpass3
/// cgpass1,cgpass2,cgpass3
@@ -326,13 +350,28 @@ public:
/// module(function(loop(lpass1,lpass2,lpass3)))
///
/// This shortcut is especially useful for debugging and testing small pass
- /// combinations. Note that these shortcuts don't introduce any other magic. If
- /// the sequence of passes aren't all the exact same kind of pass, it will be
- /// an error. You cannot mix different levels implicitly, you must explicitly
- /// form a pass manager in which to nest passes.
+ /// combinations. Note that these shortcuts don't introduce any other magic.
+ /// If the sequence of passes aren't all the exact same kind of pass, it will
+ /// be an error. You cannot mix different levels implicitly, you must
+ /// explicitly form a pass manager in which to nest passes.
bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
bool VerifyEachPass = true, bool DebugLogging = false);
+ /// {{@ Parse a textual pass pipeline description into a specific PassManager
+ ///
+ /// Automatic deduction of an appropriate pass manager stack is not supported.
+  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
+ /// this is the valid pipeline text:
+ ///
+ /// function(lpass)
+ bool parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
+ bool VerifyEachPass = true, bool DebugLogging = false);
+ bool parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
+ bool VerifyEachPass = true, bool DebugLogging = false);
+ bool parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
+ bool VerifyEachPass = true, bool DebugLogging = false);
+ /// @}}
+
/// Parse a textual alias analysis pipeline into the provided AA manager.
///
/// The format of the textual AA pipeline is a comma separated list of AA
@@ -350,13 +389,139 @@ public:
/// returns false.
bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
-private:
- /// A struct to capture parsed pass pipeline names.
- struct PipelineElement {
- StringRef Name;
- std::vector<PipelineElement> InnerPipeline;
- };
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding passes that perform peephole
+ /// optimizations similar to the instruction combiner. These passes will be
+ /// inserted after each instance of the instruction combiner pass.
+ void registerPeepholeEPCallback(
+ const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+ PeepholeEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding late loop canonicalization and
+ /// simplification passes. This is the last point in the loop optimization
+ /// pipeline before loop deletion. Each pass added
+ /// here must be an instance of LoopPass.
+ /// This is the place to add passes that can remove loops, such as target-
+ /// specific loop idiom recognition.
+ void registerLateLoopOptimizationsEPCallback(
+ const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
+ LateLoopOptimizationsEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding loop passes to the end of the loop
+ /// optimizer.
+ void registerLoopOptimizerEndEPCallback(
+ const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
+ LoopOptimizerEndEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding optimization passes after most of the
+ /// main optimizations, but before the last cleanup-ish optimizations.
+ void registerScalarOptimizerLateEPCallback(
+ const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+ ScalarOptimizerLateEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding CallGraphSCC passes at the end of the
+ /// main CallGraphSCC passes and before any function simplification passes run
+ /// by CGPassManager.
+ void registerCGSCCOptimizerLateEPCallback(
+ const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
+ CGSCCOptimizerLateEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding optimization passes before the
+ /// vectorizer and other highly target specific optimization passes are
+ /// executed.
+ void registerVectorizerStartEPCallback(
+ const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+ VectorizerStartEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for parsing an AliasAnalysis Name to populate
+ /// the given AAManager \p AA
+ void registerParseAACallback(
+ const std::function<bool(StringRef Name, AAManager &AA)> &C) {
+ AAParsingCallbacks.push_back(C);
+ }
+
+ /// {{@ Register callbacks for analysis registration with this PassBuilder
+ /// instance.
+ /// Callees register their analyses with the given AnalysisManager objects.
+ void registerAnalysisRegistrationCallback(
+ const std::function<void(CGSCCAnalysisManager &)> &C) {
+ CGSCCAnalysisRegistrationCallbacks.push_back(C);
+ }
+ void registerAnalysisRegistrationCallback(
+ const std::function<void(FunctionAnalysisManager &)> &C) {
+ FunctionAnalysisRegistrationCallbacks.push_back(C);
+ }
+ void registerAnalysisRegistrationCallback(
+ const std::function<void(LoopAnalysisManager &)> &C) {
+ LoopAnalysisRegistrationCallbacks.push_back(C);
+ }
+ void registerAnalysisRegistrationCallback(
+ const std::function<void(ModuleAnalysisManager &)> &C) {
+ ModuleAnalysisRegistrationCallbacks.push_back(C);
+ }
+ /// @}}
+
+ /// {{@ Register pipeline parsing callbacks with this pass builder instance.
+  /// Using these callbacks, callers can parse both a single pass name and
+  /// entire sub-pipelines, and populate the PassManager instance
+ /// accordingly.
+ void registerPipelineParsingCallback(
+ const std::function<bool(StringRef Name, CGSCCPassManager &,
+ ArrayRef<PipelineElement>)> &C) {
+ CGSCCPipelineParsingCallbacks.push_back(C);
+ }
+ void registerPipelineParsingCallback(
+ const std::function<bool(StringRef Name, FunctionPassManager &,
+ ArrayRef<PipelineElement>)> &C) {
+ FunctionPipelineParsingCallbacks.push_back(C);
+ }
+ void registerPipelineParsingCallback(
+ const std::function<bool(StringRef Name, LoopPassManager &,
+ ArrayRef<PipelineElement>)> &C) {
+ LoopPipelineParsingCallbacks.push_back(C);
+ }
+ void registerPipelineParsingCallback(
+ const std::function<bool(StringRef Name, ModulePassManager &,
+ ArrayRef<PipelineElement>)> &C) {
+ ModulePipelineParsingCallbacks.push_back(C);
+ }
+ /// @}}
+
+ /// \brief Register a callback for a top-level pipeline entry.
+ ///
+ /// If the PassManager type is not given at the top level of the pipeline
+ /// text, this Callback should be used to determine the appropriate stack of
+ /// PassManagers and populate the passed ModulePassManager.
+ void registerParseTopLevelPipelineCallback(
+ const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
+ bool VerifyEachPass, bool DebugLogging)> &C) {
+ TopLevelPipelineParsingCallbacks.push_back(C);
+ }
+private:
static Optional<std::vector<PipelineElement>>
parsePipelineText(StringRef Text);
@@ -382,7 +547,106 @@ private:
bool parseModulePassPipeline(ModulePassManager &MPM,
ArrayRef<PipelineElement> Pipeline,
bool VerifyEachPass, bool DebugLogging);
+
+ void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
+ OptimizationLevel Level, bool RunProfileGen,
+ std::string ProfileGenFile,
+ std::string ProfileUseFile);
+
+ void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
+
+ // Extension Point callbacks
+ SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+ PeepholeEPCallbacks;
+ SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
+ LateLoopOptimizationsEPCallbacks;
+ SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
+ LoopOptimizerEndEPCallbacks;
+ SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+ ScalarOptimizerLateEPCallbacks;
+ SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
+ CGSCCOptimizerLateEPCallbacks;
+ SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+ VectorizerStartEPCallbacks;
+ // Module callbacks
+ SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
+ ModuleAnalysisRegistrationCallbacks;
+ SmallVector<std::function<bool(StringRef, ModulePassManager &,
+ ArrayRef<PipelineElement>)>,
+ 2>
+ ModulePipelineParsingCallbacks;
+ SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
+ bool VerifyEachPass, bool DebugLogging)>,
+ 2>
+ TopLevelPipelineParsingCallbacks;
+ // CGSCC callbacks
+ SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
+ CGSCCAnalysisRegistrationCallbacks;
+ SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
+ ArrayRef<PipelineElement>)>,
+ 2>
+ CGSCCPipelineParsingCallbacks;
+ // Function callbacks
+ SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
+ FunctionAnalysisRegistrationCallbacks;
+ SmallVector<std::function<bool(StringRef, FunctionPassManager &,
+ ArrayRef<PipelineElement>)>,
+ 2>
+ FunctionPipelineParsingCallbacks;
+ // Loop callbacks
+ SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
+ LoopAnalysisRegistrationCallbacks;
+ SmallVector<std::function<bool(StringRef, LoopPassManager &,
+ ArrayRef<PipelineElement>)>,
+ 2>
+ LoopPipelineParsingCallbacks;
+ // AA callbacks
+ SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
+ AAParsingCallbacks;
};
+
+/// This utility template takes care of adding require<> and invalidate<>
+/// passes for an analysis to a given \c PassManager. It is intended to be used
+/// during parsing of a pass pipeline when parsing a single PipelineName.
+/// When registering a new function analysis FancyAnalysis with the pass
+/// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
+/// like this:
+///
+/// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
+/// ArrayRef<PipelineElement> P) {
+/// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
+/// FPM))
+/// return true;
+/// return false;
+/// }
+template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
+ typename... ExtraArgTs>
+bool parseAnalysisUtilityPasses(
+ StringRef AnalysisName, StringRef PipelineName,
+ PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
+ if (!PipelineName.endswith(">"))
+ return false;
+ // See if this is an invalidate<> pass name
+ if (PipelineName.startswith("invalidate<")) {
+ PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
+ if (PipelineName != AnalysisName)
+ return false;
+ PM.addPass(InvalidateAnalysisPass<AnalysisT>());
+ return true;
+ }
+
+ // See if this is a require<> pass name
+ if (PipelineName.startswith("require<")) {
+ PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
+ if (PipelineName != AnalysisName)
+ return false;
+ PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
+ ExtraArgTs...>());
+ return true;
+ }
+
+ return false;
+}
}
#endif
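To show how the registration hooks above compose with parseAnalysisUtilityPasses, a hedged sketch that wires a hypothetical FancyAnalysis and MyPass (illustrative names, not part of LLVM) into the textual pipeline parser:

    #include "llvm/Passes/PassBuilder.h"
    using namespace llvm;

    static void registerMyExtensions(PassBuilder &PB) {
      // FancyAnalysis and MyPass are hypothetical stand-ins for out-of-tree
      // passes; the PassBuilder callback API is as declared above.
      PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &FAM) {
        FAM.registerPass([] { return FancyAnalysis(); });
      });
      PB.registerPipelineParsingCallback(
          [](StringRef Name, FunctionPassManager &FPM,
             ArrayRef<PassBuilder::PipelineElement>) {
            // Accepts "require<fancy-analysis>" and "invalidate<fancy-analysis>".
            if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis",
                                                          Name, FPM))
              return true;
            if (Name == "my-pass") {
              FPM.addPass(MyPass());
              return true;
            }
            return false;
          });
    }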
diff --git a/include/llvm/ProfileData/InstrProf.h b/include/llvm/ProfileData/InstrProf.h
index a6b2850ccd22..772187f70153 100644
--- a/include/llvm/ProfileData/InstrProf.h
+++ b/include/llvm/ProfileData/InstrProf.h
@@ -249,9 +249,8 @@ void annotateValueSite(Module &M, Instruction &Inst,
/// Same as the above interface but using an ArrayRef, as well as \p Sum.
void annotateValueSite(Module &M, Instruction &Inst,
- ArrayRef<InstrProfValueData> VDs,
- uint64_t Sum, InstrProfValueKind ValueKind,
- uint32_t MaxMDCount);
+ ArrayRef<InstrProfValueData> VDs, uint64_t Sum,
+ InstrProfValueKind ValueKind, uint32_t MaxMDCount);
/// Extract the value profile data from \p Inst which is annotated with
/// value profile meta data. Return false if there is no value data annotated,
@@ -582,34 +581,27 @@ struct InstrProfValueSiteRecord {
/// Merge data from another InstrProfValueSiteRecord
/// Optionally scale merged counts by \p Weight.
- void merge(SoftInstrProfErrors &SIPE, InstrProfValueSiteRecord &Input,
- uint64_t Weight = 1);
+ void merge(InstrProfValueSiteRecord &Input, uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn);
/// Scale up value profile data counts.
- void scale(SoftInstrProfErrors &SIPE, uint64_t Weight);
+ void scale(uint64_t Weight, function_ref<void(instrprof_error)> Warn);
};
/// Profiling information for a single function.
struct InstrProfRecord {
- StringRef Name;
- uint64_t Hash;
std::vector<uint64_t> Counts;
- SoftInstrProfErrors SIPE;
InstrProfRecord() = default;
- InstrProfRecord(StringRef Name, uint64_t Hash, std::vector<uint64_t> Counts)
- : Name(Name), Hash(Hash), Counts(std::move(Counts)) {}
+ InstrProfRecord(std::vector<uint64_t> Counts) : Counts(std::move(Counts)) {}
InstrProfRecord(InstrProfRecord &&) = default;
InstrProfRecord(const InstrProfRecord &RHS)
- : Name(RHS.Name), Hash(RHS.Hash), Counts(RHS.Counts), SIPE(RHS.SIPE),
+ : Counts(RHS.Counts),
ValueData(RHS.ValueData
? llvm::make_unique<ValueProfData>(*RHS.ValueData)
: nullptr) {}
InstrProfRecord &operator=(InstrProfRecord &&) = default;
InstrProfRecord &operator=(const InstrProfRecord &RHS) {
- Name = RHS.Name;
- Hash = RHS.Hash;
Counts = RHS.Counts;
- SIPE = RHS.SIPE;
if (!RHS.ValueData) {
ValueData = nullptr;
return *this;
@@ -626,7 +618,6 @@ struct InstrProfRecord {
/// Return the number of value profile kinds with non-zero number
/// of profile sites.
inline uint32_t getNumValueKinds() const;
-
/// Return the number of instrumented sites for ValueKind.
inline uint32_t getNumValueSites(uint32_t ValueKind) const;
@@ -661,11 +652,12 @@ struct InstrProfRecord {
/// Merge the counts in \p Other into this one.
/// Optionally scale merged counts by \p Weight.
- void merge(InstrProfRecord &Other, uint64_t Weight = 1);
+ void merge(InstrProfRecord &Other, uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn);
/// Scale up profile counts (including value profile data) by
/// \p Weight.
- void scale(uint64_t Weight);
+ void scale(uint64_t Weight, function_ref<void(instrprof_error)> Warn);
/// Sort value profile data (per site) by count.
void sortValueData() {
@@ -683,9 +675,6 @@ struct InstrProfRecord {
/// Clear value data entries
void clearValueData() { ValueData = nullptr; }
- /// Get the error contained within the record's soft error counter.
- Error takeError() { return SIPE.takeError(); }
-
private:
struct ValueProfData {
std::vector<InstrProfValueSiteRecord> IndirectCallSites;
@@ -737,11 +726,23 @@ private:
// Merge Value Profile data from Src record to this record for ValueKind.
// Scale merged value counts by \p Weight.
- void mergeValueProfData(uint32_t ValueKind, InstrProfRecord &Src,
- uint64_t Weight);
+  void mergeValueProfData(uint32_t ValueKind, InstrProfRecord &Src,
+ uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn);
// Scale up value profile data count.
- void scaleValueProfData(uint32_t ValueKind, uint64_t Weight);
+ void scaleValueProfData(uint32_t ValueKind, uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn);
+};
+
+struct NamedInstrProfRecord : InstrProfRecord {
+ StringRef Name;
+ uint64_t Hash;
+
+ NamedInstrProfRecord() = default;
+ NamedInstrProfRecord(StringRef Name, uint64_t Hash,
+ std::vector<uint64_t> Counts)
+ : InstrProfRecord(std::move(Counts)), Name(Name), Hash(Hash) {}
};
uint32_t InstrProfRecord::getNumValueKinds() const {
@@ -753,11 +754,8 @@ uint32_t InstrProfRecord::getNumValueKinds() const {
uint32_t InstrProfRecord::getNumValueData(uint32_t ValueKind) const {
uint32_t N = 0;
- const std::vector<InstrProfValueSiteRecord> &SiteRecords =
- getValueSitesForKind(ValueKind);
- for (auto &SR : SiteRecords) {
+ for (auto &SR : getValueSitesForKind(ValueKind))
N += SR.ValueData.size();
- }
return N;
}
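For context, merge and scale now report soft errors through a callback instead of the removed SIPE member, and the name/hash pair lives on the new NamedInstrProfRecord. A minimal caller-side sketch (names, hashes, and counts below are made up):

#include "llvm/ProfileData/InstrProf.h"
using namespace llvm;

void mergeTwoRecords() {
  NamedInstrProfRecord A("foo", /*Hash=*/0x1234, {1, 2, 3});
  NamedInstrProfRecord B("foo", /*Hash=*/0x1234, {10, 20, 30});
  // Merge B into A, scaling B's counts by 2; counter saturation or a
  // mismatch is reported through the callback rather than being stored
  // on the record as before.
  A.merge(B, /*Weight=*/2,
          [](instrprof_error E) { /* log or count the soft error */ });
}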
diff --git a/include/llvm/ProfileData/InstrProfReader.h b/include/llvm/ProfileData/InstrProfReader.h
index 8163ca159209..424360e0f765 100644
--- a/include/llvm/ProfileData/InstrProfReader.h
+++ b/include/llvm/ProfileData/InstrProfReader.h
@@ -40,9 +40,9 @@ class InstrProfReader;
/// A file format agnostic iterator over profiling data.
class InstrProfIterator : public std::iterator<std::input_iterator_tag,
- InstrProfRecord> {
+ NamedInstrProfRecord> {
InstrProfReader *Reader = nullptr;
- InstrProfRecord Record;
+ value_type Record;
void Increment();
@@ -53,12 +53,12 @@ public:
InstrProfIterator &operator++() { Increment(); return *this; }
bool operator==(const InstrProfIterator &RHS) { return Reader == RHS.Reader; }
bool operator!=(const InstrProfIterator &RHS) { return Reader != RHS.Reader; }
- InstrProfRecord &operator*() { return Record; }
- InstrProfRecord *operator->() { return &Record; }
+ value_type &operator*() { return Record; }
+ value_type *operator->() { return &Record; }
};
/// Base class and interface for reading profiling data of any known instrprof
-/// format. Provides an iterator over InstrProfRecords.
+/// format. Provides an iterator over NamedInstrProfRecords.
class InstrProfReader {
instrprof_error LastError = instrprof_error::success;
@@ -70,7 +70,7 @@ public:
virtual Error readHeader() = 0;
/// Read a single record.
- virtual Error readNextRecord(InstrProfRecord &Record) = 0;
+ virtual Error readNextRecord(NamedInstrProfRecord &Record) = 0;
/// Iterator over profile data.
InstrProfIterator begin() { return InstrProfIterator(this); }
@@ -161,7 +161,7 @@ public:
Error readHeader() override;
/// Read a single record.
- Error readNextRecord(InstrProfRecord &Record) override;
+ Error readNextRecord(NamedInstrProfRecord &Record) override;
InstrProfSymtab &getSymtab() override {
assert(Symtab.get());
@@ -209,7 +209,7 @@ public:
static bool hasFormat(const MemoryBuffer &DataBuffer);
Error readHeader() override;
- Error readNextRecord(InstrProfRecord &Record) override;
+ Error readNextRecord(NamedInstrProfRecord &Record) override;
bool isIRLevelProfile() const override {
return (Version & VARIANT_MASK_IR_PROF) != 0;
@@ -243,8 +243,8 @@ private:
return 7 & (sizeof(uint64_t) - SizeInBytes % sizeof(uint64_t));
}
- Error readName(InstrProfRecord &Record);
- Error readFuncHash(InstrProfRecord &Record);
+ Error readName(NamedInstrProfRecord &Record);
+ Error readFuncHash(NamedInstrProfRecord &Record);
Error readRawCounts(InstrProfRecord &Record);
Error readValueProfilingData(InstrProfRecord &Record);
bool atEnd() const { return Data == DataEnd; }
@@ -281,7 +281,7 @@ enum class HashT : uint32_t;
/// Trait for lookups into the on-disk hash table for the binary instrprof
/// format.
class InstrProfLookupTrait {
- std::vector<InstrProfRecord> DataBuffer;
+ std::vector<NamedInstrProfRecord> DataBuffer;
IndexedInstrProf::HashT HashType;
unsigned FormatVersion;
// Endianness of the input value profile data.
@@ -293,7 +293,7 @@ public:
InstrProfLookupTrait(IndexedInstrProf::HashT HashType, unsigned FormatVersion)
: HashType(HashType), FormatVersion(FormatVersion) {}
- using data_type = ArrayRef<InstrProfRecord>;
+ using data_type = ArrayRef<NamedInstrProfRecord>;
using internal_key_type = StringRef;
using external_key_type = StringRef;
@@ -334,11 +334,11 @@ struct InstrProfReaderIndexBase {
// Read all the profile records with the same key pointed to the current
// iterator.
- virtual Error getRecords(ArrayRef<InstrProfRecord> &Data) = 0;
+ virtual Error getRecords(ArrayRef<NamedInstrProfRecord> &Data) = 0;
// Read all the profile records with the key equal to FuncName
virtual Error getRecords(StringRef FuncName,
- ArrayRef<InstrProfRecord> &Data) = 0;
+ ArrayRef<NamedInstrProfRecord> &Data) = 0;
virtual void advanceToNextKey() = 0;
virtual bool atEnd() const = 0;
virtual void setValueProfDataEndianness(support::endianness Endianness) = 0;
@@ -364,9 +364,9 @@ public:
IndexedInstrProf::HashT HashType, uint64_t Version);
~InstrProfReaderIndex() override = default;
- Error getRecords(ArrayRef<InstrProfRecord> &Data) override;
+ Error getRecords(ArrayRef<NamedInstrProfRecord> &Data) override;
Error getRecords(StringRef FuncName,
- ArrayRef<InstrProfRecord> &Data) override;
+ ArrayRef<NamedInstrProfRecord> &Data) override;
void advanceToNextKey() override { RecordIterator++; }
bool atEnd() const override {
@@ -419,10 +419,9 @@ public:
/// Read the file header.
Error readHeader() override;
/// Read a single record.
- Error readNextRecord(InstrProfRecord &Record) override;
+ Error readNextRecord(NamedInstrProfRecord &Record) override;
- /// Return the pointer to InstrProfRecord associated with FuncName
- /// and FuncHash
+ /// Return the NamedInstrProfRecord associated with FuncName and FuncHash
Expected<InstrProfRecord> getInstrProfRecord(StringRef FuncName,
uint64_t FuncHash);
diff --git a/include/llvm/ProfileData/InstrProfWriter.h b/include/llvm/ProfileData/InstrProfWriter.h
index fff10af30295..8107ab386fe2 100644
--- a/include/llvm/ProfileData/InstrProfWriter.h
+++ b/include/llvm/ProfileData/InstrProfWriter.h
@@ -33,7 +33,7 @@ class raw_fd_ostream;
class InstrProfWriter {
public:
- using ProfilingData = SmallDenseMap<uint64_t, InstrProfRecord, 1>;
+ using ProfilingData = SmallDenseMap<uint64_t, InstrProfRecord>;
enum ProfKind { PF_Unknown = 0, PF_FE, PF_IRLevel };
private:
@@ -50,10 +50,15 @@ public:
/// Add function counts for the given function. If there are already counts
/// for this function and the hash and number of counts match, each counter is
/// summed. Optionally scale counts by \p Weight.
- Error addRecord(InstrProfRecord &&I, uint64_t Weight = 1);
+ void addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
+ function_ref<void(Error)> Warn);
+ void addRecord(NamedInstrProfRecord &&I, function_ref<void(Error)> Warn) {
+ addRecord(std::move(I), 1, Warn);
+ }
/// Merge existing function counts from the given writer.
- Error mergeRecordsFromWriter(InstrProfWriter &&IPW);
+ void mergeRecordsFromWriter(InstrProfWriter &&IPW,
+ function_ref<void(Error)> Warn);
/// Write the profile to \c OS
void write(raw_fd_ostream &OS);
@@ -62,7 +67,8 @@ public:
Error writeText(raw_fd_ostream &OS);
/// Write \c Record in text format to \c OS
- static void writeRecordInText(const InstrProfRecord &Record,
+ static void writeRecordInText(StringRef Name, uint64_t Hash,
+ const InstrProfRecord &Counters,
InstrProfSymtab &Symtab, raw_fd_ostream &OS);
/// Write the profile, returning the raw data. For testing.
@@ -85,6 +91,8 @@ public:
void setOutputSparse(bool Sparse);
private:
+ void addRecord(StringRef Name, uint64_t Hash, InstrProfRecord &&I,
+ uint64_t Weight, function_ref<void(Error)> Warn);
bool shouldEncodeData(const ProfilingData &PD);
void writeImpl(ProfOStream &OS);
};
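For context, a client of the reworked writer API now passes a warning callback instead of checking a returned Error. A minimal sketch (record contents made up):

#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void writeOneRecord(raw_fd_ostream &OS) {
  InstrProfWriter Writer;
  // Warnings (e.g. count mismatches) arrive through the callback.
  Writer.addRecord(NamedInstrProfRecord("foo", /*Hash=*/0x1234, {1, 2, 3}),
                   [](Error E) { consumeError(std::move(E)); });
  Writer.write(OS);
}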
diff --git a/include/llvm/ProfileData/ProfileCommon.h b/include/llvm/ProfileData/ProfileCommon.h
index 987e3160ccae..51b065bcdb70 100644
--- a/include/llvm/ProfileData/ProfileCommon.h
+++ b/include/llvm/ProfileData/ProfileCommon.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/ProfileSummary.h"
+#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Error.h"
#include <algorithm>
#include <cstdint>
@@ -27,8 +28,6 @@
namespace llvm {
-struct InstrProfRecord;
-
namespace sampleprof {
class FunctionSamples;
diff --git a/include/llvm/Support/BlockFrequency.h b/include/llvm/Support/BlockFrequency.h
index 1b45cc52973f..2e75cbdd29c1 100644
--- a/include/llvm/Support/BlockFrequency.h
+++ b/include/llvm/Support/BlockFrequency.h
@@ -71,6 +71,10 @@ public:
bool operator>=(BlockFrequency RHS) const {
return Frequency >= RHS.Frequency;
}
+
+ bool operator==(BlockFrequency RHS) const {
+ return Frequency == RHS.Frequency;
+ }
};
}
diff --git a/include/llvm/Support/Compiler.h b/include/llvm/Support/Compiler.h
index be9e46540016..b19e37235df5 100644
--- a/include/llvm/Support/Compiler.h
+++ b/include/llvm/Support/Compiler.h
@@ -493,4 +493,14 @@ void AnnotateIgnoreWritesEnd(const char *file, int line);
#define LLVM_THREAD_LOCAL
#endif
+/// \macro LLVM_ENABLE_EXCEPTIONS
+/// \brief Whether LLVM is built with exception support.
+#if __has_feature(cxx_exceptions)
+#define LLVM_ENABLE_EXCEPTIONS 1
+#elif defined(__GNUC__) && defined(__EXCEPTIONS)
+#define LLVM_ENABLE_EXCEPTIONS 1
+#elif defined(_MSC_VER) && defined(_CPPUNWIND)
+#define LLVM_ENABLE_EXCEPTIONS 1
+#endif
+
#endif
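For context, a minimal sketch of how client code might branch on the new macro; the fallback uses the pre-existing report_fatal_error:

#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <new>

void failAllocation() {
#if LLVM_ENABLE_EXCEPTIONS
  throw std::bad_alloc();   // this build has exceptions enabled
#else
  llvm::report_fatal_error("allocation failed");
#endif
}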
diff --git a/include/llvm/Support/DynamicLibrary.h b/include/llvm/Support/DynamicLibrary.h
index a8874a10d461..469d5dfad062 100644
--- a/include/llvm/Support/DynamicLibrary.h
+++ b/include/llvm/Support/DynamicLibrary.h
@@ -88,6 +88,22 @@ namespace sys {
return !getPermanentLibrary(Filename, ErrMsg).isValid();
}
+ enum SearchOrdering {
+ /// SO_Linker - Search as a call to dlsym(dlopen(NULL)) would when
+ /// DynamicLibrary::getPermanentLibrary(NULL) has been called or
+ /// search the list of explicitly loaded symbols if not.
+ SO_Linker,
+ /// SO_LoadedFirst - Search all loaded libraries, then as SO_Linker would.
+ SO_LoadedFirst,
+ /// SO_LoadedLast - Search as SO_Linker would, then loaded libraries.
+ /// Only useful to search if libraries with RTLD_LOCAL have been added.
+ SO_LoadedLast,
+ /// SO_LoadOrder - Or this in to search libraries in the order loaded.
+ /// The default behaviour is to search loaded libraries in reverse.
+ SO_LoadOrder = 4
+ };
+ static SearchOrdering SearchOrder; // = SO_Linker
+
/// This function will search through all previously loaded dynamic
/// libraries for the symbol \p symbolName. If it is found, the address of
/// that symbol is returned. If not, null is returned. Note that this will
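For context, a minimal sketch of the new SearchOrder knob in use; the symbol name is made up and SO_LoadOrder is OR'd in as described above:

#include "llvm/Support/DynamicLibrary.h"
using namespace llvm::sys;

void *lookupPreferringLoadedLibs(const char *Name) {
  // Search explicitly loaded libraries first, in the order they were
  // loaded, then fall back to the usual linker semantics.
  DynamicLibrary::SearchOrder = static_cast<DynamicLibrary::SearchOrdering>(
      DynamicLibrary::SO_LoadedFirst | DynamicLibrary::SO_LoadOrder);
  return DynamicLibrary::SearchForAddressOfSymbol(Name);
}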
diff --git a/include/llvm/Support/ErrorHandling.h b/include/llvm/Support/ErrorHandling.h
index 7c1edd801571..b45f6348390e 100644
--- a/include/llvm/Support/ErrorHandling.h
+++ b/include/llvm/Support/ErrorHandling.h
@@ -78,12 +78,48 @@ LLVM_ATTRIBUTE_NORETURN void report_fatal_error(StringRef reason,
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const Twine &reason,
bool gen_crash_diag = true);
- /// This function calls abort(), and prints the optional message to stderr.
- /// Use the llvm_unreachable macro (that adds location info), instead of
- /// calling this function directly.
- LLVM_ATTRIBUTE_NORETURN void
- llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr,
- unsigned line=0);
+/// Installs a new bad alloc error handler that should be used whenever a
+/// bad alloc error, e.g. failing malloc/calloc, is encountered by LLVM.
+///
+/// The user can install a bad alloc handler, in order to define the behavior
+/// in case of failing allocations, e.g. throwing an exception. Note that this
+/// handler must not trigger any additional allocations itself.
+///
+/// If no error handler is installed the default is to print the error message
+/// to stderr, and call exit(1). If an error handler is installed then it is
+/// the handler's responsibility to log the message, it will no longer be
+/// printed to stderr. If the error handler returns, then exit(1) will be
+/// called.
+///
+///
+/// \param user_data - An argument which will be passed to the installed error
+/// handler.
+void install_bad_alloc_error_handler(fatal_error_handler_t handler,
+ void *user_data = nullptr);
+
+/// Restores default bad alloc error handling behavior.
+void remove_bad_alloc_error_handler();
+
+/// Reports a bad alloc error, calling any user defined bad alloc
+/// error handler. In contrast to the generic 'report_fatal_error'
+/// functions, this function is expected to return, e.g. the user
+/// defined error handler throws an exception.
+///
+/// Note: When throwing an exception in the bad alloc handler, make sure that
+/// the following unwind succeeds, e.g. do not trigger additional allocations
+/// in the unwind chain.
+///
+/// If no error handler is installed (default), then a bad_alloc exception
+/// is thrown if LLVM is compiled with exception support, otherwise an assertion
+/// is triggered.
+void report_bad_alloc_error(const char *Reason, bool GenCrashDiag = true);
+
+/// This function calls abort(), and prints the optional message to stderr.
+/// Use the llvm_unreachable macro (that adds location info), instead of
+/// calling this function directly.
+LLVM_ATTRIBUTE_NORETURN void
+llvm_unreachable_internal(const char *msg = nullptr, const char *file = nullptr,
+ unsigned line = 0);
}
/// Marks that the current location is not supposed to be reachable.
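For context, a minimal sketch of installing a bad-alloc handler, assuming the existing fatal_error_handler_t signature (void *user_data, const std::string &reason, bool gen_crash_diag):

#include "llvm/Support/ErrorHandling.h"
#include <cstdlib>
#include <string>

static void dieOnBadAlloc(void *, const std::string &, bool) {
  // Must not allocate here: a failed allocation is what triggered the call.
  std::abort();
}

void installBadAllocHandler() {
  llvm::install_bad_alloc_error_handler(dieOnBadAlloc);
  // Code that later detects a failed allocation would report it via
  // llvm::report_bad_alloc_error("out of memory");
}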
diff --git a/include/llvm/Support/GenericDomTreeConstruction.h b/include/llvm/Support/GenericDomTreeConstruction.h
index 9edf03aa3621..a0fec668e05c 100644
--- a/include/llvm/Support/GenericDomTreeConstruction.h
+++ b/include/llvm/Support/GenericDomTreeConstruction.h
@@ -32,6 +32,20 @@
namespace llvm {
namespace DomTreeBuilder {
+template <typename NodePtr, bool Inverse>
+struct ChildrenGetter {
+ static auto Get(NodePtr N) -> decltype(reverse(children<NodePtr>(N))) {
+ return reverse(children<NodePtr>(N));
+ }
+};
+
+template <typename NodePtr>
+struct ChildrenGetter<NodePtr, true> {
+ static auto Get(NodePtr N) -> decltype(inverse_children<NodePtr>(N)) {
+ return inverse_children<NodePtr>(N);
+ }
+};
+
// Information record used by Semi-NCA during tree construction.
template <typename NodeT>
struct SemiNCAInfo {
@@ -45,6 +59,7 @@ struct SemiNCAInfo {
unsigned Semi = 0;
NodePtr Label = nullptr;
NodePtr IDom = nullptr;
+ SmallVector<NodePtr, 2> ReverseChildren;
};
std::vector<NodePtr> NumToNode;
@@ -79,66 +94,49 @@ struct SemiNCAInfo {
.get();
}
- // External storage for depth first iterator that reuses the info lookup map
- // SemiNCAInfo already has. We don't have a set, but a map instead, so we are
- // converting the one argument insert calls.
- struct df_iterator_dom_storage {
- public:
- using BaseSet = decltype(NodeToInfo);
- df_iterator_dom_storage(BaseSet &Storage) : Storage(Storage) {}
-
- using iterator = typename BaseSet::iterator;
- std::pair<iterator, bool> insert(NodePtr N) {
- return Storage.insert({N, InfoRec()});
- }
- void completed(NodePtr) {}
-
- private:
- BaseSet &Storage;
- };
-
- df_iterator_dom_storage getStorage() { return {NodeToInfo}; }
+ static bool AlwaysDescend(NodePtr, NodePtr) { return true; }
- unsigned runReverseDFS(NodePtr V, unsigned N) {
- auto DFStorage = getStorage();
+ // Custom DFS implementation which can skip nodes based on a provided
+ // predicate. It also collects ReverseChildren so that we don't have to spend
+ // time getting predecessors in SemiNCA.
+ template <bool Inverse, typename DescendCondition>
+ unsigned runDFS(NodePtr V, unsigned LastNum, DescendCondition Condition,
+ unsigned AttachToNum) {
+ assert(V);
+ SmallVector<NodePtr, 64> WorkList = {V};
+ if (NodeToInfo.count(V) != 0) NodeToInfo[V].Parent = AttachToNum;
- bool IsChildOfArtificialExit = (N != 0);
- for (auto I = idf_ext_begin(V, DFStorage), E = idf_ext_end(V, DFStorage);
- I != E; ++I) {
- NodePtr BB = *I;
+ while (!WorkList.empty()) {
+ const NodePtr BB = WorkList.pop_back_val();
auto &BBInfo = NodeToInfo[BB];
- BBInfo.DFSNum = BBInfo.Semi = ++N;
+
+ // Visited nodes always have positive DFS numbers.
+ if (BBInfo.DFSNum != 0) continue;
+ BBInfo.DFSNum = BBInfo.Semi = ++LastNum;
BBInfo.Label = BB;
- // Set the parent to the top of the visited stack. The stack includes us,
- // and is 1 based, so we subtract to account for both of these.
- if (I.getPathLength() > 1)
- BBInfo.Parent = NodeToInfo[I.getPath(I.getPathLength() - 2)].DFSNum;
- NumToNode.push_back(BB); // NumToNode[n] = V;
+ NumToNode.push_back(BB);
+
+ for (const NodePtr Succ : ChildrenGetter<NodePtr, Inverse>::Get(BB)) {
+ const auto SIT = NodeToInfo.find(Succ);
+ // Don't visit nodes more than once but remember to collect
+ // ReverseChildren.
+ if (SIT != NodeToInfo.end() && SIT->second.DFSNum != 0) {
+ if (Succ != BB) SIT->second.ReverseChildren.push_back(BB);
+ continue;
+ }
- if (IsChildOfArtificialExit)
- BBInfo.Parent = 1;
+ if (!Condition(BB, Succ)) continue;
- IsChildOfArtificialExit = false;
+ // It's fine to add Succ to the map, because we know that it will be
+ // visited later.
+ auto &SuccInfo = NodeToInfo[Succ];
+ WorkList.push_back(Succ);
+ SuccInfo.Parent = LastNum;
+ SuccInfo.ReverseChildren.push_back(BB);
+ }
}
- return N;
- }
-
- unsigned runForwardDFS(NodePtr V, unsigned N) {
- auto DFStorage = getStorage();
- for (auto I = df_ext_begin(V, DFStorage), E = df_ext_end(V, DFStorage);
- I != E; ++I) {
- NodePtr BB = *I;
- auto &BBInfo = NodeToInfo[BB];
- BBInfo.DFSNum = BBInfo.Semi = ++N;
- BBInfo.Label = BB;
- // Set the parent to the top of the visited stack. The stack includes us,
- // and is 1 based, so we subtract to account for both of these.
- if (I.getPathLength() > 1)
- BBInfo.Parent = NodeToInfo[I.getPath(I.getPathLength() - 2)].DFSNum;
- NumToNode.push_back(BB); // NumToNode[n] = V;
- }
- return N;
+ return LastNum;
}
NodePtr eval(NodePtr VIn, unsigned LastLinked) {
@@ -181,31 +179,14 @@ struct SemiNCAInfo {
template <typename NodeType>
void runSemiNCA(DomTreeT &DT, unsigned NumBlocks) {
- unsigned N = 0;
- NumToNode.push_back(nullptr);
-
- bool MultipleRoots = (DT.Roots.size() > 1);
- if (MultipleRoots) {
- auto &BBInfo = NodeToInfo[nullptr];
- BBInfo.DFSNum = BBInfo.Semi = ++N;
- BBInfo.Label = nullptr;
-
- NumToNode.push_back(nullptr); // NumToNode[n] = V;
- }
-
// Step #1: Number blocks in depth-first order and initialize variables used
// in later stages of the algorithm.
- if (DT.isPostDominator()){
- for (unsigned i = 0, e = static_cast<unsigned>(DT.Roots.size());
- i != e; ++i)
- N = runReverseDFS(DT.Roots[i], N);
- } else {
- N = runForwardDFS(DT.Roots[0], N);
- }
+ const unsigned N = doFullDFSWalk(DT, AlwaysDescend);
// It might be that some blocks did not get a DFS number (e.g., blocks of
// infinite loops). In these cases an artificial exit node is required.
- MultipleRoots |= (DT.isPostDominator() && N != NumBlocks);
+ const bool MultipleRoots =
+ DT.Roots.size() > 1 || (DT.isPostDominator() && N != NumBlocks);
// Initialize IDoms to spanning tree parents.
for (unsigned i = 1; i <= N; ++i) {
@@ -221,7 +202,7 @@ struct SemiNCAInfo {
// Initialize the semi dominator to point to the parent node.
WInfo.Semi = WInfo.Parent;
- for (const auto &N : inverse_children<NodeType>(W))
+ for (const auto &N : WInfo.ReverseChildren)
if (NodeToInfo.count(N)) { // Only if this predecessor is reachable!
unsigned SemiU = NodeToInfo[eval(N, i + 1)].Semi;
if (SemiU < WInfo.Semi)
@@ -279,14 +260,27 @@ struct SemiNCAInfo {
}
}
- void doFullDFSWalk(const DomTreeT &DT) {
- NumToNode.push_back(nullptr);
+ template <typename DescendCondition>
+ unsigned doFullDFSWalk(const DomTreeT &DT, DescendCondition DC) {
unsigned Num = 0;
- for (auto *Root : DT.Roots)
- if (!DT.isPostDominator())
- Num = runForwardDFS(Root, Num);
- else
- Num = runReverseDFS(Root, Num);
+ NumToNode.push_back(nullptr);
+
+ if (DT.Roots.size() > 1) {
+ auto &BBInfo = NodeToInfo[nullptr];
+ BBInfo.DFSNum = BBInfo.Semi = ++Num;
+ BBInfo.Label = nullptr;
+
+ NumToNode.push_back(nullptr); // NumToNode[n] = V;
+ }
+
+ if (DT.isPostDominator()) {
+ for (auto *Root : DT.Roots) Num = runDFS<true>(Root, Num, DC, 1);
+ } else {
+ assert(DT.Roots.size() == 1);
+ Num = runDFS<false>(DT.Roots[0], Num, DC, Num);
+ }
+
+ return Num;
}
static void PrintBlockOrNullptr(raw_ostream &O, NodePtr Obj) {
@@ -299,7 +293,7 @@ struct SemiNCAInfo {
// Checks if the tree contains all reachable nodes in the input graph.
bool verifyReachability(const DomTreeT &DT) {
clear();
- doFullDFSWalk(DT);
+ doFullDFSWalk(DT, AlwaysDescend);
for (auto &NodeToTN : DT.DomTreeNodes) {
const TreeNodePtr TN = NodeToTN.second.get();
@@ -356,7 +350,7 @@ struct SemiNCAInfo {
// NCD(From, To) == IDom(To) or To.
bool verifyNCD(const DomTreeT &DT) {
clear();
- doFullDFSWalk(DT);
+ doFullDFSWalk(DT, AlwaysDescend);
for (auto &BlockToInfo : NodeToInfo) {
auto &Info = BlockToInfo.second;
@@ -440,8 +434,9 @@ struct SemiNCAInfo {
if (!BB || TN->getChildren().empty()) continue;
clear();
- NodeToInfo.insert({BB, {}});
- doFullDFSWalk(DT);
+ doFullDFSWalk(DT, [BB](NodePtr From, NodePtr To) {
+ return From != BB && To != BB;
+ });
for (TreeNodePtr Child : TN->getChildren())
if (NodeToInfo.count(Child->getBlock()) != 0) {
@@ -473,8 +468,10 @@ struct SemiNCAInfo {
const auto &Siblings = TN->getChildren();
for (const TreeNodePtr N : Siblings) {
clear();
- NodeToInfo.insert({N->getBlock(), {}});
- doFullDFSWalk(DT);
+ NodePtr BBN = N->getBlock();
+ doFullDFSWalk(DT, [BBN](NodePtr From, NodePtr To) {
+ return From != BBN && To != BBN;
+ });
for (const TreeNodePtr S : Siblings) {
if (S == N) continue;
diff --git a/include/llvm/Support/ReverseIteration.h b/include/llvm/Support/ReverseIteration.h
new file mode 100644
index 000000000000..cb97b60f06dd
--- /dev/null
+++ b/include/llvm/Support/ReverseIteration.h
@@ -0,0 +1,17 @@
+#ifndef LLVM_SUPPORT_REVERSEITERATION_H
+#define LLVM_SUPPORT_REVERSEITERATION_H
+
+#include "llvm/Config/abi-breaking.h"
+
+namespace llvm {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+template <class T = void> struct ReverseIterate { static bool value; };
+#if LLVM_ENABLE_REVERSE_ITERATION
+template <class T> bool ReverseIterate<T>::value = true;
+#else
+template <class T> bool ReverseIterate<T>::value = false;
+#endif
+#endif
+}
+
+#endif
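For context, the flag is only meaningful in ABI-breaking-checks builds, where container code can consult it to flip iteration order. A minimal sketch:

#include "llvm/Support/ReverseIteration.h"

static bool shouldReverseIterate() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  return llvm::ReverseIterate<bool>::value;
#else
  return false;
#endif
}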
diff --git a/include/llvm/Support/UnicodeCharRanges.h b/include/llvm/Support/UnicodeCharRanges.h
index d4d4d8eb84a4..4c655833b396 100644
--- a/include/llvm/Support/UnicodeCharRanges.h
+++ b/include/llvm/Support/UnicodeCharRanges.h
@@ -18,11 +18,11 @@
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#define DEBUG_TYPE "unicode"
+
namespace llvm {
namespace sys {
-#define DEBUG_TYPE "unicode"
-
/// \brief Represents a closed range of Unicode code points [Lower, Upper].
struct UnicodeCharRange {
uint32_t Lower;
@@ -99,10 +99,9 @@ private:
const CharRanges Ranges;
};
-#undef DEBUG_TYPE // "unicode"
-
} // namespace sys
} // namespace llvm
+#undef DEBUG_TYPE // "unicode"
#endif // LLVM_SUPPORT_UNICODECHARRANGES_H
diff --git a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index 3a3118139bcb..178b08d7b8b7 100644
--- a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -64,6 +64,7 @@ def : GINodeEquiv<G_FREM, frem>;
def : GINodeEquiv<G_FPOW, fpow>;
def : GINodeEquiv<G_FEXP2, fexp2>;
def : GINodeEquiv<G_FLOG2, flog2>;
+def : GINodeEquiv<G_INTRINSIC, intrinsic_wo_chain>;
def : GINodeEquiv<G_BR, br>;
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 2fc3ec996e7f..1843a2eed9bf 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -1545,6 +1545,16 @@ public:
return None;
}
+ /// Return an array that contains the MMO target flag values and their
+ /// names.
+ ///
+ /// MIR Serialization is able to serialize only the MMO target flags that are
+ /// defined by this method.
+ virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
+ getSerializableMachineMemOperandTargetFlags() const {
+ return None;
+ }
+
/// Determines whether \p Inst is a tail call instruction. Override this
/// method on targets that do not properly set MCID::Return and MCID::Call on
/// tail call instructions."
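For context, a sketch of what a target's override of the new hook could return so that MIR serialization can print and parse the flag by name; the flag string is made up and MachineMemOperand::MOTargetFlag1 is assumed to be one of the generic target-flag slots:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include <utility>
using namespace llvm;

ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
mySerializableMMOTargetFlags() {
  static const std::pair<MachineMemOperand::Flags, const char *> Flags[] = {
      {MachineMemOperand::MOTargetFlag1, "mytarget-nontemporal"}};
  return makeArrayRef(Flags);
}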
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 964d6314b127..60a03bdc182d 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -415,7 +415,8 @@ public:
virtual bool mergeStoresAfterLegalization() const { return false; }
/// Returns if it's reasonable to merge stores to MemVT size.
- virtual bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT) const {
+ virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
+ const SelectionDAG &DAG) const {
return true;
}
@@ -2726,6 +2727,18 @@ public:
return true;
}
+ // Return true if it is profitable to combine a BUILD_VECTOR to a TRUNCATE.
+ // Example of such a combine:
+ // v4i32 build_vector((extract_elt V, 0),
+ // (extract_elt V, 2),
+ // (extract_elt V, 4),
+ // (extract_elt V, 6))
+ // -->
+ // v4i32 truncate (bitcast V to v4i64)
+ virtual bool isDesirableToCombineBuildVectorToTruncate() const {
+ return false;
+ }
+
/// Return true if the target has native support for the specified value type
/// and it is 'desirable' to use the type for the given node type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
@@ -2815,6 +2828,9 @@ public:
// TargetLowering::LowerCall that perform tail call conversions.
bool IsTailCall = false;
+ // Is Call lowering done post SelectionDAG type legalization.
+ bool IsPostTypeLegalization = false;
+
unsigned NumFixedArgs = -1;
CallingConv::ID CallConv = CallingConv::C;
SDValue Callee;
@@ -2937,6 +2953,11 @@ public:
return *this;
}
+ CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) {
+ IsPostTypeLegalization = Value;
+ return *this;
+ }
+
ArgListTy &getArgs() {
return Args;
}
@@ -3055,6 +3076,13 @@ public:
return Chain;
}
+ /// This callback is used to inspect load/store instructions and add
+ /// target-specific MachineMemOperand flags to them. The default
+ /// implementation does nothing.
+ virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const {
+ return MachineMemOperand::MONone;
+ }
+
/// This callback is invoked by the type legalizer to legalize nodes with an
/// illegal operand type but legal result types. It replaces the
/// LowerOperation callback in the type Legalizer. The reason we can not do
diff --git a/include/llvm/Transforms/Scalar/GVN.h b/include/llvm/Transforms/Scalar/GVN.h
index 4c585a20021c..f25ab40640df 100644
--- a/include/llvm/Transforms/Scalar/GVN.h
+++ b/include/llvm/Transforms/Scalar/GVN.h
@@ -68,21 +68,6 @@ public:
class ValueTable {
DenseMap<Value *, uint32_t> valueNumbering;
DenseMap<Expression, uint32_t> expressionNumbering;
-
- // Expressions is the vector of Expression. ExprIdx is the mapping from
- // value number to the index of Expression in Expressions. We use it
- // instead of a DenseMap because filling such mapping is faster than
- // filling a DenseMap and the compile time is a little better.
- uint32_t nextExprNumber;
- std::vector<Expression> Expressions;
- std::vector<uint32_t> ExprIdx;
- // Value number to PHINode mapping. Used for phi-translate in scalarpre.
- DenseMap<uint32_t, PHINode *> NumberingPhi;
- // Cache for phi-translate in scalarpre.
- typedef DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>
- PhiTranslateMap;
- PhiTranslateMap PhiTranslateTable;
-
AliasAnalysis *AA;
MemoryDependenceResults *MD;
DominatorTree *DT;
@@ -94,10 +79,6 @@ public:
Value *LHS, Value *RHS);
Expression createExtractvalueExpr(ExtractValueInst *EI);
uint32_t lookupOrAddCall(CallInst *C);
- uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
- uint32_t Num, GVN &Gvn);
- std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
- bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);
public:
ValueTable();
@@ -106,11 +87,9 @@ public:
~ValueTable();
uint32_t lookupOrAdd(Value *V);
- uint32_t lookup(Value *V, bool Verify = true) const;
+ uint32_t lookup(Value *V) const;
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
Value *LHS, Value *RHS);
- uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
- uint32_t Num, GVN &Gvn);
bool exists(Value *V) const;
void add(Value *V, uint32_t num);
void clear();
@@ -152,10 +131,6 @@ private:
SmallMapVector<llvm::Value *, llvm::Constant *, 4> ReplaceWithConstMap;
SmallVector<Instruction *, 8> InstrsToErase;
- // Map the block to reversed postorder traversal number. It is used to
- // find back edge easily.
- DenseMap<const BasicBlock *, uint32_t> BlockRPONumber;
-
typedef SmallVector<NonLocalDepResult, 64> LoadDepVect;
typedef SmallVector<gvn::AvailableValueInBlock, 64> AvailValInBlkVect;
typedef SmallVector<BasicBlock *, 64> UnavailBlkVect;
@@ -239,7 +214,7 @@ private:
bool performPRE(Function &F);
bool performScalarPRE(Instruction *I);
bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
- BasicBlock *Curr, unsigned int ValNo);
+ unsigned int ValNo);
Value *findLeader(const BasicBlock *BB, uint32_t num);
void cleanupGlobalSets();
void verifyRemoved(const Instruction *I) const;
@@ -251,7 +226,6 @@ private:
bool processFoldableCondBr(BranchInst *BI);
void addDeadBlock(BasicBlock *BB);
void assignValNumForDeadCode();
- void assignBlockRPONumber(Function &F);
};
/// Create a legacy GVN pass. This also allows parameterizing whether or not
diff --git a/include/llvm/Transforms/Utils/LowerMemIntrinsics.h b/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
index e4906b709e4b..4554b5cbc644 100644
--- a/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
+++ b/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
@@ -17,21 +17,39 @@
namespace llvm {
+class ConstantInt;
class Instruction;
class MemCpyInst;
class MemMoveInst;
class MemSetInst;
+class TargetTransformInfo;
class Value;
/// Emit a loop implementing the semantics of llvm.memcpy with the equivalent
/// arguments at \p InsertBefore.
-void createMemCpyLoop(Instruction *InsertBefore,
- Value *SrcAddr, Value *DstAddr, Value *CopyLen,
- unsigned SrcAlign, unsigned DestAlign,
+void createMemCpyLoop(Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr,
+ Value *CopyLen, unsigned SrcAlign, unsigned DestAlign,
bool SrcIsVolatile, bool DstIsVolatile);
+/// Emit a loop implementing the semantics of llvm.memcpy where the size is not
+/// a compile-time constant. Loop will be inserted at \p InsertBefore.
+void createMemCpyLoopUnknownSize(Instruction *InsertBefore, Value *SrcAddr,
+ Value *DstAddr, Value *CopyLen,
+ unsigned SrcAlign, unsigned DestAlign,
+ bool SrcIsVolatile, bool DstIsVolatile,
+ const TargetTransformInfo &TTI);
+
+/// Emit a loop implementing the semantics of an llvm.memcpy whose size is a
+/// compile-time constant. Loop is inserted at \p InsertBefore.
+void createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
+ Value *DstAddr, ConstantInt *CopyLen,
+ unsigned SrcAlign, unsigned DestAlign,
+ bool SrcIsVolatile, bool DstIsVolatile,
+ const TargetTransformInfo &TTI);
+
+
/// Expand \p MemCpy as a loop. \p MemCpy is not deleted.
-void expandMemCpyAsLoop(MemCpyInst *MemCpy);
+void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI);
/// Expand \p MemMove as a loop. \p MemMove is not deleted.
void expandMemMoveAsLoop(MemMoveInst *MemMove);
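For context, a minimal sketch of a pass lowering an llvm.memcpy through the TTI-aware entry point; since the expansion does not delete the intrinsic, the caller erases it:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
using namespace llvm;

void lowerMemCpyCall(MemCpyInst *MCI, const TargetTransformInfo &TTI) {
  // Emits either the known-size or the unknown-size copy loop at MCI.
  expandMemCpyAsLoop(MCI, TTI);
  MCI->eraseFromParent(); // the intrinsic call itself is not removed
}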
diff --git a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
index b0448fed9f4d..2dd205d8b2af 100644
--- a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
+++ b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -22,10 +22,10 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
-namespace llvm {
-
#define DEBUG_TYPE "ssaupdater"
+namespace llvm {
+
class CastInst;
class PHINode;
template<typename T> class SSAUpdaterTraits;
@@ -453,8 +453,8 @@ public:
}
};
-#undef DEBUG_TYPE // "ssaupdater"
+} // end llvm namespace
-} // End llvm namespace
+#undef DEBUG_TYPE // "ssaupdater"
-#endif
+#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
diff --git a/include/llvm/module.modulemap b/include/llvm/module.modulemap
index dd419e861316..766198bbc5de 100644
--- a/include/llvm/module.modulemap
+++ b/include/llvm/module.modulemap
@@ -23,6 +23,7 @@ module LLVM_Backend {
exclude header "CodeGen/CommandFlags.h"
exclude header "CodeGen/LinkAllAsmWriterComponents.h"
exclude header "CodeGen/LinkAllCodegenComponents.h"
+ exclude header "CodeGen/GlobalISel/InstructionSelectorImpl.h"
// These are intended for (repeated) textual inclusion.
textual header "CodeGen/DIEValue.def"
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index b52a1d7b24d6..e682a644ef2c 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -1006,7 +1006,7 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
// Because they cannot partially overlap and because fields in an array
// cannot overlap, if we can prove the final indices are different between
// GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.
-
+
// If the last indices are constants, we've already checked they don't
// equal each other so we can exit early.
if (C1 && C2)
diff --git a/lib/Analysis/BranchProbabilityInfo.cpp b/lib/Analysis/BranchProbabilityInfo.cpp
index 23d5a887c34a..a329e5ad48c9 100644
--- a/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/lib/Analysis/BranchProbabilityInfo.cpp
@@ -538,7 +538,7 @@ bool BranchProbabilityInfo::calcZeroHeuristics(const BasicBlock *BB,
// InstCombine canonicalizes X <= 0 into X < 1.
// X <= 0 -> Unlikely
isProb = false;
- } else if (CV->isAllOnesValue()) {
+ } else if (CV->isMinusOne()) {
switch (CI->getPredicate()) {
case CmpInst::ICMP_EQ:
// X == -1 -> Unlikely
diff --git a/lib/Analysis/CGSCCPassManager.cpp b/lib/Analysis/CGSCCPassManager.cpp
index 9d4521221f47..3ddefc6520a7 100644
--- a/lib/Analysis/CGSCCPassManager.cpp
+++ b/lib/Analysis/CGSCCPassManager.cpp
@@ -196,18 +196,117 @@ FunctionAnalysisManagerCGSCCProxy::run(LazyCallGraph::SCC &C,
bool FunctionAnalysisManagerCGSCCProxy::Result::invalidate(
LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
CGSCCAnalysisManager::Invalidator &Inv) {
- for (LazyCallGraph::Node &N : C)
- FAM->invalidate(N.getFunction(), PA);
+ // If literally everything is preserved, we're done.
+ if (PA.areAllPreserved())
+ return false; // This is still a valid proxy.
+
+ // If this proxy isn't marked as preserved, then even if the result remains
+ // valid, the key itself may no longer be valid, so we clear everything.
+ //
+ // Note that in order to preserve this proxy, a module pass must ensure that
+ // the FAM has been completely updated to handle the deletion of functions.
+ // Specifically, any FAM-cached results for those functions need to have been
+ // forcibly cleared. When preserved, this proxy will only invalidate results
+ // cached on functions *still in the module* at the end of the module pass.
+ auto PAC = PA.getChecker<FunctionAnalysisManagerCGSCCProxy>();
+ if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<LazyCallGraph::SCC>>()) {
+ for (LazyCallGraph::Node &N : C)
+ FAM->clear(N.getFunction());
+
+ return true;
+ }
+
+ // Directly check if the relevant set is preserved.
+ bool AreFunctionAnalysesPreserved =
+ PA.allAnalysesInSetPreserved<AllAnalysesOn<Function>>();
+
+ // Now walk all the functions to see if any inner analysis invalidation is
+ // necessary.
+ for (LazyCallGraph::Node &N : C) {
+ Function &F = N.getFunction();
+ Optional<PreservedAnalyses> FunctionPA;
+
+ // Check to see whether the preserved set needs to be pruned based on
+ // SCC-level analysis invalidation that triggers deferred invalidation
+ // registered with the outer analysis manager proxy for this function.
+ if (auto *OuterProxy =
+ FAM->getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F))
+ for (const auto &OuterInvalidationPair :
+ OuterProxy->getOuterInvalidations()) {
+ AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
+ const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
+ if (Inv.invalidate(OuterAnalysisID, C, PA)) {
+ if (!FunctionPA)
+ FunctionPA = PA;
+ for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
+ FunctionPA->abandon(InnerAnalysisID);
+ }
+ }
+
+ // Check if we needed a custom PA set, and if so we'll need to run the
+ // inner invalidation.
+ if (FunctionPA) {
+ FAM->invalidate(F, *FunctionPA);
+ continue;
+ }
- // This proxy doesn't need to handle invalidation itself. Instead, the
- // module-level CGSCC proxy handles it above by ensuring that if the
- // module-level FAM proxy becomes invalid the entire SCC layer, which
- // includes this proxy, is cleared.
+ // Otherwise we only need to do invalidation if the original PA set didn't
+ // preserve all function analyses.
+ if (!AreFunctionAnalysesPreserved)
+ FAM->invalidate(F, PA);
+ }
+
+ // Return false to indicate that this result is still a valid proxy.
return false;
}
} // End llvm namespace
+/// When a new SCC is created for the graph and there might be function
+/// analysis results cached for the functions now in that SCC two forms of
+/// updates are required.
+///
+/// First, a proxy from the SCC to the FunctionAnalysisManager needs to be
+/// created so that any subsequent invalidation events to the SCC are
+/// propagated to the function analysis results cached for functions within it.
+///
+/// Second, if any of the functions within the SCC have analysis results with
+/// outer analysis dependencies, then those dependencies would point to the
+/// *wrong* SCC's analysis result. We forcibly invalidate the necessary
+/// function analyses so that they don't retain stale handles.
+static void updateNewSCCFunctionAnalyses(LazyCallGraph::SCC &C,
+ LazyCallGraph &G,
+ CGSCCAnalysisManager &AM) {
+ // Get the relevant function analysis manager.
+ auto &FAM =
+ AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, G).getManager();
+
+ // Now walk the functions in this SCC and invalidate any function analysis
+ // results that might have outer dependencies on an SCC analysis.
+ for (LazyCallGraph::Node &N : C) {
+ Function &F = N.getFunction();
+
+ auto *OuterProxy =
+ FAM.getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F);
+ if (!OuterProxy)
+ // No outer analyses were queried, nothing to do.
+ continue;
+
+ // Forcibly abandon all the inner analyses with dependencies, but
+ // invalidate nothing else.
+ auto PA = PreservedAnalyses::all();
+ for (const auto &OuterInvalidationPair :
+ OuterProxy->getOuterInvalidations()) {
+ const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
+ for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
+ PA.abandon(InnerAnalysisID);
+ }
+
+ // Now invalidate anything we found.
+ FAM.invalidate(F, PA);
+ }
+}
+
namespace {
/// Helper function to update both the \c CGSCCAnalysisManager \p AM and the \c
/// CGSCCPassManager's \c CGSCCUpdateResult \p UR based on a range of newly
@@ -236,7 +335,6 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
dbgs() << "Enqueuing the existing SCC in the worklist:" << *C << "\n";
SCC *OldC = C;
- (void)OldC;
// Update the current SCC. Note that if we have new SCCs, this must actually
// change the SCC.
@@ -245,6 +343,26 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
C = &*NewSCCRange.begin();
assert(G.lookupSCC(N) == C && "Failed to update current SCC!");
+ // If we had a cached FAM proxy originally, we will want to create more of
+ // them for each SCC that was split off.
+ bool NeedFAMProxy =
+ AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(*OldC) != nullptr;
+
+ // We need to propagate an invalidation call to all but the newly current SCC
+ // because the outer pass manager won't do that for us after splitting them.
+ // FIXME: We should accept a PreservedAnalysis from the CG updater so that if
+ // there are preserved analyses we can avoid invalidating them here for
+ // split-off SCCs.
+ // We know however that this will preserve any FAM proxy so go ahead and mark
+ // that.
+ PreservedAnalyses PA;
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ AM.invalidate(*OldC, PA);
+
+ // Ensure the now-current SCC's function analyses are updated.
+ if (NeedFAMProxy)
+ updateNewSCCFunctionAnalyses(*C, G, AM);
+
for (SCC &NewC :
reverse(make_range(std::next(NewSCCRange.begin()), NewSCCRange.end()))) {
assert(C != &NewC && "No need to re-visit the current SCC!");
@@ -252,6 +370,14 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
UR.CWorklist.insert(&NewC);
if (DebugLogging)
dbgs() << "Enqueuing a newly formed SCC:" << NewC << "\n";
+
+ // Ensure new SCCs' function analyses are updated.
+ if (NeedFAMProxy)
+ updateNewSCCFunctionAnalyses(NewC, G, AM);
+
+ // Also propagate a normal invalidation to the new SCC as only the current
+ // will get one from the pass manager infrastructure.
+ AM.invalidate(NewC, PA);
}
return C;
}
@@ -349,14 +475,6 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
// For separate SCCs this is trivial.
RC->switchTrivialInternalEdgeToRef(N, TargetN);
} else {
- // Otherwise we may end up re-structuring the call graph. First,
- // invalidate any SCC analyses. We have to do this before we split
- // functions into new SCCs and lose track of where their analyses are
- // cached.
- // FIXME: We should accept a more precise preserved set here. For
- // example, it might be possible to preserve some function analyses
- // even as the SCC structure is changed.
- AM.invalidate(*C, PreservedAnalyses::none());
// Now update the call graph.
C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G,
N, C, AM, UR, DebugLogging);
@@ -424,13 +542,6 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
continue;
}
- // Otherwise we may end up re-structuring the call graph. First, invalidate
- // any SCC analyses. We have to do this before we split functions into new
- // SCCs and lose track of where their analyses are cached.
- // FIXME: We should accept a more precise preserved set here. For example,
- // it might be possible to preserve some function analyses even as the SCC
- // structure is changed.
- AM.invalidate(*C, PreservedAnalyses::none());
// Now update the call graph.
C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, *RefTarget), G, N,
C, AM, UR, DebugLogging);
@@ -459,25 +570,48 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
// Otherwise we are switching an internal ref edge to a call edge. This
// may merge away some SCCs, and we add those to the UpdateResult. We also
// need to make sure to update the worklist in the event SCCs have moved
- // before the current one in the post-order sequence.
+ // before the current one in the post-order sequence
+ bool HasFunctionAnalysisProxy = false;
auto InitialSCCIndex = RC->find(*C) - RC->begin();
- auto InvalidatedSCCs = RC->switchInternalEdgeToCall(N, *CallTarget);
- if (!InvalidatedSCCs.empty()) {
+ bool FormedCycle = RC->switchInternalEdgeToCall(
+ N, *CallTarget, [&](ArrayRef<SCC *> MergedSCCs) {
+ for (SCC *MergedC : MergedSCCs) {
+ assert(MergedC != &TargetC && "Cannot merge away the target SCC!");
+
+ HasFunctionAnalysisProxy |=
+ AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(
+ *MergedC) != nullptr;
+
+ // Mark that this SCC will no longer be valid.
+ UR.InvalidatedSCCs.insert(MergedC);
+
+ // FIXME: We should really do a 'clear' here to forcibly release
+ // memory, but we don't have a good way of doing that and
+ // preserving the function analyses.
+ auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ AM.invalidate(*MergedC, PA);
+ }
+ });
+
+ // If we formed a cycle by creating this call, we need to update more data
+ // structures.
+ if (FormedCycle) {
C = &TargetC;
assert(G.lookupSCC(N) == C && "Failed to update current SCC!");
- // Any analyses cached for this SCC are no longer precise as the shape
- // has changed by introducing this cycle.
- AM.invalidate(*C, PreservedAnalyses::none());
-
- for (SCC *InvalidatedC : InvalidatedSCCs) {
- assert(InvalidatedC != C && "Cannot invalidate the current SCC!");
- UR.InvalidatedSCCs.insert(InvalidatedC);
+ // If one of the invalidated SCCs had a cached proxy to a function
+ // analysis manager, we need to create a proxy in the new current SCC as
+ // the invalidated SCCs had their functions moved.
+ if (HasFunctionAnalysisProxy)
+ AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G);
- // Also clear any cached analyses for the SCCs that are dead. This
- // isn't really necessary for correctness but can release memory.
- AM.clear(*InvalidatedC);
- }
+ // Any analyses cached for this SCC are no longer precise as the shape
+ // has changed by introducing this cycle. However, we have taken care to
+ // update the proxies so it remains valid.
+ auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ AM.invalidate(*C, PA);
}
auto NewSCCIndex = RC->find(*C) - RC->begin();
if (InitialSCCIndex < NewSCCIndex) {
diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp
index 2093f0fdec12..3b0026ba10e9 100644
--- a/lib/Analysis/CaptureTracking.cpp
+++ b/lib/Analysis/CaptureTracking.cpp
@@ -94,8 +94,8 @@ namespace {
// guarantee that 'I' never reaches 'BeforeHere' through a back-edge or
// by its successors, i.e, prune if:
//
- // (1) BB is an entry block or have no sucessors.
- // (2) There's no path coming back through BB sucessors.
+ // (1) BB is an entry block or have no successors.
+ // (2) There's no path coming back through BB successors.
if (BB == &BB->getParent()->getEntryBlock() ||
!BB->getTerminator()->getNumSuccessors())
return true;
diff --git a/lib/Analysis/DemandedBits.cpp b/lib/Analysis/DemandedBits.cpp
index 926b28d6094a..9c53f9140ca3 100644
--- a/lib/Analysis/DemandedBits.cpp
+++ b/lib/Analysis/DemandedBits.cpp
@@ -143,9 +143,8 @@ void DemandedBits::determineLiveOperandBits(
break;
case Instruction::Shl:
if (OperandNo == 0)
- if (ConstantInt *CI =
- dyn_cast<ConstantInt>(UserI->getOperand(1))) {
- uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
+ if (auto *ShiftAmtC = dyn_cast<ConstantInt>(UserI->getOperand(1))) {
+ uint64_t ShiftAmt = ShiftAmtC->getLimitedValue(BitWidth - 1);
AB = AOut.lshr(ShiftAmt);
// If the shift is nuw/nsw, then the high bits are not dead
@@ -159,9 +158,8 @@ void DemandedBits::determineLiveOperandBits(
break;
case Instruction::LShr:
if (OperandNo == 0)
- if (ConstantInt *CI =
- dyn_cast<ConstantInt>(UserI->getOperand(1))) {
- uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
+ if (auto *ShiftAmtC = dyn_cast<ConstantInt>(UserI->getOperand(1))) {
+ uint64_t ShiftAmt = ShiftAmtC->getLimitedValue(BitWidth - 1);
AB = AOut.shl(ShiftAmt);
// If the shift is exact, then the low bits are not dead
@@ -172,9 +170,8 @@ void DemandedBits::determineLiveOperandBits(
break;
case Instruction::AShr:
if (OperandNo == 0)
- if (ConstantInt *CI =
- dyn_cast<ConstantInt>(UserI->getOperand(1))) {
- uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
+ if (auto *ShiftAmtC = dyn_cast<ConstantInt>(UserI->getOperand(1))) {
+ uint64_t ShiftAmt = ShiftAmtC->getLimitedValue(BitWidth - 1);
AB = AOut.shl(ShiftAmt);
// Because the high input bit is replicated into the
// high-order bits of the result, if we need any of those
diff --git a/lib/Analysis/DependenceAnalysis.cpp b/lib/Analysis/DependenceAnalysis.cpp
index e4d58bf1b4eb..34eccc07f265 100644
--- a/lib/Analysis/DependenceAnalysis.cpp
+++ b/lib/Analysis/DependenceAnalysis.cpp
@@ -3342,7 +3342,8 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
UsefulGEP = isLoopInvariant(SrcPtrSCEV, LI->getLoopFor(Src->getParent())) &&
isLoopInvariant(DstPtrSCEV, LI->getLoopFor(Dst->getParent())) &&
- (SrcGEP->getNumOperands() == DstGEP->getNumOperands());
+ (SrcGEP->getNumOperands() == DstGEP->getNumOperands()) &&
+ isKnownPredicate(CmpInst::ICMP_EQ, SrcPtrSCEV, DstPtrSCEV);
}
unsigned Pairs = UsefulGEP ? SrcGEP->idx_end() - SrcGEP->idx_begin() : 1;
SmallVector<Subscript, 4> Pair(Pairs);
@@ -3371,7 +3372,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
if (Delinearize && CommonLevels > 1) {
if (tryDelinearize(Src, Dst, Pair)) {
- DEBUG(dbgs() << " delinerized GEP\n");
+ DEBUG(dbgs() << " delinearized GEP\n");
Pairs = Pair.size();
}
}
@@ -3796,7 +3797,7 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep,
if (Delinearize && CommonLevels > 1) {
if (tryDelinearize(Src, Dst, Pair)) {
- DEBUG(dbgs() << " delinerized GEP\n");
+ DEBUG(dbgs() << " delinearized GEP\n");
Pairs = Pair.size();
}
}
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index d9e32a3c417e..f6632020b8fc 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -560,7 +560,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
return Y;
/// i1 add -> xor.
- if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
+ if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
return V;
@@ -598,7 +598,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// folding.
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
bool AllowNonInbounds = false) {
- assert(V->getType()->getScalarType()->isPointerTy());
+ assert(V->getType()->isPtrOrPtrVectorTy());
Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
@@ -627,8 +627,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
}
break;
}
- assert(V->getType()->getScalarType()->isPointerTy() &&
- "Unexpected operand type!");
+ assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
} while (Visited.insert(V).second);
Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
@@ -771,7 +770,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
// i1 sub -> xor.
- if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
+ if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
return V;
@@ -902,7 +901,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return X;
// i1 mul -> and.
- if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
+ if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
return V;
@@ -998,7 +997,7 @@ static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
// X % 1 -> 0
// If this is a boolean op (single-bit element type), we can't have
// division-by-zero or remainder-by-zero, so assume the divisor is 1.
- if (match(Op1, m_One()) || Ty->getScalarType()->isIntegerTy(1))
+ if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1))
return IsDiv ? Op0 : Constant::getNullValue(Ty);
return nullptr;
@@ -2251,7 +2250,7 @@ static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
Value *RHS, const SimplifyQuery &Q) {
Type *ITy = GetCompareTy(LHS); // The return type.
Type *OpTy = LHS->getType(); // The operand type.
- if (!OpTy->getScalarType()->isIntegerTy(1))
+ if (!OpTy->isIntOrIntVectorTy(1))
return nullptr;
// A boolean compared to true/false can be simplified in 14 out of the 20
diff --git a/lib/Analysis/LazyCallGraph.cpp b/lib/Analysis/LazyCallGraph.cpp
index b6a9436cc1ec..a4c3e43b4b0c 100644
--- a/lib/Analysis/LazyCallGraph.cpp
+++ b/lib/Analysis/LazyCallGraph.cpp
@@ -456,8 +456,10 @@ updatePostorderSequenceForEdgeInsertion(
return make_range(SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx);
}
-SmallVector<LazyCallGraph::SCC *, 1>
-LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
+bool
+LazyCallGraph::RefSCC::switchInternalEdgeToCall(
+ Node &SourceN, Node &TargetN,
+ function_ref<void(ArrayRef<SCC *> MergeSCCs)> MergeCB) {
assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
SmallVector<SCC *, 1> DeletedSCCs;
@@ -475,7 +477,7 @@ LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
// we've just added more connectivity.
if (&SourceSCC == &TargetSCC) {
SourceN->setEdgeKind(TargetN, Edge::Call);
- return DeletedSCCs;
+ return false; // No new cycle.
}
// At this point we leverage the postorder list of SCCs to detect when the
@@ -488,7 +490,7 @@ LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
int TargetIdx = SCCIndices[&TargetSCC];
if (TargetIdx < SourceIdx) {
SourceN->setEdgeKind(TargetN, Edge::Call);
- return DeletedSCCs;
+ return false; // No new cycle.
}
// Compute the SCCs which (transitively) reach the source.
@@ -555,12 +557,16 @@ LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
SourceSCC, TargetSCC, SCCs, SCCIndices, ComputeSourceConnectedSet,
ComputeTargetConnectedSet);
+ // Run the user's callback on the merged SCCs before we actually merge them.
+ if (MergeCB)
+ MergeCB(makeArrayRef(MergeRange.begin(), MergeRange.end()));
+
// If the merge range is empty, then adding the edge didn't actually form any
// new cycles. We're done.
if (MergeRange.begin() == MergeRange.end()) {
// Now that the SCC structure is finalized, flip the kind to call.
SourceN->setEdgeKind(TargetN, Edge::Call);
- return DeletedSCCs;
+ return false; // No new cycle.
}
#ifndef NDEBUG
@@ -596,8 +602,8 @@ LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
// Now that the SCC structure is finalized, flip the kind to call.
SourceN->setEdgeKind(TargetN, Edge::Call);
- // And we're done!
- return DeletedSCCs;
+ // And we're done, but we did form a new cycle.
+ return true;
}
void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN,
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 9713588537b3..ada600a69b87 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -405,7 +405,7 @@ void Lint::visitMemoryReference(Instruction &I,
Assert(!isa<UndefValue>(UnderlyingObject),
"Undefined behavior: Undef pointer dereference", &I);
Assert(!isa<ConstantInt>(UnderlyingObject) ||
- !cast<ConstantInt>(UnderlyingObject)->isAllOnesValue(),
+ !cast<ConstantInt>(UnderlyingObject)->isMinusOne(),
"Unusual: All-ones pointer dereference", &I);
Assert(!isa<ConstantInt>(UnderlyingObject) ||
!cast<ConstantInt>(UnderlyingObject)->isOne(),
diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp
index ff68810abb82..baf932432a0a 100644
--- a/lib/Analysis/LoopInfo.cpp
+++ b/lib/Analysis/LoopInfo.cpp
@@ -131,13 +131,13 @@ PHINode *Loop::getCanonicalInductionVariable() const {
PHINode *PN = cast<PHINode>(I);
if (ConstantInt *CI =
dyn_cast<ConstantInt>(PN->getIncomingValueForBlock(Incoming)))
- if (CI->isNullValue())
+ if (CI->isZero())
if (Instruction *Inc =
dyn_cast<Instruction>(PN->getIncomingValueForBlock(Backedge)))
if (Inc->getOpcode() == Instruction::Add &&
Inc->getOperand(0) == PN)
if (ConstantInt *CI = dyn_cast<ConstantInt>(Inc->getOperand(1)))
- if (CI->equalsInt(1))
+ if (CI->isOne())
return PN;
}
return nullptr;
@@ -460,7 +460,7 @@ protected:
void UnloopUpdater::updateBlockParents() {
if (Unloop.getNumBlocks()) {
// Perform a post order CFG traversal of all blocks within this loop,
- // propagating the nearest loop from sucessors to predecessors.
+ // propagating the nearest loop from successors to predecessors.
LoopBlocksTraversal Traversal(DFS, LI);
for (BasicBlock *POI : Traversal) {
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index f88d54b21e1e..7327c07499be 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -505,6 +505,22 @@ SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
return unknown();
}
+/// When we're compiling N-bit code, and the user uses parameters that are
+/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
+/// trouble with APInt size issues. This function handles resizing + overflow
+/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
+/// I's value.
+bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
+ // More bits than we can handle. Checking the bit width isn't necessary, but
+ // it's faster than checking active bits, and should give `false` in the
+ // vast majority of cases.
+ if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
+ return false;
+ if (I.getBitWidth() != IntTyBits)
+ I = I.zextOrTrunc(IntTyBits);
+ return true;
+}
+
SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
if (!I.getAllocatedType()->isSized())
return unknown();
@@ -515,8 +531,14 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
Value *ArraySize = I.getArraySize();
if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
- Size *= C->getValue().zextOrSelf(IntTyBits);
- return std::make_pair(align(Size, I.getAlignment()), Zero);
+ APInt NumElems = C->getValue();
+ if (!CheckedZextOrTrunc(NumElems))
+ return unknown();
+
+ bool Overflow;
+ Size = Size.umul_ov(NumElems, Overflow);
+ return Overflow ? unknown() : std::make_pair(align(Size, I.getAlignment()),
+ Zero);
}
return unknown();
}
@@ -561,21 +583,6 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
if (!Arg)
return unknown();
- // When we're compiling N-bit code, and the user uses parameters that are
- // greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
- // trouble with APInt size issues. This function handles resizing + overflow
- // checks for us.
- auto CheckedZextOrTrunc = [&](APInt &I) {
- // More bits than we can handle. Checking the bit width isn't necessary, but
- // it's faster than checking active bits, and should give `false` in the
- // vast majority of cases.
- if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
- return false;
- if (I.getBitWidth() != IntTyBits)
- I = I.zextOrTrunc(IntTyBits);
- return true;
- };
-
APInt Size = Arg->getValue();
if (!CheckedZextOrTrunc(Size))
return unknown();
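
Note on the MemoryBuiltins.cpp hunks above: the CheckedZextOrTrunc lambda that visitCallSite used is promoted to a member function so visitAllocaInst can apply the same width guard, and the array-size multiply now goes through umul_ov so an overflowing alloca size is reported as unknown() instead of silently wrapping. A standalone sketch of that guarded computation, with a caller-supplied index width standing in for IntTyBits and ElemSize assumed to already be IntTyBits wide (illustrative only, not part of the patch):

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    // Returns false if NumElems does not fit in IntTyBits bits or if the
    // multiplication overflows; otherwise stores ElemSize * NumElems in SizeOut.
    static bool checkedAllocaSize(APInt ElemSize, APInt NumElems,
                                  unsigned IntTyBits, APInt &SizeOut) {
      if (NumElems.getBitWidth() > IntTyBits && NumElems.getActiveBits() > IntTyBits)
        return false;                                // needs more than IntTyBits bits
      if (NumElems.getBitWidth() != IntTyBits)
        NumElems = NumElems.zextOrTrunc(IntTyBits);  // normalize the width
      bool Overflow;
      SizeOut = ElemSize.umul_ov(NumElems, Overflow); // unsigned multiply with overflow flag
      return !Overflow;
    }
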
diff --git a/lib/Analysis/ModuleSummaryAnalysis.cpp b/lib/Analysis/ModuleSummaryAnalysis.cpp
index 095647e1bd20..e9e354ebb88f 100644
--- a/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -266,7 +266,7 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
// sample PGO, to enable the same inlines as the profiled optimized binary.
for (auto &I : F.getImportGUIDs())
CallGraphEdges[Index.getOrInsertValueInfo(I)].updateHotness(
- CalleeInfo::HotnessType::Hot);
+ CalleeInfo::HotnessType::Critical);
bool NonRenamableLocal = isNonRenamableLocal(F);
bool NotEligibleForImport =
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 678ad3af5e85..3fb1ab980add 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -326,7 +326,7 @@ bool SCEV::isOne() const {
bool SCEV::isAllOnesValue() const {
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
- return SC->getValue()->isAllOnesValue();
+ return SC->getValue()->isMinusOne();
return false;
}
@@ -2743,7 +2743,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
}
// If we are left with a constant one being multiplied, strip it off.
- if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
+ if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
Ops.erase(Ops.begin());
--Idx;
} else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
@@ -2939,7 +2939,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
"SCEVUDivExpr operand types don't match!");
if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
- if (RHSC->getValue()->equalsInt(1))
+ if (RHSC->getValue()->isOne())
return LHS; // X udiv 1 --> x
// If the denominator is zero, the result of the udiv is undefined. Don't
// try to analyze it, because the resolution chosen here may differ from
@@ -5421,9 +5421,9 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// For an expression like x&255 that merely masks off the high bits,
// use zext(trunc(x)) as the SCEV expression.
if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
- if (CI->isNullValue())
+ if (CI->isZero())
return getSCEV(BO->RHS);
- if (CI->isAllOnesValue())
+ if (CI->isMinusOne())
return getSCEV(BO->LHS);
const APInt &A = CI->getValue();
@@ -5498,7 +5498,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
case Instruction::Xor:
if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
// If the RHS of xor is -1, then this is a not operation.
- if (CI->isAllOnesValue())
+ if (CI->isMinusOne())
return getNotSCEV(getSCEV(BO->LHS));
// Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
@@ -5577,7 +5577,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
if (CI->getValue().uge(BitWidth))
break;
- if (CI->isNullValue())
+ if (CI->isZero())
return getSCEV(BO->LHS); // shift by zero --> noop
uint64_t AShrAmt = CI->getZExtValue();
@@ -7626,7 +7626,7 @@ ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
// to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
// We have not yet seen any such cases.
const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
- if (!StepC || StepC->getValue()->equalsInt(0))
+ if (!StepC || StepC->getValue()->isZero())
return getCouldNotCompute();
// For positive steps (counting up until unsigned overflow):
@@ -7640,7 +7640,7 @@ ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
// Handle unitary steps, which cannot wraparound.
// 1*N = -Start; -1*N = Start (mod 2^BW), so:
// N = Distance (as unsigned)
- if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
+ if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
APInt MaxBECount = getUnsignedRangeMax(Distance);
// When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
@@ -7696,7 +7696,7 @@ ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
// If the value is a constant, check to see if it is known to be non-zero
// already. If so, the backedge will execute zero times.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
- if (!C->getValue()->isNullValue())
+ if (!C->getValue()->isZero())
return getZero(C->getType());
return getCouldNotCompute(); // Otherwise it will loop infinitely.
}
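
The Lint.cpp, LoopInfo.cpp and ScalarEvolution.cpp hunks above, and the VectorUtils.cpp hunk further down, are mechanical renames of the ConstantInt query helpers. A short reference sketch of the equivalences, assuming CI is a valid ConstantInt* (illustrative only):

    #include "llvm/IR/Constants.h"
    using namespace llvm;

    static void constantIntPredicates(const ConstantInt *CI) {
      bool IsZero    = CI->isZero();     // previously CI->isNullValue() or CI->equalsInt(0)
      bool IsOne     = CI->isOne();      // previously CI->equalsInt(1)
      bool IsAllOnes = CI->isMinusOne(); // previously CI->isAllOnesValue()
      (void)IsZero; (void)IsOne; (void)IsAllOnes;
    }
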
diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp
index f938a9a52065..94bbc58541a7 100644
--- a/lib/Analysis/TargetTransformInfo.cpp
+++ b/lib/Analysis/TargetTransformInfo.cpp
@@ -16,6 +16,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <utility>
@@ -23,6 +24,11 @@ using namespace llvm;
#define DEBUG_TYPE "tti"
+static cl::opt<bool> UseWideMemcpyLoopLowering(
+ "use-wide-memcpy-loop-lowering", cl::init(false),
+ cl::desc("Enables the new wide memcpy loop lowering in Transforms/Utils."),
+ cl::Hidden);
+
namespace {
/// \brief No-op implementation of the TTI interface using the utility base
/// classes.
@@ -482,6 +488,25 @@ Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}
+Type *TargetTransformInfo::getMemcpyLoopLoweringType(LLVMContext &Context,
+ Value *Length,
+ unsigned SrcAlign,
+ unsigned DestAlign) const {
+ return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAlign,
+ DestAlign);
+}
+
+void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
+ SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
+ unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const {
+ TTIImpl->getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
+ SrcAlign, DestAlign);
+}
+
+bool TargetTransformInfo::useWideIRMemcpyLoopLowering() const {
+ return UseWideMemcpyLoopLowering;
+}
+
bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
const Function *Callee) const {
return TTIImpl->areInlineCompatible(Caller, Callee);
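
Note on the TargetTransformInfo.cpp addition above: the two new hooks let a target choose the operand type(s) used when a memcpy is expanded into an IR loop, and the hidden -use-wide-memcpy-loop-lowering flag gates the new lowering in Transforms/Utils. A hypothetical target override, only to illustrate the shape of the hook (MyTTIImpl and the 4-byte alignment threshold are made up for this sketch):

    #include "llvm/IR/Type.h"
    using namespace llvm;

    Type *MyTTIImpl::getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                               unsigned SrcAlign,
                                               unsigned DestAlign) const {
      // Copy in 32-bit chunks when both sides are at least 4-byte aligned,
      // otherwise fall back to byte-sized accesses.
      if (SrcAlign >= 4 && DestAlign >= 4)
        return Type::getInt32Ty(Context);
      return Type::getInt8Ty(Context);
    }
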
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index fd6e3a643bf0..9e042da8801d 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -1500,12 +1500,10 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = Known.getBitWidth();
- assert((V->getType()->isIntOrIntVectorTy() ||
- V->getType()->getScalarType()->isPointerTy()) &&
+ assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
+ V->getType()->isPtrOrPtrVectorTy()) &&
"Not integer or pointer type!");
- assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
- (!V->getType()->isIntOrIntVectorTy() ||
- V->getType()->getScalarSizeInBits() == BitWidth) &&
+ assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
"V and Known should have same BitWidth");
(void)BitWidth;
@@ -1952,7 +1950,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
}
// Check if all incoming values are non-zero constant.
bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
- return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
+ return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
});
if (AllNonZeroConstants)
return true;
@@ -4393,7 +4391,7 @@ isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
}
Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
- const DataLayout &DL, bool InvertAPred,
+ const DataLayout &DL, bool LHSIsFalse,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
@@ -4402,26 +4400,51 @@ Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
return None;
Type *OpTy = LHS->getType();
- assert(OpTy->getScalarType()->isIntegerTy(1));
+ assert(OpTy->isIntOrIntVectorTy(1));
// LHS ==> RHS by definition
- if (!InvertAPred && LHS == RHS)
- return true;
+ if (LHS == RHS)
+ return !LHSIsFalse;
if (OpTy->isVectorTy())
// TODO: extending the code below to handle vectors
return None;
assert(OpTy->isIntegerTy(1) && "implied by above");
- ICmpInst::Predicate APred, BPred;
- Value *ALHS, *ARHS;
Value *BLHS, *BRHS;
+ ICmpInst::Predicate BPred;
+ // We expect the RHS to be an icmp.
+ if (!match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
+ return None;
- if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
- !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
+ Value *ALHS, *ARHS;
+ ICmpInst::Predicate APred;
+ // The LHS can be an 'or', 'and', or 'icmp'.
+ if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS)))) {
+ // The remaining tests are all recursive, so bail out if we hit the limit.
+ if (Depth == MaxDepth)
+ return None;
+ // If the result of an 'or' is false, then we know both legs of the 'or' are
+ // false. Similarly, if the result of an 'and' is true, then we know both
+ // legs of the 'and' are true.
+ if ((LHSIsFalse && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
+ (!LHSIsFalse && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
+ if (Optional<bool> Implication = isImpliedCondition(
+ ALHS, RHS, DL, LHSIsFalse, Depth + 1, AC, CxtI, DT))
+ return Implication;
+ if (Optional<bool> Implication = isImpliedCondition(
+ ARHS, RHS, DL, LHSIsFalse, Depth + 1, AC, CxtI, DT))
+ return Implication;
+ return None;
+ }
return None;
+ }
+ // All of the below logic assumes both LHS and RHS are icmps.
+ assert(isa<ICmpInst>(LHS) && isa<ICmpInst>(RHS) && "Expected icmps.");
- if (InvertAPred)
+ // The rest of the logic assumes the LHS condition is true. If that's not the
+ // case, invert the predicate to make it so.
+ if (LHSIsFalse)
APred = CmpInst::getInversePredicate(APred);
// Can we infer anything when the two compares have matching operands?
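
Note on the ValueTracking.cpp rewrite above: the InvertAPred flag becomes LHSIsFalse, and the routine now recurses through 'or'/'and' on the left-hand side, using the facts that a false 'or' forces both legs false and a true 'and' forces both legs true. A minimal caller-side sketch, assuming the trailing analysis parameters keep their declared defaults (illustrative only):

    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Given OrCond = or i1 %a, %b known to be false at some program point, ask
    // whether that forces the icmp RHSCmp to a known value.
    static void queryImplication(Value *OrCond, Value *RHSCmp, const DataLayout &DL) {
      if (Optional<bool> Implied =
              isImpliedCondition(OrCond, RHSCmp, DL, /*LHSIsFalse=*/true)) {
        // *Implied == true means RHSCmp must be true; false means it must be false.
        (void)*Implied;
      }
    }
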
diff --git a/lib/Analysis/VectorUtils.cpp b/lib/Analysis/VectorUtils.cpp
index 0ace8fa382bc..554d132c2ab7 100644
--- a/lib/Analysis/VectorUtils.cpp
+++ b/lib/Analysis/VectorUtils.cpp
@@ -301,7 +301,7 @@ const llvm::Value *llvm::getSplatValue(const Value *V) {
auto *InsertEltInst =
dyn_cast<InsertElementInst>(ShuffleInst->getOperand(0));
if (!InsertEltInst || !isa<ConstantInt>(InsertEltInst->getOperand(2)) ||
- !cast<ConstantInt>(InsertEltInst->getOperand(2))->isNullValue())
+ !cast<ConstantInt>(InsertEltInst->getOperand(2))->isZero())
return nullptr;
return InsertEltInst->getOperand(1);
diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp
index a49276099f19..428bb21fbf51 100644
--- a/lib/AsmParser/LLLexer.cpp
+++ b/lib/AsmParser/LLLexer.cpp
@@ -542,7 +542,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(release);
KEYWORD(acq_rel);
KEYWORD(seq_cst);
- KEYWORD(singlethread);
+ KEYWORD(syncscope);
KEYWORD(nnan);
KEYWORD(ninf);
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index 9ad31125f4b8..717eb0e00f4f 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -1919,20 +1919,42 @@ bool LLParser::parseAllocSizeArguments(unsigned &BaseSizeArg,
}
/// ParseScopeAndOrdering
-/// if isAtomic: ::= 'singlethread'? AtomicOrdering
+/// if isAtomic: ::= SyncScope? AtomicOrdering
/// else: ::=
///
/// This sets Scope and Ordering to the parsed values.
-bool LLParser::ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
+bool LLParser::ParseScopeAndOrdering(bool isAtomic, SyncScope::ID &SSID,
AtomicOrdering &Ordering) {
if (!isAtomic)
return false;
- Scope = CrossThread;
- if (EatIfPresent(lltok::kw_singlethread))
- Scope = SingleThread;
+ return ParseScope(SSID) || ParseOrdering(Ordering);
+}
+
+/// ParseScope
+/// ::= syncscope("singlethread" | "<target scope>")?
+///
+/// This sets synchronization scope ID to the ID of the parsed value.
+bool LLParser::ParseScope(SyncScope::ID &SSID) {
+ SSID = SyncScope::System;
+ if (EatIfPresent(lltok::kw_syncscope)) {
+ auto StartParenAt = Lex.getLoc();
+ if (!EatIfPresent(lltok::lparen))
+ return Error(StartParenAt, "Expected '(' in syncscope");
+
+ std::string SSN;
+ auto SSNAt = Lex.getLoc();
+ if (ParseStringConstant(SSN))
+ return Error(SSNAt, "Expected synchronization scope name");
- return ParseOrdering(Ordering);
+ auto EndParenAt = Lex.getLoc();
+ if (!EatIfPresent(lltok::rparen))
+ return Error(EndParenAt, "Expected ')' in syncscope");
+
+ SSID = Context.getOrInsertSyncScopeID(SSN);
+ }
+
+ return false;
}
/// ParseOrdering
@@ -3061,7 +3083,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
} else {
assert(Opc == Instruction::ICmp && "Unexpected opcode for CmpInst!");
if (!Val0->getType()->isIntOrIntVectorTy() &&
- !Val0->getType()->getScalarType()->isPointerTy())
+ !Val0->getType()->isPtrOrPtrVectorTy())
return Error(ID.Loc, "icmp requires pointer or integer operands");
ID.ConstantVal = ConstantExpr::getICmp(Pred, Val0, Val1);
}
@@ -3210,7 +3232,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
if (Opc == Instruction::GetElementPtr) {
if (Elts.size() == 0 ||
- !Elts[0]->getType()->getScalarType()->isPointerTy())
+ !Elts[0]->getType()->isPtrOrPtrVectorTy())
return Error(ID.Loc, "base of getelementptr must be a pointer");
Type *BaseType = Elts[0]->getType();
@@ -3226,7 +3248,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end());
for (Constant *Val : Indices) {
Type *ValTy = Val->getType();
- if (!ValTy->getScalarType()->isIntegerTy())
+ if (!ValTy->isIntOrIntVectorTy())
return Error(ID.Loc, "getelementptr index must be an integer");
if (ValTy->isVectorTy()) {
unsigned ValNumEl = ValTy->getVectorNumElements();
@@ -5697,7 +5719,7 @@ bool LLParser::ParseCompare(Instruction *&Inst, PerFunctionState &PFS,
} else {
assert(Opc == Instruction::ICmp && "Unknown opcode for CmpInst!");
if (!LHS->getType()->isIntOrIntVectorTy() &&
- !LHS->getType()->getScalarType()->isPointerTy())
+ !LHS->getType()->isPtrOrPtrVectorTy())
return Error(Loc, "icmp requires integer operands");
Inst = new ICmpInst(CmpInst::Predicate(Pred), LHS, RHS);
}
@@ -6100,7 +6122,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
bool AteExtraComma = false;
bool isAtomic = false;
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
+ SyncScope::ID SSID = SyncScope::System;
if (Lex.getKind() == lltok::kw_atomic) {
isAtomic = true;
@@ -6118,7 +6140,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
if (ParseType(Ty) ||
ParseToken(lltok::comma, "expected comma after load's type") ||
ParseTypeAndValue(Val, Loc, PFS) ||
- ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
+ ParseScopeAndOrdering(isAtomic, SSID, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
return true;
@@ -6134,7 +6156,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
return Error(ExplicitTypeLoc,
"explicit pointee type doesn't match operand's pointee type");
- Inst = new LoadInst(Ty, Val, "", isVolatile, Alignment, Ordering, Scope);
+ Inst = new LoadInst(Ty, Val, "", isVolatile, Alignment, Ordering, SSID);
return AteExtraComma ? InstExtraComma : InstNormal;
}
@@ -6149,7 +6171,7 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
bool AteExtraComma = false;
bool isAtomic = false;
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
+ SyncScope::ID SSID = SyncScope::System;
if (Lex.getKind() == lltok::kw_atomic) {
isAtomic = true;
@@ -6165,7 +6187,7 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
if (ParseTypeAndValue(Val, Loc, PFS) ||
ParseToken(lltok::comma, "expected ',' after store operand") ||
ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
- ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
+ ParseScopeAndOrdering(isAtomic, SSID, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
return true;
@@ -6181,7 +6203,7 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
Ordering == AtomicOrdering::AcquireRelease)
return Error(Loc, "atomic store cannot use Acquire ordering");
- Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
+ Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, SSID);
return AteExtraComma ? InstExtraComma : InstNormal;
}
@@ -6193,7 +6215,7 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
bool AteExtraComma = false;
AtomicOrdering SuccessOrdering = AtomicOrdering::NotAtomic;
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
+ SyncScope::ID SSID = SyncScope::System;
bool isVolatile = false;
bool isWeak = false;
@@ -6208,7 +6230,7 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
ParseTypeAndValue(Cmp, CmpLoc, PFS) ||
ParseToken(lltok::comma, "expected ',' after cmpxchg cmp operand") ||
ParseTypeAndValue(New, NewLoc, PFS) ||
- ParseScopeAndOrdering(true /*Always atomic*/, Scope, SuccessOrdering) ||
+ ParseScopeAndOrdering(true /*Always atomic*/, SSID, SuccessOrdering) ||
ParseOrdering(FailureOrdering))
return true;
@@ -6231,7 +6253,7 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
if (!New->getType()->isFirstClassType())
return Error(NewLoc, "cmpxchg operand must be a first class value");
AtomicCmpXchgInst *CXI = new AtomicCmpXchgInst(
- Ptr, Cmp, New, SuccessOrdering, FailureOrdering, Scope);
+ Ptr, Cmp, New, SuccessOrdering, FailureOrdering, SSID);
CXI->setVolatile(isVolatile);
CXI->setWeak(isWeak);
Inst = CXI;
@@ -6245,7 +6267,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
bool AteExtraComma = false;
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
+ SyncScope::ID SSID = SyncScope::System;
bool isVolatile = false;
AtomicRMWInst::BinOp Operation;
@@ -6271,7 +6293,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
if (ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
ParseToken(lltok::comma, "expected ',' after atomicrmw address") ||
ParseTypeAndValue(Val, ValLoc, PFS) ||
- ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+ ParseScopeAndOrdering(true /*Always atomic*/, SSID, Ordering))
return true;
if (Ordering == AtomicOrdering::Unordered)
@@ -6288,7 +6310,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
" integer");
AtomicRMWInst *RMWI =
- new AtomicRMWInst(Operation, Ptr, Val, Ordering, Scope);
+ new AtomicRMWInst(Operation, Ptr, Val, Ordering, SSID);
RMWI->setVolatile(isVolatile);
Inst = RMWI;
return AteExtraComma ? InstExtraComma : InstNormal;
@@ -6298,8 +6320,8 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
/// ::= 'fence' 'singlethread'? AtomicOrdering
int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
- if (ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+ SyncScope::ID SSID = SyncScope::System;
+ if (ParseScopeAndOrdering(true /*Always atomic*/, SSID, Ordering))
return true;
if (Ordering == AtomicOrdering::Unordered)
@@ -6307,7 +6329,7 @@ int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
if (Ordering == AtomicOrdering::Monotonic)
return TokError("fence cannot be monotonic");
- Inst = new FenceInst(Context, Ordering, Scope);
+ Inst = new FenceInst(Context, Ordering, SSID);
return InstNormal;
}
@@ -6349,7 +6371,7 @@ int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
break;
}
if (ParseTypeAndValue(Val, EltLoc, PFS)) return true;
- if (!Val->getType()->getScalarType()->isIntegerTy())
+ if (!Val->getType()->isIntOrIntVectorTy())
return Error(EltLoc, "getelementptr index must be an integer");
if (Val->getType()->isVectorTy()) {
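
Note on the LLLexer/LLParser changes above: the dedicated 'singlethread' keyword is replaced by the generic syncscope("<name>") syntax, and ParseScope resolves the quoted name to a SyncScope::ID that is threaded into the load/store/cmpxchg/atomicrmw/fence constructors. A sketch of building an equivalent instruction through the C++ API, using the scope name "agent" purely as an example (any target-defined name works):

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    static LoadInst *atomicLoadOnNamedScope(IRBuilder<> &B, Value *Ptr,
                                            LLVMContext &Ctx) {
      SyncScope::ID SSID = Ctx.getOrInsertSyncScopeID("agent"); // example name
      LoadInst *LI = B.CreateLoad(Ptr);
      LI->setAlignment(4);
      LI->setAtomic(AtomicOrdering::Acquire, SSID);
      return LI;
    }
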
diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h
index 4616c2e86947..d5b059355c42 100644
--- a/lib/AsmParser/LLParser.h
+++ b/lib/AsmParser/LLParser.h
@@ -241,8 +241,9 @@ namespace llvm {
bool ParseOptionalCallingConv(unsigned &CC);
bool ParseOptionalAlignment(unsigned &Alignment);
bool ParseOptionalDerefAttrBytes(lltok::Kind AttrKind, uint64_t &Bytes);
- bool ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
+ bool ParseScopeAndOrdering(bool isAtomic, SyncScope::ID &SSID,
AtomicOrdering &Ordering);
+ bool ParseScope(SyncScope::ID &SSID);
bool ParseOrdering(AtomicOrdering &Ordering);
bool ParseOptionalStackAlignment(unsigned &Alignment);
bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma);
diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h
index 6c8ed7da495d..9c7a06de81b4 100644
--- a/lib/AsmParser/LLToken.h
+++ b/lib/AsmParser/LLToken.h
@@ -93,7 +93,7 @@ enum Kind {
kw_release,
kw_acq_rel,
kw_seq_cst,
- kw_singlethread,
+ kw_syncscope,
kw_nnan,
kw_ninf,
kw_nsz,
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 1ebef3173135..2b4970a80cdd 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -513,6 +513,7 @@ class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
TBAAVerifier TBAAVerifyHelper;
std::vector<std::string> BundleTags;
+ SmallVector<SyncScope::ID, 8> SSIDs;
public:
BitcodeReader(BitstreamCursor Stream, StringRef Strtab,
@@ -648,6 +649,7 @@ private:
Error parseTypeTable();
Error parseTypeTableBody();
Error parseOperandBundleTags();
+ Error parseSyncScopeNames();
Expected<Value *> recordValue(SmallVectorImpl<uint64_t> &Record,
unsigned NameIndex, Triple &TT);
@@ -668,6 +670,8 @@ private:
Error findFunctionInStream(
Function *F,
DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator);
+
+ SyncScope::ID getDecodedSyncScopeID(unsigned Val);
};
/// Class to manage reading and parsing function summary index bitcode
@@ -998,14 +1002,6 @@ static AtomicOrdering getDecodedOrdering(unsigned Val) {
}
}
-static SynchronizationScope getDecodedSynchScope(unsigned Val) {
- switch (Val) {
- case bitc::SYNCHSCOPE_SINGLETHREAD: return SingleThread;
- default: // Map unknown scopes to cross-thread.
- case bitc::SYNCHSCOPE_CROSSTHREAD: return CrossThread;
- }
-}
-
static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) {
switch (Val) {
default: // Map unknown selection kinds to any.
@@ -1745,6 +1741,44 @@ Error BitcodeReader::parseOperandBundleTags() {
}
}
+Error BitcodeReader::parseSyncScopeNames() {
+ if (Stream.EnterSubBlock(bitc::SYNC_SCOPE_NAMES_BLOCK_ID))
+ return error("Invalid record");
+
+ if (!SSIDs.empty())
+ return error("Invalid multiple synchronization scope names blocks");
+
+ SmallVector<uint64_t, 64> Record;
+ while (true) {
+ BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock: // Handled for us already.
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ if (SSIDs.empty())
+ return error("Invalid empty synchronization scope names block");
+ return Error::success();
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Synchronization scope names are implicitly mapped to synchronization
+ // scope IDs by their order.
+
+ if (Stream.readRecord(Entry.ID, Record) != bitc::SYNC_SCOPE_NAME)
+ return error("Invalid record");
+
+ SmallString<16> SSN;
+ if (convertToString(Record, 0, SSN))
+ return error("Invalid record");
+
+ SSIDs.push_back(Context.getOrInsertSyncScopeID(SSN));
+ Record.clear();
+ }
+}
+
/// Associate a value with its name from the given index in the provided record.
Expected<Value *> BitcodeReader::recordValue(SmallVectorImpl<uint64_t> &Record,
unsigned NameIndex, Triple &TT) {
@@ -3132,6 +3166,10 @@ Error BitcodeReader::parseModule(uint64_t ResumeBit,
if (Error Err = parseOperandBundleTags())
return Err;
break;
+ case bitc::SYNC_SCOPE_NAMES_BLOCK_ID:
+ if (Error Err = parseSyncScopeNames())
+ return Err;
+ break;
}
continue;
@@ -4204,7 +4242,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
break;
}
case bitc::FUNC_CODE_INST_LOADATOMIC: {
- // LOADATOMIC: [opty, op, align, vol, ordering, synchscope]
+ // LOADATOMIC: [opty, op, align, vol, ordering, ssid]
unsigned OpNum = 0;
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
@@ -4226,12 +4264,12 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
return error("Invalid record");
if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
unsigned Align;
if (Error Err = parseAlignmentValue(Record[OpNum], Align))
return Err;
- I = new LoadInst(Op, "", Record[OpNum+1], Align, Ordering, SynchScope);
+ I = new LoadInst(Op, "", Record[OpNum+1], Align, Ordering, SSID);
InstructionList.push_back(I);
break;
@@ -4260,7 +4298,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
}
case bitc::FUNC_CODE_INST_STOREATOMIC:
case bitc::FUNC_CODE_INST_STOREATOMIC_OLD: {
- // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, synchscope]
+ // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, ssid]
unsigned OpNum = 0;
Value *Val, *Ptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
@@ -4280,20 +4318,20 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
Ordering == AtomicOrdering::Acquire ||
Ordering == AtomicOrdering::AcquireRelease)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
return error("Invalid record");
unsigned Align;
if (Error Err = parseAlignmentValue(Record[OpNum], Align))
return Err;
- I = new StoreInst(Val, Ptr, Record[OpNum+1], Align, Ordering, SynchScope);
+ I = new StoreInst(Val, Ptr, Record[OpNum+1], Align, Ordering, SSID);
InstructionList.push_back(I);
break;
}
case bitc::FUNC_CODE_INST_CMPXCHG_OLD:
case bitc::FUNC_CODE_INST_CMPXCHG: {
- // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, synchscope,
+ // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, ssid,
// failureordering?, isweak?]
unsigned OpNum = 0;
Value *Ptr, *Cmp, *New;
@@ -4310,7 +4348,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (SuccessOrdering == AtomicOrdering::NotAtomic ||
SuccessOrdering == AtomicOrdering::Unordered)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 2]);
if (Error Err = typeCheckLoadStoreInst(Cmp->getType(), Ptr->getType()))
return Err;
@@ -4322,7 +4360,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
FailureOrdering = getDecodedOrdering(Record[OpNum + 3]);
I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering,
- SynchScope);
+ SSID);
cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
if (Record.size() < 8) {
@@ -4339,7 +4377,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
break;
}
case bitc::FUNC_CODE_INST_ATOMICRMW: {
- // ATOMICRMW:[ptrty, ptr, val, op, vol, ordering, synchscope]
+ // ATOMICRMW:[ptrty, ptr, val, op, vol, ordering, ssid]
unsigned OpNum = 0;
Value *Ptr, *Val;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
@@ -4356,13 +4394,13 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (Ordering == AtomicOrdering::NotAtomic ||
Ordering == AtomicOrdering::Unordered)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
- I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
+ I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SSID);
cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]);
InstructionList.push_back(I);
break;
}
- case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, synchscope]
+ case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, ssid]
if (2 != Record.size())
return error("Invalid record");
AtomicOrdering Ordering = getDecodedOrdering(Record[0]);
@@ -4370,8 +4408,8 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
Ordering == AtomicOrdering::Unordered ||
Ordering == AtomicOrdering::Monotonic)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]);
- I = new FenceInst(Context, Ordering, SynchScope);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[1]);
+ I = new FenceInst(Context, Ordering, SSID);
InstructionList.push_back(I);
break;
}
@@ -4567,6 +4605,14 @@ Error BitcodeReader::findFunctionInStream(
return Error::success();
}
+SyncScope::ID BitcodeReader::getDecodedSyncScopeID(unsigned Val) {
+ if (Val == SyncScope::SingleThread || Val == SyncScope::System)
+ return SyncScope::ID(Val);
+ if (Val >= SSIDs.size())
+ return SyncScope::System; // Map unknown synchronization scopes to system.
+ return SSIDs[Val];
+}
+
//===----------------------------------------------------------------------===//
// GVMaterializer implementation
//===----------------------------------------------------------------------===//
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index b2b1ea6de374..0e518d2bbc8f 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -114,6 +114,8 @@ class ModuleBitcodeWriter : public BitcodeWriterBase {
/// True if a module hash record should be written.
bool GenerateHash;
+ SHA1 Hasher;
+
/// If non-null, when GenerateHash is true, the resulting hash is written
/// into ModHash. When GenerateHash is false, that specified value
/// is used as the hash instead of computing from the generated bitcode.
@@ -176,6 +178,8 @@ public:
private:
uint64_t bitcodeStartBit() { return BitcodeStartBit; }
+ size_t addToStrtab(StringRef Str);
+
void writeAttributeGroupTable();
void writeAttributeTable();
void writeTypeTable();
@@ -262,6 +266,7 @@ private:
const GlobalObject &GO);
void writeModuleMetadataKinds();
void writeOperandBundleTags();
+ void writeSyncScopeNames();
void writeConstants(unsigned FirstVal, unsigned LastVal, bool isGlobal);
void writeModuleConstants();
bool pushValueAndType(const Value *V, unsigned InstID,
@@ -312,6 +317,10 @@ private:
return VE.getValueID(VI.getValue());
}
std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; }
+
+ unsigned getEncodedSyncScopeID(SyncScope::ID SSID) {
+ return unsigned(SSID);
+ }
};
/// Class to manage the bitcode writing for a combined index.
@@ -481,14 +490,6 @@ static unsigned getEncodedOrdering(AtomicOrdering Ordering) {
llvm_unreachable("Invalid ordering");
}
-static unsigned getEncodedSynchScope(SynchronizationScope SynchScope) {
- switch (SynchScope) {
- case SingleThread: return bitc::SYNCHSCOPE_SINGLETHREAD;
- case CrossThread: return bitc::SYNCHSCOPE_CROSSTHREAD;
- }
- llvm_unreachable("Invalid synch scope");
-}
-
static void writeStringRecord(BitstreamWriter &Stream, unsigned Code,
StringRef Str, unsigned AbbrevToUse) {
SmallVector<unsigned, 64> Vals;
@@ -947,11 +948,17 @@ static unsigned getEncodedUnnamedAddr(const GlobalValue &GV) {
llvm_unreachable("Invalid unnamed_addr");
}
+size_t ModuleBitcodeWriter::addToStrtab(StringRef Str) {
+ if (GenerateHash)
+ Hasher.update(Str);
+ return StrtabBuilder.add(Str);
+}
+
void ModuleBitcodeWriter::writeComdats() {
SmallVector<unsigned, 64> Vals;
for (const Comdat *C : VE.getComdats()) {
// COMDAT: [strtab offset, strtab size, selection_kind]
- Vals.push_back(StrtabBuilder.add(C->getName()));
+ Vals.push_back(addToStrtab(C->getName()));
Vals.push_back(C->getName().size());
Vals.push_back(getEncodedComdatSelectionKind(*C));
Stream.EmitRecord(bitc::MODULE_CODE_COMDAT, Vals, /*AbbrevToUse=*/0);
@@ -1122,7 +1129,7 @@ void ModuleBitcodeWriter::writeModuleInfo() {
// linkage, alignment, section, visibility, threadlocal,
// unnamed_addr, externally_initialized, dllstorageclass,
// comdat, attributes]
- Vals.push_back(StrtabBuilder.add(GV.getName()));
+ Vals.push_back(addToStrtab(GV.getName()));
Vals.push_back(GV.getName().size());
Vals.push_back(VE.getTypeID(GV.getValueType()));
Vals.push_back(GV.getType()->getAddressSpace() << 2 | 2 | GV.isConstant());
@@ -1161,7 +1168,7 @@ void ModuleBitcodeWriter::writeModuleInfo() {
// linkage, paramattrs, alignment, section, visibility, gc,
// unnamed_addr, prologuedata, dllstorageclass, comdat,
// prefixdata, personalityfn]
- Vals.push_back(StrtabBuilder.add(F.getName()));
+ Vals.push_back(addToStrtab(F.getName()));
Vals.push_back(F.getName().size());
Vals.push_back(VE.getTypeID(F.getFunctionType()));
Vals.push_back(F.getCallingConv());
@@ -1191,7 +1198,7 @@ void ModuleBitcodeWriter::writeModuleInfo() {
for (const GlobalAlias &A : M.aliases()) {
// ALIAS: [strtab offset, strtab size, alias type, aliasee val#, linkage,
// visibility, dllstorageclass, threadlocal, unnamed_addr]
- Vals.push_back(StrtabBuilder.add(A.getName()));
+ Vals.push_back(addToStrtab(A.getName()));
Vals.push_back(A.getName().size());
Vals.push_back(VE.getTypeID(A.getValueType()));
Vals.push_back(A.getType()->getAddressSpace());
@@ -1210,7 +1217,7 @@ void ModuleBitcodeWriter::writeModuleInfo() {
for (const GlobalIFunc &I : M.ifuncs()) {
// IFUNC: [strtab offset, strtab size, ifunc type, address space, resolver
// val#, linkage, visibility]
- Vals.push_back(StrtabBuilder.add(I.getName()));
+ Vals.push_back(addToStrtab(I.getName()));
Vals.push_back(I.getName().size());
Vals.push_back(VE.getTypeID(I.getValueType()));
Vals.push_back(I.getType()->getAddressSpace());
@@ -2032,6 +2039,24 @@ void ModuleBitcodeWriter::writeOperandBundleTags() {
Stream.ExitBlock();
}
+void ModuleBitcodeWriter::writeSyncScopeNames() {
+ SmallVector<StringRef, 8> SSNs;
+ M.getContext().getSyncScopeNames(SSNs);
+ if (SSNs.empty())
+ return;
+
+ Stream.EnterSubblock(bitc::SYNC_SCOPE_NAMES_BLOCK_ID, 2);
+
+ SmallVector<uint64_t, 64> Record;
+ for (auto SSN : SSNs) {
+ Record.append(SSN.begin(), SSN.end());
+ Stream.EmitRecord(bitc::SYNC_SCOPE_NAME, Record, 0);
+ Record.clear();
+ }
+
+ Stream.ExitBlock();
+}
+
static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) {
if ((int64_t)V >= 0)
Vals.push_back(V << 1);
@@ -2648,7 +2673,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(cast<LoadInst>(I).isVolatile());
if (cast<LoadInst>(I).isAtomic()) {
Vals.push_back(getEncodedOrdering(cast<LoadInst>(I).getOrdering()));
- Vals.push_back(getEncodedSynchScope(cast<LoadInst>(I).getSynchScope()));
+ Vals.push_back(getEncodedSyncScopeID(cast<LoadInst>(I).getSyncScopeID()));
}
break;
case Instruction::Store:
@@ -2662,7 +2687,8 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(cast<StoreInst>(I).isVolatile());
if (cast<StoreInst>(I).isAtomic()) {
Vals.push_back(getEncodedOrdering(cast<StoreInst>(I).getOrdering()));
- Vals.push_back(getEncodedSynchScope(cast<StoreInst>(I).getSynchScope()));
+ Vals.push_back(
+ getEncodedSyncScopeID(cast<StoreInst>(I).getSyncScopeID()));
}
break;
case Instruction::AtomicCmpXchg:
@@ -2674,7 +2700,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(
getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getSuccessOrdering()));
Vals.push_back(
- getEncodedSynchScope(cast<AtomicCmpXchgInst>(I).getSynchScope()));
+ getEncodedSyncScopeID(cast<AtomicCmpXchgInst>(I).getSyncScopeID()));
Vals.push_back(
getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getFailureOrdering()));
Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak());
@@ -2688,12 +2714,12 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(cast<AtomicRMWInst>(I).isVolatile());
Vals.push_back(getEncodedOrdering(cast<AtomicRMWInst>(I).getOrdering()));
Vals.push_back(
- getEncodedSynchScope(cast<AtomicRMWInst>(I).getSynchScope()));
+ getEncodedSyncScopeID(cast<AtomicRMWInst>(I).getSyncScopeID()));
break;
case Instruction::Fence:
Code = bitc::FUNC_CODE_INST_FENCE;
Vals.push_back(getEncodedOrdering(cast<FenceInst>(I).getOrdering()));
- Vals.push_back(getEncodedSynchScope(cast<FenceInst>(I).getSynchScope()));
+ Vals.push_back(getEncodedSyncScopeID(cast<FenceInst>(I).getSyncScopeID()));
break;
case Instruction::Call: {
const CallInst &CI = cast<CallInst>(I);
@@ -3648,7 +3674,6 @@ void ModuleBitcodeWriter::writeModuleHash(size_t BlockStartPos) {
// Emit the module's hash.
// MODULE_CODE_HASH: [5*i32]
if (GenerateHash) {
- SHA1 Hasher;
uint32_t Vals[5];
Hasher.update(ArrayRef<uint8_t>((const uint8_t *)&(Buffer)[BlockStartPos],
Buffer.size() - BlockStartPos));
@@ -3707,6 +3732,7 @@ void ModuleBitcodeWriter::write() {
writeUseListBlock(nullptr);
writeOperandBundleTags();
+ writeSyncScopeNames();
// Emit function bodies.
DenseMap<const Function *, uint64_t> FunctionToBitcodeIndex;
diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index 344136b1f195..aa9c8e94d08a 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -361,7 +361,7 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
auto *NewLI = Builder.CreateLoad(NewAddr);
NewLI->setAlignment(LI->getAlignment());
NewLI->setVolatile(LI->isVolatile());
- NewLI->setAtomic(LI->getOrdering(), LI->getSynchScope());
+ NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");
Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
@@ -444,7 +444,7 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
NewSI->setAlignment(SI->getAlignment());
NewSI->setVolatile(SI->isVolatile());
- NewSI->setAtomic(SI->getOrdering(), SI->getSynchScope());
+ NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
SI->eraseFromParent();
return NewSI;
@@ -801,7 +801,7 @@ void AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, CI->getSuccessOrdering(),
- CI->getFailureOrdering(), CI->getSynchScope());
+ CI->getFailureOrdering(), CI->getSyncScopeID());
NewCI->setVolatile(CI->isVolatile());
// When we're building a strong cmpxchg, we need a loop, so you
// might think we could use a weak cmpxchg inside. But, using strong
@@ -924,7 +924,7 @@ AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *
auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
CI->getSuccessOrdering(),
CI->getFailureOrdering(),
- CI->getSynchScope());
+ CI->getSyncScopeID());
NewCI->setVolatile(CI->isVolatile());
NewCI->setWeak(CI->isWeak());
DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
diff --git a/lib/CodeGen/CodeGen.cpp b/lib/CodeGen/CodeGen.cpp
index faa5f139cf7b..b7fd45a3f6a6 100644
--- a/lib/CodeGen/CodeGen.cpp
+++ b/lib/CodeGen/CodeGen.cpp
@@ -78,6 +78,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializePreISelIntrinsicLoweringLegacyPassPass(Registry);
initializeProcessImplicitDefsPass(Registry);
initializeRABasicPass(Registry);
+ initializeRAFastPass(Registry);
initializeRAGreedyPass(Registry);
initializeRegisterCoalescerPass(Registry);
initializeRenameIndependentSubregsPass(Registry);
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index b50e76f2e3ba..b7155ac2480a 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -4270,6 +4270,7 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Value *Consensus = nullptr;
unsigned NumUsesConsensus = 0;
bool IsNumUsesConsensusValid = false;
+ bool PhiSeen = false;
SmallVector<Instruction*, 16> AddrModeInsts;
ExtAddrMode AddrMode;
TypePromotionTransaction TPT(RemovedInsts);
@@ -4289,6 +4290,7 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
if (PHINode *P = dyn_cast<PHINode>(V)) {
for (Value *IncValue : P->incoming_values())
worklist.push_back(IncValue);
+ PhiSeen = true;
continue;
}
@@ -4342,9 +4344,10 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
TPT.commit();
// If all the instructions matched are already in this BB, don't do anything.
- if (none_of(AddrModeInsts, [&](Value *V) {
+ // If we saw a Phi node, the address is definitely not local.
+ if (!PhiSeen && none_of(AddrModeInsts, [&](Value *V) {
return IsNonLocalValue(V, MemoryInst->getParent());
- })) {
+ })) {
DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
return false;
}
@@ -4390,6 +4393,20 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
AddrMode.Scale = 0;
}
+ // It is only safe to sign extend the BaseReg if we know that the math
+ // required to create it did not overflow before we extend it. Since
+ // the original IR value was tossed in favor of a constant back when
+ // the AddrMode was created we need to bail out gracefully if widths
+ // do not match instead of extending it.
+ //
+ // (See below for code to add the scale.)
+ if (AddrMode.Scale) {
+ Type *ScaledRegTy = AddrMode.ScaledReg->getType();
+ if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
+ cast<IntegerType>(ScaledRegTy)->getBitWidth())
+ return false;
+ }
+
if (AddrMode.BaseGV) {
if (ResultPtr)
return false;
@@ -4440,19 +4457,11 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Value *V = AddrMode.ScaledReg;
if (V->getType() == IntPtrTy) {
// done.
- } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
- cast<IntegerType>(V->getType())->getBitWidth()) {
- V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
} else {
- // It is only safe to sign extend the BaseReg if we know that the math
- // required to create it did not overflow before we extend it. Since
- // the original IR value was tossed in favor of a constant back when
- // the AddrMode was created we need to bail out gracefully if widths
- // do not match instead of extending it.
- Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
- if (I && (ResultIndex != AddrMode.BaseReg))
- I->eraseFromParent();
- return false;
+ assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
+ cast<IntegerType>(V->getType())->getBitWidth() &&
+ "We can't transform if ScaledReg is too narrow");
+ V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
}
if (AddrMode.Scale != 1)
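
Note on the CodeGenPrepare.cpp change above: the width check for AddrMode.ScaledReg is hoisted before any sunken address computation is materialized, so the old cleanup path that deleted a half-built ResultIndex disappears and the later branch can simply assert and truncate. The underlying concern is that a scaled index computed in a narrower type may already have wrapped, so widening it afterwards is not equivalent; a self-contained toy illustration (nothing here comes from the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int8_t Narrow = static_cast<int8_t>(100 + 100); // wraps in 8 bits (typically -56)
      long long WidenedAfter = Narrow;                // widening after the wrap: -56
      long long ComputedWide = 100LL + 100LL;         // computing wide from the start: 200
      std::printf("%lld vs %lld\n", WidenedAfter, ComputedWide);
      return 0;
    }
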
diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 521037f9d206..ed1bd995e60b 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -345,7 +345,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
*MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
Flags, DL->getTypeStoreSize(LI.getType()),
getMemOpAlignment(LI), AAMDNodes(), nullptr,
- LI.getSynchScope(), LI.getOrdering()));
+ LI.getSyncScopeID(), LI.getOrdering()));
return true;
}
@@ -363,7 +363,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
*MF->getMachineMemOperand(
MachinePointerInfo(SI.getPointerOperand()), Flags,
DL->getTypeStoreSize(SI.getValueOperand()->getType()),
- getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSynchScope(),
+ getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
SI.getOrdering()));
return true;
}
diff --git a/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/lib/CodeGen/GlobalISel/InstructionSelector.cpp
index 860fc9a4f8b6..bf427225d6a9 100644
--- a/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -16,7 +16,11 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -26,6 +30,9 @@
using namespace llvm;
+InstructionSelector::MatcherState::MatcherState(unsigned MaxRenderers)
+ : Renderers(MaxRenderers, nullptr), MIs() {}
+
InstructionSelector::InstructionSelector() = default;
bool InstructionSelector::constrainOperandRegToRegClass(
diff --git a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 84b0a0ac4157..49fb5e8f075b 100644
--- a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -99,23 +99,19 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
llvm_unreachable("Unknown libcall function");
}
-LegalizerHelper::LegalizeResult llvm::replaceWithLibcall(
- MachineInstr &MI, MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
- const CallLowering::ArgInfo &Result, ArrayRef<CallLowering::ArgInfo> Args) {
+LegalizerHelper::LegalizeResult
+llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+ const CallLowering::ArgInfo &Result,
+ ArrayRef<CallLowering::ArgInfo> Args) {
auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
const char *Name = TLI.getLibcallName(Libcall);
+
MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
- MIRBuilder.setInstr(MI);
if (!CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(Libcall),
MachineOperand::CreateES(Name), Result, Args))
return LegalizerHelper::UnableToLegalize;
- // We're about to remove MI, so move the insert point after it.
- MIRBuilder.setInsertPt(MIRBuilder.getMBB(),
- std::next(MIRBuilder.getInsertPt()));
-
- MI.eraseFromParent();
return LegalizerHelper::Legalized;
}
@@ -123,10 +119,9 @@ static LegalizerHelper::LegalizeResult
simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
Type *OpType) {
auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
- return replaceWithLibcall(MI, MIRBuilder, Libcall,
- {MI.getOperand(0).getReg(), OpType},
- {{MI.getOperand(1).getReg(), OpType},
- {MI.getOperand(2).getReg(), OpType}});
+ return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType},
+ {{MI.getOperand(1).getReg(), OpType},
+ {MI.getOperand(2).getReg(), OpType}});
}
LegalizerHelper::LegalizeResult
@@ -135,6 +130,8 @@ LegalizerHelper::libcall(MachineInstr &MI) {
unsigned Size = LLTy.getSizeInBits();
auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ MIRBuilder.setInstr(MI);
+
switch (MI.getOpcode()) {
default:
return UnableToLegalize;
@@ -143,15 +140,24 @@ LegalizerHelper::libcall(MachineInstr &MI) {
case TargetOpcode::G_SREM:
case TargetOpcode::G_UREM: {
Type *HLTy = Type::getInt32Ty(Ctx);
- return simpleLibcall(MI, MIRBuilder, Size, HLTy);
+ auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
+ if (Status != Legalized)
+ return Status;
+ break;
}
case TargetOpcode::G_FADD:
case TargetOpcode::G_FPOW:
case TargetOpcode::G_FREM: {
Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
- return simpleLibcall(MI, MIRBuilder, Size, HLTy);
+ auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
+ if (Status != Legalized)
+ return Status;
+ break;
}
}
+
+ MI.eraseFromParent();
+ return Legalized;
}
LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
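
Note on the LegalizerHelper.cpp change above: replaceWithLibcall becomes createLibcall, which only emits the call; setting the insert point and erasing the original instruction move into LegalizerHelper::libcall, so multi-step legalizations can reuse the helper. A snippet showing the resulting call pattern for a 32-bit G_FREM, with FloatTy standing for Type::getFloatTy(Ctx) (sketch only, not a complete legalizer):

    // MIRBuilder is assumed to be positioned at MI already.
    auto Status = createLibcall(MIRBuilder, RTLIB::REM_F32,
                                {MI.getOperand(0).getReg(), FloatTy},
                                {{MI.getOperand(1).getReg(), FloatTy},
                                 {MI.getOperand(2).getReg(), FloatTy}});
    if (Status == LegalizerHelper::Legalized)
      MI.eraseFromParent(); // the helper no longer erases MI itself
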
diff --git a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 47c6214c0552..4636806c3f08 100644
--- a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -166,19 +166,24 @@ MachineInstrBuilder MachineIRBuilder::buildGlobalValue(unsigned Res,
.addGlobalAddress(GV);
}
-MachineInstrBuilder MachineIRBuilder::buildAdd(unsigned Res, unsigned Op0,
+MachineInstrBuilder MachineIRBuilder::buildBinaryOp(unsigned Opcode, unsigned Res, unsigned Op0,
unsigned Op1) {
assert((MRI->getType(Res).isScalar() || MRI->getType(Res).isVector()) &&
"invalid operand type");
assert(MRI->getType(Res) == MRI->getType(Op0) &&
MRI->getType(Res) == MRI->getType(Op1) && "type mismatch");
- return buildInstr(TargetOpcode::G_ADD)
+ return buildInstr(Opcode)
.addDef(Res)
.addUse(Op0)
.addUse(Op1);
}
+MachineInstrBuilder MachineIRBuilder::buildAdd(unsigned Res, unsigned Op0,
+ unsigned Op1) {
+ return buildBinaryOp(TargetOpcode::G_ADD, Res, Op0, Op1);
+}
+
MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
unsigned Op1) {
assert(MRI->getType(Res).isPointer() &&
@@ -222,41 +227,22 @@ MachineInstrBuilder MachineIRBuilder::buildPtrMask(unsigned Res, unsigned Op0,
MachineInstrBuilder MachineIRBuilder::buildSub(unsigned Res, unsigned Op0,
unsigned Op1) {
- assert((MRI->getType(Res).isScalar() || MRI->getType(Res).isVector()) &&
- "invalid operand type");
- assert(MRI->getType(Res) == MRI->getType(Op0) &&
- MRI->getType(Res) == MRI->getType(Op1) && "type mismatch");
-
- return buildInstr(TargetOpcode::G_SUB)
- .addDef(Res)
- .addUse(Op0)
- .addUse(Op1);
+ return buildBinaryOp(TargetOpcode::G_SUB, Res, Op0, Op1);
}
MachineInstrBuilder MachineIRBuilder::buildMul(unsigned Res, unsigned Op0,
unsigned Op1) {
- assert((MRI->getType(Res).isScalar() || MRI->getType(Res).isVector()) &&
- "invalid operand type");
- assert(MRI->getType(Res) == MRI->getType(Op0) &&
- MRI->getType(Res) == MRI->getType(Op1) && "type mismatch");
-
- return buildInstr(TargetOpcode::G_MUL)
- .addDef(Res)
- .addUse(Op0)
- .addUse(Op1);
+ return buildBinaryOp(TargetOpcode::G_MUL, Res, Op0, Op1);
}
MachineInstrBuilder MachineIRBuilder::buildAnd(unsigned Res, unsigned Op0,
unsigned Op1) {
- assert((MRI->getType(Res).isScalar() || MRI->getType(Res).isVector()) &&
- "invalid operand type");
- assert(MRI->getType(Res) == MRI->getType(Op0) &&
- MRI->getType(Res) == MRI->getType(Op1) && "type mismatch");
+ return buildBinaryOp(TargetOpcode::G_AND, Res, Op0, Op1);
+}
- return buildInstr(TargetOpcode::G_AND)
- .addDef(Res)
- .addUse(Op0)
- .addUse(Op1);
+MachineInstrBuilder MachineIRBuilder::buildOr(unsigned Res, unsigned Op0,
+ unsigned Op1) {
+ return buildBinaryOp(TargetOpcode::G_OR, Res, Op0, Op1);
}
MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
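
Note on the MachineIRBuilder.cpp change above: buildAdd/buildSub/buildMul/buildAnd become one-line wrappers around the new buildBinaryOp, and buildOr is added on top of it. Caller-side usage is unchanged; a small sketch, assuming MRI and MIRBuilder are the usual per-function objects and SrcA/SrcB are existing 32-bit virtual registers:

    unsigned Dst = MRI.createGenericVirtualRegister(LLT::scalar(32));
    MIRBuilder.buildOr(Dst, SrcA, SrcB); // emits G_OR Dst, SrcA, SrcB
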
diff --git a/lib/CodeGen/LiveRegUnits.cpp b/lib/CodeGen/LiveRegUnits.cpp
index 3746b74e0528..f9ba4ffa6527 100644
--- a/lib/CodeGen/LiveRegUnits.cpp
+++ b/lib/CodeGen/LiveRegUnits.cpp
@@ -67,7 +67,7 @@ void LiveRegUnits::stepBackward(const MachineInstr &MI) {
}
}
-void LiveRegUnits::accumulateBackward(const MachineInstr &MI) {
+void LiveRegUnits::accumulate(const MachineInstr &MI) {
// Add defs, uses and regmask clobbers to the set.
for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
if (O->isReg()) {
diff --git a/lib/CodeGen/MIRParser/MILexer.cpp b/lib/CodeGen/MIRParser/MILexer.cpp
index 1f1ce6e8d725..58a655a4dee4 100644
--- a/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/lib/CodeGen/MIRParser/MILexer.cpp
@@ -365,6 +365,14 @@ static Cursor maybeLexIRValue(Cursor C, MIToken &Token,
return lexName(C, Token, MIToken::NamedIRValue, Rule.size(), ErrorCallback);
}
+static Cursor maybeLexStringConstant(Cursor C, MIToken &Token,
+ ErrorCallbackType ErrorCallback) {
+ if (C.peek() != '"')
+ return None;
+ return lexName(C, Token, MIToken::StringConstant, /*PrefixLength=*/0,
+ ErrorCallback);
+}
+
static Cursor lexVirtualRegister(Cursor C, MIToken &Token) {
auto Range = C;
C.advance(); // Skip '%'
@@ -630,6 +638,8 @@ StringRef llvm::lexMIToken(StringRef Source, MIToken &Token,
return R.remaining();
if (Cursor R = maybeLexEscapedIRValue(C, Token, ErrorCallback))
return R.remaining();
+ if (Cursor R = maybeLexStringConstant(C, Token, ErrorCallback))
+ return R.remaining();
Token.reset(MIToken::Error, C.remaining());
ErrorCallback(C.location(),
diff --git a/lib/CodeGen/MIRParser/MILexer.h b/lib/CodeGen/MIRParser/MILexer.h
index 3e9513111bf4..08b82e59c4fc 100644
--- a/lib/CodeGen/MIRParser/MILexer.h
+++ b/lib/CodeGen/MIRParser/MILexer.h
@@ -127,7 +127,8 @@ struct MIToken {
NamedIRValue,
IRValue,
QuotedIRValue, // `<constant value>`
- SubRegisterIndex
+ SubRegisterIndex,
+ StringConstant
};
private:
@@ -168,7 +169,8 @@ public:
bool isMemoryOperandFlag() const {
return Kind == kw_volatile || Kind == kw_non_temporal ||
- Kind == kw_dereferenceable || Kind == kw_invariant;
+ Kind == kw_dereferenceable || Kind == kw_invariant ||
+ Kind == StringConstant;
}
bool is(TokenKind K) const { return Kind == K; }
diff --git a/lib/CodeGen/MIRParser/MIParser.cpp b/lib/CodeGen/MIRParser/MIParser.cpp
index c58d192284dd..c68d87b15a31 100644
--- a/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/lib/CodeGen/MIRParser/MIParser.cpp
@@ -141,6 +141,8 @@ class MIParser {
StringMap<unsigned> Names2DirectTargetFlags;
/// Maps from direct target flag names to the bitmask target flag values.
StringMap<unsigned> Names2BitmaskTargetFlags;
+ /// Maps from MMO target flag names to MMO target flag values.
+ StringMap<MachineMemOperand::Flags> Names2MMOTargetFlags;
public:
MIParser(PerFunctionMIParsingState &PFS, SMDiagnostic &Error,
@@ -229,6 +231,7 @@ public:
bool parseMemoryOperandFlag(MachineMemOperand::Flags &Flags);
bool parseMemoryPseudoSourceValue(const PseudoSourceValue *&PSV);
bool parseMachinePointerInfo(MachinePointerInfo &Dest);
+ bool parseOptionalScope(LLVMContext &Context, SyncScope::ID &SSID);
bool parseOptionalAtomicOrdering(AtomicOrdering &Order);
bool parseMachineMemoryOperand(MachineMemOperand *&Dest);
@@ -318,6 +321,18 @@ private:
///
/// Return true if the name isn't a name of a bitmask target flag.
bool getBitmaskTargetFlag(StringRef Name, unsigned &Flag);
+
+ void initNames2MMOTargetFlags();
+
+ /// Try to convert a name of a MachineMemOperand target flag to the
+ /// corresponding target flag.
+ ///
+ /// Return true if the name isn't a name of a target MMO flag.
+ bool getMMOTargetFlag(StringRef Name, MachineMemOperand::Flags &Flag);
+
+ /// parseStringConstant
+ /// ::= StringConstant
+ bool parseStringConstant(std::string &Result);
};
} // end anonymous namespace
@@ -2034,7 +2049,14 @@ bool MIParser::parseMemoryOperandFlag(MachineMemOperand::Flags &Flags) {
case MIToken::kw_invariant:
Flags |= MachineMemOperand::MOInvariant;
break;
- // TODO: parse the target specific memory operand flags.
+ case MIToken::StringConstant: {
+ MachineMemOperand::Flags TF;
+ if (getMMOTargetFlag(Token.stringValue(), TF))
+ return error("use of undefined target MMO flag '" + Token.stringValue() +
+ "'");
+ Flags |= TF;
+ break;
+ }
default:
llvm_unreachable("The current token should be a memory operand flag");
}
@@ -2135,6 +2157,26 @@ bool MIParser::parseMachinePointerInfo(MachinePointerInfo &Dest) {
return false;
}
+bool MIParser::parseOptionalScope(LLVMContext &Context,
+ SyncScope::ID &SSID) {
+ SSID = SyncScope::System;
+ if (Token.is(MIToken::Identifier) && Token.stringValue() == "syncscope") {
+ lex();
+ if (expectAndConsume(MIToken::lparen))
+ return error("expected '(' in syncscope");
+
+ std::string SSN;
+ if (parseStringConstant(SSN))
+ return true;
+
+ SSID = Context.getOrInsertSyncScopeID(SSN);
+ if (expectAndConsume(MIToken::rparen))
+ return error("expected ')' in syncscope");
+ }
+
+ return false;
+}
+
bool MIParser::parseOptionalAtomicOrdering(AtomicOrdering &Order) {
Order = AtomicOrdering::NotAtomic;
if (Token.isNot(MIToken::Identifier))
@@ -2174,12 +2216,10 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
Flags |= MachineMemOperand::MOStore;
lex();
- // Optional "singlethread" scope.
- SynchronizationScope Scope = SynchronizationScope::CrossThread;
- if (Token.is(MIToken::Identifier) && Token.stringValue() == "singlethread") {
- Scope = SynchronizationScope::SingleThread;
- lex();
- }
+ // Optional synchronization scope.
+ SyncScope::ID SSID;
+ if (parseOptionalScope(MF.getFunction()->getContext(), SSID))
+ return true;
// Up to two atomic orderings (cmpxchg provides guarantees on failure).
AtomicOrdering Order, FailureOrder;
@@ -2244,7 +2284,7 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
if (expectAndConsume(MIToken::rparen))
return true;
Dest = MF.getMachineMemOperand(Ptr, Flags, Size, BaseAlignment, AAInfo, Range,
- Scope, Order, FailureOrder);
+ SSID, Order, FailureOrder);
return false;
}
@@ -2457,6 +2497,35 @@ bool MIParser::getBitmaskTargetFlag(StringRef Name, unsigned &Flag) {
return false;
}
+void MIParser::initNames2MMOTargetFlags() {
+ if (!Names2MMOTargetFlags.empty())
+ return;
+ const auto *TII = MF.getSubtarget().getInstrInfo();
+ assert(TII && "Expected target instruction info");
+ auto Flags = TII->getSerializableMachineMemOperandTargetFlags();
+ for (const auto &I : Flags)
+ Names2MMOTargetFlags.insert(
+ std::make_pair(StringRef(I.second), I.first));
+}
+
+bool MIParser::getMMOTargetFlag(StringRef Name,
+ MachineMemOperand::Flags &Flag) {
+ initNames2MMOTargetFlags();
+ auto FlagInfo = Names2MMOTargetFlags.find(Name);
+ if (FlagInfo == Names2MMOTargetFlags.end())
+ return true;
+ Flag = FlagInfo->second;
+ return false;
+}
+
+bool MIParser::parseStringConstant(std::string &Result) {
+ if (Token.isNot(MIToken::StringConstant))
+ return error("expected string constant");
+ Result = Token.stringValue();
+ lex();
+ return false;
+}
+
bool llvm::parseMachineBasicBlockDefinitions(PerFunctionMIParsingState &PFS,
StringRef Src,
SMDiagnostic &Error) {
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
index c524a9835f33..ddeacf1d1bfb 100644
--- a/lib/CodeGen/MIRPrinter.cpp
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
@@ -139,6 +140,8 @@ class MIPrinter {
ModuleSlotTracker &MST;
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds;
const DenseMap<int, FrameIndexOperand> &StackObjectOperandMapping;
+ /// Synchronization scope names registered with LLVMContext.
+ SmallVector<StringRef, 8> SSNs;
bool canPredictBranchProbabilities(const MachineBasicBlock &MBB) const;
bool canPredictSuccessors(const MachineBasicBlock &MBB) const;
@@ -162,7 +165,9 @@ public:
void print(const MachineOperand &Op, const TargetRegisterInfo *TRI,
unsigned I, bool ShouldPrintRegisterTies,
LLT TypeToPrint, bool IsDef = false);
- void print(const MachineMemOperand &Op);
+ void print(const LLVMContext &Context, const TargetInstrInfo &TII,
+ const MachineMemOperand &Op);
+ void printSyncScope(const LLVMContext &Context, SyncScope::ID SSID);
void print(const MCCFIInstruction &CFI, const TargetRegisterInfo *TRI);
};
@@ -731,11 +736,12 @@ void MIPrinter::print(const MachineInstr &MI) {
if (!MI.memoperands_empty()) {
OS << " :: ";
+ const LLVMContext &Context = MF->getFunction()->getContext();
bool NeedComma = false;
for (const auto *Op : MI.memoperands()) {
if (NeedComma)
OS << ", ";
- print(*Op);
+ print(Context, *TII, *Op);
NeedComma = true;
}
}
@@ -1031,9 +1037,20 @@ void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI,
}
}
-void MIPrinter::print(const MachineMemOperand &Op) {
+static const char *getTargetMMOFlagName(const TargetInstrInfo &TII,
+ unsigned TMMOFlag) {
+ auto Flags = TII.getSerializableMachineMemOperandTargetFlags();
+ for (const auto &I : Flags) {
+ if (I.first == TMMOFlag) {
+ return I.second;
+ }
+ }
+ return nullptr;
+}
+
+void MIPrinter::print(const LLVMContext &Context, const TargetInstrInfo &TII,
+ const MachineMemOperand &Op) {
OS << '(';
- // TODO: Print operand's target specific flags.
if (Op.isVolatile())
OS << "volatile ";
if (Op.isNonTemporal())
@@ -1042,6 +1059,15 @@ void MIPrinter::print(const MachineMemOperand &Op) {
OS << "dereferenceable ";
if (Op.isInvariant())
OS << "invariant ";
+ if (Op.getFlags() & MachineMemOperand::MOTargetFlag1)
+ OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag1)
+ << "\" ";
+ if (Op.getFlags() & MachineMemOperand::MOTargetFlag2)
+ OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag2)
+ << "\" ";
+ if (Op.getFlags() & MachineMemOperand::MOTargetFlag3)
+ OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag3)
+ << "\" ";
if (Op.isLoad())
OS << "load ";
else {
@@ -1049,8 +1075,7 @@ void MIPrinter::print(const MachineMemOperand &Op) {
OS << "store ";
}
- if (Op.getSynchScope() == SynchronizationScope::SingleThread)
- OS << "singlethread ";
+ printSyncScope(Context, Op.getSyncScopeID());
if (Op.getOrdering() != AtomicOrdering::NotAtomic)
OS << toIRString(Op.getOrdering()) << ' ';
@@ -1119,6 +1144,23 @@ void MIPrinter::print(const MachineMemOperand &Op) {
OS << ')';
}
+void MIPrinter::printSyncScope(const LLVMContext &Context, SyncScope::ID SSID) {
+ switch (SSID) {
+ case SyncScope::System: {
+ break;
+ }
+ default: {
+ if (SSNs.empty())
+ Context.getSyncScopeNames(SSNs);
+
+ OS << "syncscope(\"";
+ PrintEscapedString(SSNs[SSID], OS);
+ OS << "\") ";
+ break;
+ }
+ }
+}
+
static void printCFIRegister(unsigned DwarfReg, raw_ostream &OS,
const TargetRegisterInfo *TRI) {
int Reg = TRI->getLLVMRegNum(DwarfReg, true);
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index 2d4b95974cc6..447ad629885b 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -1917,6 +1917,12 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
return;
MachineBasicBlock *Top = *LoopChain.begin();
+ MachineBasicBlock *Bottom = *std::prev(LoopChain.end());
+
+ // If ExitingBB is already the last one in a chain then nothing to do.
+ if (Bottom == ExitingBB)
+ return;
+
bool ViableTopFallthrough = false;
for (MachineBasicBlock *Pred : Top->predecessors()) {
BlockChain *PredChain = BlockToChain[Pred];
@@ -1931,7 +1937,6 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
// bottom is a viable exiting block. If so, bail out as rotating will
// introduce an unnecessary branch.
if (ViableTopFallthrough) {
- MachineBasicBlock *Bottom = *std::prev(LoopChain.end());
for (MachineBasicBlock *Succ : Bottom->successors()) {
BlockChain *SuccChain = BlockToChain[Succ];
if (!LoopBlockSet.count(Succ) &&
@@ -1944,6 +1949,36 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
if (ExitIt == LoopChain.end())
return;
+ // Rotating a loop exit to the bottom when there is a fallthrough to top
+ // trades the entry fallthrough for an exit fallthrough.
+ // If there is no bottom->top edge, but the chosen exit block does have
+ // a fallthrough, we break that fallthrough for nothing in return.
+
+ // Let's consider an example. We have a built chain of basic blocks
+  // B1, B2, ..., Bn, where Bk is the ExitingBB - the chosen exit block.
+  // By doing a rotation we get
+  // Bk+1, ..., Bn, B1, ..., Bk
+  // Breaking the fallthrough to B1 is compensated by a fallthrough from Bk.
+  // If we had a fallthrough Bk -> Bk+1, it is broken now.
+  // It might be compensated by a fallthrough Bn -> B1.
+  // So we have a condition to avoid creating an extra branch by loop rotation.
+  // All of the below must be true to avoid loop rotation:
+  //   If there is a fallthrough to top (B1)
+  //   There was a fallthrough from the chosen exit block (Bk) to the next one (Bk+1)
+  //   There is no fallthrough from bottom (Bn) to top (B1).
+ // Please note that there is no exit fallthrough from Bn because we checked it
+ // above.
+ if (ViableTopFallthrough) {
+ assert(std::next(ExitIt) != LoopChain.end() &&
+ "Exit should not be last BB");
+ MachineBasicBlock *NextBlockInChain = *std::next(ExitIt);
+ if (ExitingBB->isSuccessor(NextBlockInChain))
+ if (!Bottom->isSuccessor(Top))
+ return;
+ }
+
+ DEBUG(dbgs() << "Rotating loop to put exit " << getBlockName(ExitingBB)
+ << " at bottom\n");
std::rotate(LoopChain.begin(), std::next(ExitIt), LoopChain.end());
}
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index bbdae6e1a49e..f88e175a9776 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -305,11 +305,11 @@ MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
MachineMemOperand *MachineFunction::getMachineMemOperand(
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
unsigned base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
- SynchronizationScope SynchScope, AtomicOrdering Ordering,
+ SyncScope::ID SSID, AtomicOrdering Ordering,
AtomicOrdering FailureOrdering) {
return new (Allocator)
MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
- SynchScope, Ordering, FailureOrdering);
+ SSID, Ordering, FailureOrdering);
}
MachineMemOperand *
@@ -320,13 +320,13 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
MachineMemOperand(MachinePointerInfo(MMO->getValue(),
MMO->getOffset()+Offset),
MMO->getFlags(), Size, MMO->getBaseAlignment(),
- AAMDNodes(), nullptr, MMO->getSynchScope(),
+ AAMDNodes(), nullptr, MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
return new (Allocator)
MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
MMO->getOffset()+Offset),
MMO->getFlags(), Size, MMO->getBaseAlignment(),
- AAMDNodes(), nullptr, MMO->getSynchScope(),
+ AAMDNodes(), nullptr, MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
}
@@ -359,7 +359,7 @@ MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
(*I)->getFlags() & ~MachineMemOperand::MOStore,
(*I)->getSize(), (*I)->getBaseAlignment(),
(*I)->getAAInfo(), nullptr,
- (*I)->getSynchScope(), (*I)->getOrdering(),
+ (*I)->getSyncScopeID(), (*I)->getOrdering(),
(*I)->getFailureOrdering());
Result[Index] = JustLoad;
}
@@ -393,7 +393,7 @@ MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
(*I)->getFlags() & ~MachineMemOperand::MOLoad,
(*I)->getSize(), (*I)->getBaseAlignment(),
(*I)->getAAInfo(), nullptr,
- (*I)->getSynchScope(), (*I)->getOrdering(),
+ (*I)->getSyncScopeID(), (*I)->getOrdering(),
(*I)->getFailureOrdering());
Result[Index] = JustStore;
}
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index 81c6dace92e0..afea5575a3ae 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -447,6 +447,14 @@ void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
SmallString<16> Str;
getFPImm()->getValueAPF().toString(Str);
OS << "quad " << Str;
+ } else if (getFPImm()->getType()->isX86_FP80Ty()) {
+ APFloat APF = getFPImm()->getValueAPF();
+ OS << "x86_fp80 0xK";
+ APInt API = APF.bitcastToAPInt();
+ OS << format_hex_no_prefix(API.getHiBits(16).getZExtValue(), 4,
+ /*Upper=*/true);
+ OS << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
} else {
OS << getFPImm()->getValueAPF().convertToDouble();
}
@@ -606,7 +614,7 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
uint64_t s, unsigned int a,
const AAMDNodes &AAInfo,
const MDNode *Ranges,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
AtomicOrdering Ordering,
AtomicOrdering FailureOrdering)
: PtrInfo(ptrinfo), Size(s), FlagVals(f), BaseAlignLog2(Log2_32(a) + 1),
@@ -617,8 +625,8 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
assert((isLoad() || isStore()) && "Not a load/store!");
- AtomicInfo.SynchScope = static_cast<unsigned>(SynchScope);
- assert(getSynchScope() == SynchScope && "Value truncated");
+ AtomicInfo.SSID = static_cast<unsigned>(SSID);
+ assert(getSyncScopeID() == SSID && "Value truncated");
AtomicInfo.Ordering = static_cast<unsigned>(Ordering);
assert(getOrdering() == Ordering && "Value truncated");
AtomicInfo.FailureOrdering = static_cast<unsigned>(FailureOrdering);
@@ -744,6 +752,12 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST) const {
OS << "(dereferenceable)";
if (isInvariant())
OS << "(invariant)";
+ if (getFlags() & MOTargetFlag1)
+ OS << "(flag1)";
+ if (getFlags() & MOTargetFlag2)
+ OS << "(flag2)";
+ if (getFlags() & MOTargetFlag3)
+ OS << "(flag3)";
}
//===----------------------------------------------------------------------===//
diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp
index e65c256c1bb5..fcb544806dda 100644
--- a/lib/CodeGen/MachineVerifier.cpp
+++ b/lib/CodeGen/MachineVerifier.cpp
@@ -985,6 +985,14 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
report("Operand should be tied", MO, MONum);
else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
report("Tied def doesn't match MCInstrDesc", MO, MONum);
+ else if (TargetRegisterInfo::isPhysicalRegister(MO->getReg())) {
+ const MachineOperand &MOTied = MI->getOperand(TiedTo);
+ if (!MOTied.isReg())
+ report("Tied counterpart must be a register", &MOTied, TiedTo);
+ else if (TargetRegisterInfo::isPhysicalRegister(MOTied.getReg()) &&
+ MO->getReg() != MOTied.getReg())
+ report("Tied physical registers must match.", &MOTied, TiedTo);
+ }
} else if (MO->isReg() && MO->isTied())
report("Explicit operand should not be tied", MO, MONum);
} else {
diff --git a/lib/CodeGen/MacroFusion.cpp b/lib/CodeGen/MacroFusion.cpp
index 5e279b065bbd..633a853b2c74 100644
--- a/lib/CodeGen/MacroFusion.cpp
+++ b/lib/CodeGen/MacroFusion.cpp
@@ -24,7 +24,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
STATISTIC(NumFused, "Number of instr pairs fused");
diff --git a/lib/CodeGen/PostRAHazardRecognizer.cpp b/lib/CodeGen/PostRAHazardRecognizer.cpp
index 425a59dc0375..4a50d895340a 100644
--- a/lib/CodeGen/PostRAHazardRecognizer.cpp
+++ b/lib/CodeGen/PostRAHazardRecognizer.cpp
@@ -23,7 +23,7 @@
/// This pass traverses all the instructions in a program in top-down order.
/// In contrast to the instruction scheduling passes, this pass never resets
/// the hazard recognizer to ensure it can correctly handles noop hazards at
-/// the begining of blocks.
+/// the beginning of blocks.
//
//===----------------------------------------------------------------------===//
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index c606b7b83310..d5538be4bba2 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -203,6 +203,8 @@ namespace {
char RAFast::ID = 0;
}
+INITIALIZE_PASS(RAFast, "regallocfast", "Fast Register Allocator", false, false)
+
/// getStackSpaceFor - This allocates space for the specified virtual register
/// to be held on the stack.
int RAFast::getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC) {
@@ -244,8 +246,15 @@ void RAFast::addKillFlag(const LiveReg &LR) {
if (MO.isUse() && !LR.LastUse->isRegTiedToDefOperand(LR.LastOpNum)) {
if (MO.getReg() == LR.PhysReg)
MO.setIsKill();
- else
- LR.LastUse->addRegisterKilled(LR.PhysReg, TRI, true);
+      // else, don't do anything; we are probably redefining a
+      // subreg of this register and, given we don't track which
+      // lanes are actually dead, we cannot insert a kill flag here.
+ // Otherwise we may end up in a situation like this:
+ // ... = (MO) physreg:sub1, physreg <implicit-use, kill>
+ // ... <== Here we would allow later pass to reuse physreg:sub1
+ // which is potentially wrong.
+ // LR:sub0 = ...
+ // ... = LR.sub1 <== This is going to use physreg:sub1
}
}
diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp
index 9562652556ac..020e81eca2dd 100644
--- a/lib/CodeGen/RegAllocGreedy.cpp
+++ b/lib/CodeGen/RegAllocGreedy.cpp
@@ -2458,7 +2458,7 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
do {
Reg = RecoloringCandidates.pop_back_val();
- // We cannot recolor physcal register.
+ // We cannot recolor physical register.
if (TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
diff --git a/lib/CodeGen/RegAllocPBQP.cpp b/lib/CodeGen/RegAllocPBQP.cpp
index e3baff4be4bc..9778103575fa 100644
--- a/lib/CodeGen/RegAllocPBQP.cpp
+++ b/lib/CodeGen/RegAllocPBQP.cpp
@@ -924,5 +924,3 @@ FunctionPass *llvm::createPBQPRegisterAllocator(char *customPassID) {
FunctionPass* llvm::createDefaultPBQPRegisterAllocator() {
return createPBQPRegisterAllocator();
}
-
-#undef DEBUG_TYPE
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index ff9bca092dbe..a67d07b36474 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -1227,6 +1227,34 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
SR->createDeadDef(DefIndex, Alloc);
}
}
+
+ // Make sure that the subrange for resultant undef is removed
+ // For example:
+ // vreg1:sub1<def,read-undef> = LOAD CONSTANT 1
+ // vreg2<def> = COPY vreg1
+ // ==>
+ // vreg2:sub1<def, read-undef> = LOAD CONSTANT 1
+ // ; Correct but need to remove the subrange for vreg2:sub0
+ // ; as it is now undef
+ if (NewIdx != 0 && DstInt.hasSubRanges()) {
+ // The affected subregister segments can be removed.
+ SlotIndex CurrIdx = LIS->getInstructionIndex(NewMI);
+ LaneBitmask DstMask = TRI->getSubRegIndexLaneMask(NewIdx);
+ bool UpdatedSubRanges = false;
+ for (LiveInterval::SubRange &SR : DstInt.subranges()) {
+ if ((SR.LaneMask & DstMask).none()) {
+ DEBUG(dbgs() << "Removing undefined SubRange "
+ << PrintLaneMask(SR.LaneMask) << " : " << SR << "\n");
+ // VNI is in ValNo - remove any segments in this SubRange that have this ValNo
+ if (VNInfo *RmValNo = SR.getVNInfoAt(CurrIdx.getRegSlot())) {
+ SR.removeValNo(RmValNo);
+ UpdatedSubRanges = true;
+ }
+ }
+ }
+ if (UpdatedSubRanges)
+ DstInt.removeEmptySubRanges();
+ }
} else if (NewMI.getOperand(0).getReg() != CopyDstReg) {
// The New instruction may be defining a sub-register of what's actually
// been asked for. If so it must implicitly define the whole thing.
diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp
index 05e641d9489d..fc5105aadbff 100644
--- a/lib/CodeGen/RegisterScavenging.cpp
+++ b/lib/CodeGen/RegisterScavenging.cpp
@@ -375,7 +375,8 @@ unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
static std::pair<MCPhysReg, MachineBasicBlock::iterator>
findSurvivorBackwards(const MachineRegisterInfo &MRI,
MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
- const LiveRegUnits &LiveOut, ArrayRef<MCPhysReg> AllocationOrder) {
+ const LiveRegUnits &LiveOut, ArrayRef<MCPhysReg> AllocationOrder,
+ bool RestoreAfter) {
bool FoundTo = false;
MCPhysReg Survivor = 0;
MachineBasicBlock::iterator Pos;
@@ -388,7 +389,7 @@ findSurvivorBackwards(const MachineRegisterInfo &MRI,
for (MachineBasicBlock::iterator I = From;; --I) {
const MachineInstr &MI = *I;
- Used.accumulateBackward(MI);
+ Used.accumulate(MI);
if (I == To) {
// See if one of the registers in RC wasn't used so far.
@@ -401,6 +402,11 @@ findSurvivorBackwards(const MachineRegisterInfo &MRI,
// the register which is not defined/used for the longest time.
FoundTo = true;
Pos = To;
+      // Note: It was fine so far to start our search at From; however, now
+      // that we have to spill and can only place the restore after From, we
+      // add the regs used/defed by std::next(From) to the set.
+ if (RestoreAfter)
+ Used.accumulate(*std::next(From));
}
if (FoundTo) {
if (Survivor == 0 || !Used.available(Survivor)) {
@@ -575,7 +581,8 @@ unsigned RegScavenger::scavengeRegisterBackwards(const TargetRegisterClass &RC,
MachineBasicBlock::iterator UseMI;
ArrayRef<MCPhysReg> AllocationOrder = RC.getRawAllocationOrder(MF);
std::pair<MCPhysReg, MachineBasicBlock::iterator> P =
- findSurvivorBackwards(*MRI, MBBI, To, LiveUnits, AllocationOrder);
+ findSurvivorBackwards(*MRI, MBBI, To, LiveUnits, AllocationOrder,
+ RestoreAfter);
MCPhysReg Reg = P.first;
MachineBasicBlock::iterator SpillBefore = P.second;
assert(Reg != 0 && "No register left to scavenge!");
@@ -626,7 +633,7 @@ static unsigned scavengeVReg(MachineRegisterInfo &MRI, RegScavenger &RS,
assert(RealDef != nullptr && "Must have at least 1 Def");
#endif
- // We should only have one definition of the register. However to accomodate
+ // We should only have one definition of the register. However to accommodate
// the requirements of two address code we also allow definitions in
// subsequent instructions provided they also read the register. That way
// we get a single contiguous lifetime.
diff --git a/lib/CodeGen/ScheduleDAG.cpp b/lib/CodeGen/ScheduleDAG.cpp
index 3cd270cec3a6..5e95f760aaa2 100644
--- a/lib/CodeGen/ScheduleDAG.cpp
+++ b/lib/CodeGen/ScheduleDAG.cpp
@@ -67,6 +67,41 @@ const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
return &TII->get(Node->getMachineOpcode());
}
+LLVM_DUMP_METHOD
+raw_ostream &SDep::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
+ switch (getKind()) {
+ case Data: OS << "Data"; break;
+ case Anti: OS << "Anti"; break;
+ case Output: OS << "Out "; break;
+ case Order: OS << "Ord "; break;
+ }
+
+ switch (getKind()) {
+ case Data:
+ OS << " Latency=" << getLatency();
+ if (TRI && isAssignedRegDep())
+ OS << " Reg=" << PrintReg(getReg(), TRI);
+ break;
+ case Anti:
+ case Output:
+ OS << " Latency=" << getLatency();
+ break;
+ case Order:
+ OS << " Latency=" << getLatency();
+ switch(Contents.OrdKind) {
+ case Barrier: OS << " Barrier"; break;
+ case MayAliasMem:
+ case MustAliasMem: OS << " Memory"; break;
+ case Artificial: OS << " Artificial"; break;
+ case Weak: OS << " Weak"; break;
+ case Cluster: OS << " Cluster"; break;
+ }
+ break;
+ }
+
+ return OS;
+}
+
bool SUnit::addPred(const SDep &D, bool Required) {
// If this node already has this dependence, don't add a redundant one.
for (SDep &PredDep : Preds) {
@@ -302,16 +337,24 @@ void SUnit::biasCriticalPath() {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
-void SUnit::print(raw_ostream &OS, const ScheduleDAG *DAG) const {
- if (this == &DAG->ExitSU)
- OS << "ExitSU";
- else if (this == &DAG->EntrySU)
+raw_ostream &SUnit::print(raw_ostream &OS,
+ const SUnit *Entry, const SUnit *Exit) const {
+ if (this == Entry)
OS << "EntrySU";
+ else if (this == Exit)
+ OS << "ExitSU";
else
OS << "SU(" << NodeNum << ")";
+ return OS;
+}
+
+LLVM_DUMP_METHOD
+raw_ostream &SUnit::print(raw_ostream &OS, const ScheduleDAG *G) const {
+ return print(OS, &G->EntrySU, &G->ExitSU);
}
-LLVM_DUMP_METHOD void SUnit::dump(const ScheduleDAG *G) const {
+LLVM_DUMP_METHOD
+void SUnit::dump(const ScheduleDAG *G) const {
print(dbgs(), G);
dbgs() << ": ";
G->dumpNode(this);
@@ -333,40 +376,18 @@ LLVM_DUMP_METHOD void SUnit::dumpAll(const ScheduleDAG *G) const {
if (Preds.size() != 0) {
dbgs() << " Predecessors:\n";
- for (const SDep &SuccDep : Preds) {
- dbgs() << " ";
- switch (SuccDep.getKind()) {
- case SDep::Data: dbgs() << "data "; break;
- case SDep::Anti: dbgs() << "anti "; break;
- case SDep::Output: dbgs() << "out "; break;
- case SDep::Order: dbgs() << "ord "; break;
- }
- SuccDep.getSUnit()->print(dbgs(), G);
- if (SuccDep.isArtificial())
- dbgs() << " *";
- dbgs() << ": Latency=" << SuccDep.getLatency();
- if (SuccDep.isAssignedRegDep())
- dbgs() << " Reg=" << PrintReg(SuccDep.getReg(), G->TRI);
- dbgs() << "\n";
+ for (const SDep &Dep : Preds) {
+ dbgs() << " ";
+ Dep.getSUnit()->print(dbgs(), G); dbgs() << ": ";
+ Dep.print(dbgs(), G->TRI); dbgs() << '\n';
}
}
if (Succs.size() != 0) {
dbgs() << " Successors:\n";
- for (const SDep &SuccDep : Succs) {
- dbgs() << " ";
- switch (SuccDep.getKind()) {
- case SDep::Data: dbgs() << "data "; break;
- case SDep::Anti: dbgs() << "anti "; break;
- case SDep::Output: dbgs() << "out "; break;
- case SDep::Order: dbgs() << "ord "; break;
- }
- SuccDep.getSUnit()->print(dbgs(), G);
- if (SuccDep.isArtificial())
- dbgs() << " *";
- dbgs() << ": Latency=" << SuccDep.getLatency();
- if (SuccDep.isAssignedRegDep())
- dbgs() << " Reg=" << PrintReg(SuccDep.getReg(), G->TRI);
- dbgs() << "\n";
+ for (const SDep &Dep : Succs) {
+ dbgs() << " ";
+ Dep.getSUnit()->print(dbgs(), G); dbgs() << ": ";
+ Dep.print(dbgs(), G->TRI); dbgs() << '\n';
}
}
}
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index 0f70b0e9ca07..ccd937950a74 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -63,7 +63,7 @@
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
cl::ZeroOrMore, cl::init(false),
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d901af727686..71382c18fdf9 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -400,6 +400,7 @@ namespace {
SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
SDValue reduceBuildVecToShuffle(SDNode *N);
+ SDValue reduceBuildVecToTrunc(SDNode *N);
SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
ArrayRef<int> VectorMask, SDValue VecIn1,
SDValue VecIn2, unsigned LeftIdx);
@@ -5267,14 +5268,40 @@ SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
}
SDValue DAGCombiner::visitRotate(SDNode *N) {
+ SDLoc dl(N);
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N->getValueType(0);
+
+ // fold (rot x, 0) -> x
+ if (isNullConstantOrNullSplatConstant(N1))
+ return N0;
+
// fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
- if (N->getOperand(1).getOpcode() == ISD::TRUNCATE &&
- N->getOperand(1).getOperand(0).getOpcode() == ISD::AND) {
- if (SDValue NewOp1 =
- distributeTruncateThroughAnd(N->getOperand(1).getNode()))
- return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
- N->getOperand(0), NewOp1);
- }
+ if (N1.getOpcode() == ISD::TRUNCATE &&
+ N1.getOperand(0).getOpcode() == ISD::AND) {
+ if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
+ return DAG.getNode(N->getOpcode(), dl, VT, N0, NewOp1);
+ }
+
+ unsigned NextOp = N0.getOpcode();
+ // fold (rot* (rot* x, c2), c1) -> (rot* x, c1 +- c2 % bitsize)
+ if (NextOp == ISD::ROTL || NextOp == ISD::ROTR)
+ if (SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1))
+ if (SDNode *C2 =
+ DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
+ bool SameSide = (N->getOpcode() == NextOp);
+ unsigned CombineOp = SameSide ? ISD::ADD : ISD::SUB;
+ if (SDValue CombinedShift =
+ DAG.FoldConstantArithmetic(CombineOp, dl, VT, C1, C2)) {
+ unsigned Bitsize = VT.getScalarSizeInBits();
+ SDValue BitsizeC = DAG.getConstant(Bitsize, dl, VT);
+ SDValue CombinedShiftNorm = DAG.FoldConstantArithmetic(
+ ISD::SREM, dl, VT, CombinedShift.getNode(), BitsizeC.getNode());
+ return DAG.getNode(
+ N->getOpcode(), dl, VT, N0->getOperand(0), CombinedShiftNorm);
+ }
+ }
return SDValue();
}
@@ -6091,19 +6118,22 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
SDValue N2 = N->getOperand(2);
EVT VT = N->getValueType(0);
EVT VT0 = N0.getValueType();
+ SDLoc DL(N);
// fold (select C, X, X) -> X
if (N1 == N2)
return N1;
+
if (const ConstantSDNode *N0C = dyn_cast<const ConstantSDNode>(N0)) {
// fold (select true, X, Y) -> X
// fold (select false, X, Y) -> Y
return !N0C->isNullValue() ? N1 : N2;
}
+
// fold (select X, X, Y) -> (or X, Y)
// fold (select X, 1, Y) -> (or C, Y)
if (VT == VT0 && VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
- return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
+ return DAG.getNode(ISD::OR, DL, VT, N0, N2);
if (SDValue V = foldSelectOfConstants(N))
return V;
@@ -6112,22 +6142,22 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
if (VT == VT0 && VT == MVT::i1 && isNullConstant(N1)) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
AddToWorklist(NOTNode.getNode());
- return DAG.getNode(ISD::AND, SDLoc(N), VT, NOTNode, N2);
+ return DAG.getNode(ISD::AND, DL, VT, NOTNode, N2);
}
// fold (select C, X, 1) -> (or (not C), X)
if (VT == VT0 && VT == MVT::i1 && isOneConstant(N2)) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
AddToWorklist(NOTNode.getNode());
- return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1);
+ return DAG.getNode(ISD::OR, DL, VT, NOTNode, N1);
}
// fold (select X, Y, X) -> (and X, Y)
// fold (select X, Y, 0) -> (and X, Y)
if (VT == VT0 && VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
- return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
+ return DAG.getNode(ISD::AND, DL, VT, N0, N1);
// If we can fold this based on the true/false value, do so.
if (SimplifySelectOps(N, N1, N2))
- return SDValue(N, 0); // Don't revisit N.
+ return SDValue(N, 0); // Don't revisit N.
if (VT0 == MVT::i1) {
// The code in this block deals with the following 2 equivalences:
@@ -6138,27 +6168,27 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
// to the right anyway if we find the inner select exists in the DAG anyway
// and we always transform to the left side if we know that we can further
// optimize the combination of the conditions.
- bool normalizeToSequence
- = TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT);
+ bool normalizeToSequence =
+ TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT);
// select (and Cond0, Cond1), X, Y
// -> select Cond0, (select Cond1, X, Y), Y
if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
SDValue Cond0 = N0->getOperand(0);
SDValue Cond1 = N0->getOperand(1);
- SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
- N1.getValueType(), Cond1, N1, N2);
+ SDValue InnerSelect =
+ DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond1, N1, N2);
if (normalizeToSequence || !InnerSelect.use_empty())
- return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0,
+ return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0,
InnerSelect, N2);
}
// select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
SDValue Cond0 = N0->getOperand(0);
SDValue Cond1 = N0->getOperand(1);
- SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
- N1.getValueType(), Cond1, N1, N2);
+ SDValue InnerSelect =
+ DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond1, N1, N2);
if (normalizeToSequence || !InnerSelect.use_empty())
- return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0, N1,
+ return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0, N1,
InnerSelect);
}
@@ -6170,15 +6200,13 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
// Create the actual and node if we can generate good code for it.
if (!normalizeToSequence) {
- SDValue And = DAG.getNode(ISD::AND, SDLoc(N), N0.getValueType(),
- N0, N1_0);
- return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), And,
- N1_1, N2);
+ SDValue And = DAG.getNode(ISD::AND, DL, N0.getValueType(), N0, N1_0);
+ return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), And, N1_1, N2);
}
// Otherwise see if we can optimize the "and" to a better pattern.
if (SDValue Combined = visitANDLike(N0, N1_0, N))
- return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined,
- N1_1, N2);
+ return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1_1,
+ N2);
}
}
// select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y
@@ -6189,15 +6217,13 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
// Create the actual or node if we can generate good code for it.
if (!normalizeToSequence) {
- SDValue Or = DAG.getNode(ISD::OR, SDLoc(N), N0.getValueType(),
- N0, N2_0);
- return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Or,
- N1, N2_2);
+ SDValue Or = DAG.getNode(ISD::OR, DL, N0.getValueType(), N0, N2_0);
+ return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Or, N1, N2_2);
}
// Otherwise see if we can optimize to a better pattern.
if (SDValue Combined = visitORLike(N0, N2_0, N))
- return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined,
- N1, N2_2);
+ return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1,
+ N2_2);
}
}
}
@@ -6208,8 +6234,7 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) {
SDValue Cond0 = N0->getOperand(0);
if (C->isOne())
- return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(),
- Cond0, N2, N1);
+ return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0, N2, N1);
}
}
}
@@ -6226,24 +6251,21 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
// FIXME: Instead of testing for UnsafeFPMath, this should be checking for
// no signed zeros as well as no nans.
const TargetOptions &Options = DAG.getTarget().Options;
- if (Options.UnsafeFPMath &&
- VT.isFloatingPoint() && N0.hasOneUse() &&
+ if (Options.UnsafeFPMath && VT.isFloatingPoint() && N0.hasOneUse() &&
DAG.isKnownNeverNaN(N1) && DAG.isKnownNeverNaN(N2)) {
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
- if (SDValue FMinMax = combineMinNumMaxNum(SDLoc(N), VT, N0.getOperand(0),
- N0.getOperand(1), N1, N2, CC,
- TLI, DAG))
+ if (SDValue FMinMax = combineMinNumMaxNum(
+ DL, VT, N0.getOperand(0), N0.getOperand(1), N1, N2, CC, TLI, DAG))
return FMinMax;
}
if ((!LegalOperations &&
TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
TLI.isOperationLegal(ISD::SELECT_CC, VT))
- return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT,
- N0.getOperand(0), N0.getOperand(1),
- N1, N2, N0.getOperand(2));
- return SimplifySelect(SDLoc(N), N0, N1, N2);
+ return DAG.getNode(ISD::SELECT_CC, DL, VT, N0.getOperand(0),
+ N0.getOperand(1), N1, N2, N0.getOperand(2));
+ return SimplifySelect(DL, N0, N1, N2);
}
return SDValue();
@@ -11045,7 +11067,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
// x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
//
// where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
- // indexed load/store and the expresion that needs to be re-written.
+ // indexed load/store and the expression that needs to be re-written.
//
// Therefore, we have:
// t0 = (x0 * offset0 - x1 * y0 * y1 *offset1) + (y0 * y1) * t1
@@ -11379,7 +11401,7 @@ namespace {
/// Shift = srl Ty1 Origin, CstTy Amount
/// Inst = trunc Shift to Ty2
///
-/// Then, it will be rewriten into:
+/// Then, it will be rewritten into:
/// Slice = load SliceTy, Base + SliceOffset
/// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2
///
@@ -12694,7 +12716,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits);
bool IsFast = false;
if (TLI.isTypeLegal(StoreTy) &&
- TLI.canMergeStoresTo(FirstStoreAS, StoreTy) &&
+ TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
FirstStoreAlign, &IsFast) &&
IsFast) {
@@ -12706,7 +12728,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
EVT LegalizedStoredValueTy =
TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
- TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValueTy) &&
+ TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValueTy, DAG) &&
TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
FirstStoreAS, FirstStoreAlign, &IsFast) &&
IsFast) {
@@ -12723,7 +12745,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
!NoVectors) {
// Find a legal type for the vector store.
EVT Ty = EVT::getVectorVT(Context, MemVT, i + 1);
- if (TLI.isTypeLegal(Ty) && TLI.canMergeStoresTo(FirstStoreAS, Ty) &&
+ if (TLI.isTypeLegal(Ty) &&
+ TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
FirstStoreAlign, &IsFast) &&
IsFast)
@@ -12781,7 +12804,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
EVT Ty =
EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
bool IsFast;
- if (TLI.isTypeLegal(Ty) && TLI.canMergeStoresTo(FirstStoreAS, Ty) &&
+ if (TLI.isTypeLegal(Ty) &&
+ TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
FirstStoreAlign, &IsFast) &&
IsFast)
@@ -12898,7 +12922,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
EVT StoreTy = EVT::getVectorVT(Context, MemVT, i + 1);
bool IsFastSt, IsFastLd;
if (TLI.isTypeLegal(StoreTy) &&
- TLI.canMergeStoresTo(FirstStoreAS, StoreTy) &&
+ TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
FirstStoreAlign, &IsFastSt) &&
IsFastSt &&
@@ -12912,7 +12936,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
StoreTy = EVT::getIntegerVT(Context, SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
- TLI.canMergeStoresTo(FirstStoreAS, StoreTy) &&
+ TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
FirstStoreAlign, &IsFastSt) &&
IsFastSt &&
@@ -12926,7 +12950,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValueTy = TLI.getTypeToTransformTo(Context, StoreTy);
if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
- TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValueTy) &&
+ TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValueTy, DAG) &&
TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy,
StoreTy) &&
TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValueTy,
@@ -14228,6 +14252,73 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
return Shuffles[0];
}
+// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
+// operations which can be matched to a truncate.
+SDValue DAGCombiner::reduceBuildVecToTrunc(SDNode *N) {
+ // TODO: Add support for big-endian.
+ if (DAG.getDataLayout().isBigEndian())
+ return SDValue();
+ if (N->getNumOperands() < 2)
+ return SDValue();
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ unsigned NumElems = N->getNumOperands();
+
+ if (!isTypeLegal(VT))
+ return SDValue();
+
+ // If the input is something other than an EXTRACT_VECTOR_ELT with a constant
+ // index, bail out.
+ // TODO: Allow undef elements in some cases?
+ if (any_of(N->ops(), [VT](SDValue Op) {
+ return Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+ !isa<ConstantSDNode>(Op.getOperand(1)) ||
+ Op.getValueType() != VT.getVectorElementType();
+ }))
+ return SDValue();
+
+ // Helper for obtaining an EXTRACT_VECTOR_ELT's constant index
+ auto GetExtractIdx = [](SDValue Extract) {
+ return cast<ConstantSDNode>(Extract.getOperand(1))->getSExtValue();
+ };
+
+  // The first BUILD_VECTOR operand must be an extract from index zero
+ // (assuming no undef and little-endian).
+ if (GetExtractIdx(N->getOperand(0)) != 0)
+ return SDValue();
+
+ // Compute the stride from the first index.
+ int Stride = GetExtractIdx(N->getOperand(1));
+ SDValue ExtractedFromVec = N->getOperand(0).getOperand(0);
+
+ // Proceed only if the stride and the types can be matched to a truncate.
+ if ((Stride == 1 || !isPowerOf2_32(Stride)) ||
+ (ExtractedFromVec.getValueType().getVectorNumElements() !=
+ Stride * NumElems) ||
+ (VT.getScalarSizeInBits() * Stride > 64))
+ return SDValue();
+
+ // Check remaining operands are consistent with the computed stride.
+ for (unsigned i = 1; i != NumElems; ++i) {
+ SDValue Op = N->getOperand(i);
+
+ if ((Op.getOperand(0) != ExtractedFromVec) ||
+ (GetExtractIdx(Op) != Stride * i))
+ return SDValue();
+ }
+
+  // All checks were ok; construct the truncate.
+ LLVMContext &Ctx = *DAG.getContext();
+ EVT NewVT = VT.getVectorVT(
+ Ctx, EVT::getIntegerVT(Ctx, VT.getScalarSizeInBits() * Stride), NumElems);
+ EVT TruncVT =
+ VT.isFloatingPoint() ? VT.changeVectorElementTypeToInteger() : VT;
+
+ SDValue Res = DAG.getBitcast(NewVT, ExtractedFromVec);
+ Res = DAG.getNode(ISD::TRUNCATE, SDLoc(N), TruncVT, Res);
+ return DAG.getBitcast(VT, Res);
+}
+
SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
EVT VT = N->getValueType(0);
@@ -14270,6 +14361,10 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
if (SDValue V = reduceBuildVecConvertToConvertBuildVec(N))
return V;
+ if (TLI.isDesirableToCombineBuildVectorToTruncate())
+ if (SDValue V = reduceBuildVecToTrunc(N))
+ return V;
+
if (SDValue V = reduceBuildVecToShuffle(N))
return V;
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index b235e19aaab2..b96c96f0b4df 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -589,7 +589,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
} else
AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
IsClone, IsCloned);
- // Add the subregster being inserted
+ // Add the subregister being inserted
AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
IsClone, IsCloned);
MIB.addImm(SubIdx);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 873b2bd48f1e..7e4bc3ccb5d3 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1991,7 +1991,8 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
std::move(Args))
.setTailCall(isTailCall)
.setSExtResult(isSigned)
- .setZExtResult(!isSigned);
+ .setZExtResult(!isSigned)
+ .setIsPostTypeLegalization(true);
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
@@ -2029,7 +2030,8 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
.setLibCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee,
std::move(Args))
.setSExtResult(isSigned)
- .setZExtResult(!isSigned);
+ .setZExtResult(!isSigned)
+ .setIsPostTypeLegalization(true);
std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(CLI);
@@ -3565,16 +3567,10 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
}
- BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
- DAG.getIntPtrConstant(0, dl));
- TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
- DAG.getIntPtrConstant(1, dl));
- // Ret is a node with an illegal type. Because such things are not
- // generally permitted during this phase of legalization, make sure the
- // node has no more uses. The above EXTRACT_ELEMENT nodes should have been
- // folded.
- assert(Ret->use_empty() &&
- "Unexpected uses of illegally type from expanded lib call.");
+ assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
+ "Ret value is a collection of constituent nodes holding result.");
+ BottomHalf = Ret.getOperand(0);
+ TopHalf = Ret.getOperand(1);
}
if (isSigned) {
diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index c1cb5d9b5235..eaf177d0661b 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -112,15 +112,15 @@ bool DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
case ISD::VAARG: R = SoftenFloatRes_VAARG(N); break;
}
- // If R is null, the sub-method took care of registering the result.
- if (R.getNode()) {
+ if (R.getNode() && R.getNode() != N) {
SetSoftenedFloat(SDValue(N, ResNo), R);
- ReplaceSoftenFloatResult(N, ResNo, R);
+ // Return true only if the node is changed, assuming that the operands
+ // are also converted when necessary.
+ return true;
}
- // Return true only if the node is changed,
- // assuming that the operands are also converted when necessary.
+
// Otherwise, return false to tell caller to scan operands.
- return R.getNode() && R.getNode() != N;
+ return false;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_BITCAST(SDNode *N, unsigned ResNo) {
@@ -753,12 +753,17 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
llvm_unreachable("Do not know how to soften this operator's operand!");
case ISD::BITCAST: Res = SoftenFloatOp_BITCAST(N); break;
+ case ISD::CopyToReg: Res = SoftenFloatOp_COPY_TO_REG(N); break;
case ISD::BR_CC: Res = SoftenFloatOp_BR_CC(N); break;
+ case ISD::FABS: Res = SoftenFloatOp_FABS(N); break;
+ case ISD::FCOPYSIGN: Res = SoftenFloatOp_FCOPYSIGN(N); break;
+ case ISD::FNEG: Res = SoftenFloatOp_FNEG(N); break;
case ISD::FP_EXTEND: Res = SoftenFloatOp_FP_EXTEND(N); break;
case ISD::FP_TO_FP16: // Same as FP_ROUND for softening purposes
case ISD::FP_ROUND: Res = SoftenFloatOp_FP_ROUND(N); break;
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: Res = SoftenFloatOp_FP_TO_XINT(N); break;
+ case ISD::SELECT: Res = SoftenFloatOp_SELECT(N); break;
case ISD::SELECT_CC: Res = SoftenFloatOp_SELECT_CC(N); break;
case ISD::SETCC: Res = SoftenFloatOp_SETCC(N); break;
case ISD::STORE:
@@ -791,9 +796,9 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
bool DAGTypeLegalizer::CanSkipSoftenFloatOperand(SDNode *N, unsigned OpNo) {
if (!isLegalInHWReg(N->getOperand(OpNo).getValueType()))
return false;
- // When the operand type can be kept in registers, SoftenFloatResult
- // will call ReplaceValueWith to replace all references and we can
- // skip softening this operand.
+
+ // When the operand type can be kept in registers there is nothing to do for
+ // the following opcodes.
switch (N->getOperand(OpNo).getOpcode()) {
case ISD::BITCAST:
case ISD::ConstantFP:
@@ -807,18 +812,12 @@ bool DAGTypeLegalizer::CanSkipSoftenFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::SELECT_CC:
return true;
}
- // For some opcodes, SoftenFloatResult handles all conversion of softening
- // and replacing operands, so that there is no need to soften operands
- // again, although such opcode could be scanned for other illegal operands.
+
switch (N->getOpcode()) {
- case ISD::ConstantFP:
- case ISD::CopyFromReg:
- case ISD::CopyToReg:
- case ISD::FABS:
- case ISD::FCOPYSIGN:
- case ISD::FNEG:
- case ISD::Register:
- case ISD::SELECT:
+ case ISD::ConstantFP: // Leaf node.
+ case ISD::CopyFromReg: // Operand is a register that we know to be left
+ // unchanged by SoftenFloatResult().
+ case ISD::Register: // Leaf node.
return true;
}
return false;
@@ -829,6 +828,21 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_BITCAST(SDNode *N) {
GetSoftenedFloat(N->getOperand(0)));
}
+SDValue DAGTypeLegalizer::SoftenFloatOp_COPY_TO_REG(SDNode *N) {
+ SDValue Op1 = GetSoftenedFloat(N->getOperand(1));
+ SDValue Op2 = GetSoftenedFloat(N->getOperand(2));
+
+ if (Op1 == N->getOperand(1) && Op2 == N->getOperand(2))
+ return SDValue();
+
+ if (N->getNumOperands() == 3)
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Op1, Op2), 0);
+
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Op1, Op2,
+ N->getOperand(3)),
+ 0);
+}
+
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_EXTEND(SDNode *N) {
// If we get here, the result must be legal but the source illegal.
EVT SVT = N->getOperand(0).getValueType();
@@ -884,6 +898,34 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) {
0);
}
+SDValue DAGTypeLegalizer::SoftenFloatOp_FABS(SDNode *N) {
+ SDValue Op = GetSoftenedFloat(N->getOperand(0));
+
+ if (Op == N->getOperand(0))
+ return SDValue();
+
+ return SDValue(DAG.UpdateNodeOperands(N, Op), 0);
+}
+
+SDValue DAGTypeLegalizer::SoftenFloatOp_FCOPYSIGN(SDNode *N) {
+ SDValue Op0 = GetSoftenedFloat(N->getOperand(0));
+ SDValue Op1 = GetSoftenedFloat(N->getOperand(1));
+
+ if (Op0 == N->getOperand(0) && Op1 == N->getOperand(1))
+ return SDValue();
+
+ return SDValue(DAG.UpdateNodeOperands(N, Op0, Op1), 0);
+}
+
+SDValue DAGTypeLegalizer::SoftenFloatOp_FNEG(SDNode *N) {
+ SDValue Op = GetSoftenedFloat(N->getOperand(0));
+
+ if (Op == N->getOperand(0))
+ return SDValue();
+
+ return SDValue(DAG.UpdateNodeOperands(N, Op), 0);
+}
+
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_XINT(SDNode *N) {
bool Signed = N->getOpcode() == ISD::FP_TO_SINT;
EVT SVT = N->getOperand(0).getValueType();
@@ -913,6 +955,17 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_XINT(SDNode *N) {
return DAG.getNode(ISD::TRUNCATE, dl, RVT, Res);
}
+SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT(SDNode *N) {
+ SDValue Op1 = GetSoftenedFloat(N->getOperand(1));
+ SDValue Op2 = GetSoftenedFloat(N->getOperand(2));
+
+ if (Op1 == N->getOperand(1) && Op2 == N->getOperand(2))
+ return SDValue();
+
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Op1, Op2),
+ 0);
+}
+
SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 154af46c9446..001eed9fb8f6 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -80,6 +80,7 @@ void DAGTypeLegalizer::PerformExpensiveChecks() {
for (unsigned i = 0, e = Node.getNumValues(); i != e; ++i) {
SDValue Res(&Node, i);
+ EVT VT = Res.getValueType();
bool Failed = false;
unsigned Mapped = 0;
@@ -129,13 +130,17 @@ void DAGTypeLegalizer::PerformExpensiveChecks() {
dbgs() << "Unprocessed value in a map!";
Failed = true;
}
- } else if (isTypeLegal(Res.getValueType()) || IgnoreNodeResults(&Node)) {
+ } else if (isTypeLegal(VT) || IgnoreNodeResults(&Node)) {
if (Mapped > 1) {
dbgs() << "Value with legal type was transformed!";
Failed = true;
}
} else {
- if (Mapped == 0) {
+      // If the value can be kept in HW registers, the softening machinery can
+      // leave it unchanged and not put it in any map.
+ if (Mapped == 0 &&
+ !(getTypeAction(VT) == TargetLowering::TypeSoftenFloat &&
+ isLegalInHWReg(VT))) {
dbgs() << "Processed value not in any map!";
Failed = true;
} else if (Mapped & (Mapped - 1)) {
@@ -331,11 +336,6 @@ ScanOperands:
if (NeedsReanalyzing) {
assert(N->getNodeId() == ReadyToProcess && "Node ID recalculated?");
- // Remove any result values from SoftenedFloats as N will be revisited
- // again.
- for (unsigned i = 0, NumResults = N->getNumValues(); i < NumResults; ++i)
- SoftenedFloats.erase(SDValue(N, i));
-
N->setNodeId(NewNode);
// Recompute the NodeId and correct processed operands, adding the node to
// the worklist if ready.
@@ -754,8 +754,6 @@ void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) {
// new uses of From due to CSE. If this happens, replace the new uses of
// From with To.
} while (!From.use_empty());
-
- SoftenedFloats.erase(From);
}
void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) {
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 8e999188d8e1..e102df5e913d 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -416,16 +416,6 @@ private:
}
void SetSoftenedFloat(SDValue Op, SDValue Result);
- // Call ReplaceValueWith(SDValue(N, ResNo), Res) if necessary.
- void ReplaceSoftenFloatResult(SDNode *N, unsigned ResNo, SDValue &NewRes) {
- // When the result type can be kept in HW registers, the converted
- // NewRes node could have the same type. We can save the effort in
- // cloning every user of N in SoftenFloatOperand or other legalization functions,
- // by calling ReplaceValueWith here to update all users.
- if (NewRes.getNode() != N && isLegalInHWReg(N->getValueType(ResNo)))
- ReplaceValueWith(SDValue(N, ResNo), NewRes);
- }
-
// Convert Float Results to Integer for Non-HW-supported Operations.
bool SoftenFloatResult(SDNode *N, unsigned ResNo);
SDValue SoftenFloatRes_MERGE_VALUES(SDNode *N, unsigned ResNo);
@@ -471,17 +461,23 @@ private:
SDValue SoftenFloatRes_XINT_TO_FP(SDNode *N);
// Return true if we can skip softening the given operand or SDNode because
- // it was soften before by SoftenFloatResult and references to the operand
- // were replaced by ReplaceValueWith.
+  // either it was softened before by SoftenFloatResult and references to the
+  // operand were replaced by ReplaceValueWith, or its value type is legal in HW
+  // registers and the operand can be left unchanged.
bool CanSkipSoftenFloatOperand(SDNode *N, unsigned OpNo);
// Convert Float Operand to Integer for Non-HW-supported Operations.
bool SoftenFloatOperand(SDNode *N, unsigned OpNo);
SDValue SoftenFloatOp_BITCAST(SDNode *N);
+ SDValue SoftenFloatOp_COPY_TO_REG(SDNode *N);
SDValue SoftenFloatOp_BR_CC(SDNode *N);
+ SDValue SoftenFloatOp_FABS(SDNode *N);
+ SDValue SoftenFloatOp_FCOPYSIGN(SDNode *N);
+ SDValue SoftenFloatOp_FNEG(SDNode *N);
SDValue SoftenFloatOp_FP_EXTEND(SDNode *N);
SDValue SoftenFloatOp_FP_ROUND(SDNode *N);
SDValue SoftenFloatOp_FP_TO_XINT(SDNode *N);
+ SDValue SoftenFloatOp_SELECT(SDNode *N);
SDValue SoftenFloatOp_SELECT_CC(SDNode *N);
SDValue SoftenFloatOp_SETCC(SDNode *N);
SDValue SoftenFloatOp_STORE(SDNode *N, unsigned OpNo);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index aa69e0e2adfc..f3306151d864 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -57,7 +57,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Expand the floating point operand only if it was converted to integers.
// Otherwise, it is a legal type like f128 that can be saved in a register.
auto SoftenedOp = GetSoftenedFloat(InOp);
- if (SoftenedOp == InOp)
+ if (isLegalInHWReg(SoftenedOp.getValueType()))
break;
SplitInteger(SoftenedOp, Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index ff0e609803d8..d41054b15bbc 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2977,7 +2977,11 @@ SDValue DAGTypeLegalizer::convertMask(SDValue InMask, EVT MaskVT,
// Currently a SETCC or a AND/OR/XOR with two SETCCs are handled.
unsigned InMaskOpc = InMask->getOpcode();
+
+  // FIXME: This code seems to be too restrictive; we might consider
+  // generalizing it or dropping it.
assert((InMaskOpc == ISD::SETCC ||
+ ISD::isBuildVectorOfConstantSDNodes(InMask.getNode()) ||
(isLogicalMaskOp(InMaskOpc) &&
isSETCCorConvertedSETCC(InMask->getOperand(0)) &&
isSETCCorConvertedSETCC(InMask->getOperand(1)))) &&
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 98553152117d..823e77850c4b 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -34,6 +34,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -5442,7 +5443,7 @@ SDValue SelectionDAG::getAtomicCmpSwap(
unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
unsigned Alignment, AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
+ AtomicOrdering FailureOrdering, SyncScope::ID SSID) {
assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
@@ -5458,7 +5459,7 @@ SDValue SelectionDAG::getAtomicCmpSwap(
MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
- AAMDNodes(), nullptr, SynchScope, SuccessOrdering,
+ AAMDNodes(), nullptr, SSID, SuccessOrdering,
FailureOrdering);
return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
@@ -5480,7 +5481,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDValue Chain, SDValue Ptr, SDValue Val,
const Value *PtrVal, unsigned Alignment,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
@@ -5500,7 +5501,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
MemVT.getStoreSize(), Alignment, AAMDNodes(),
- nullptr, SynchScope, Ordering);
+ nullptr, SSID, Ordering);
return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
}
@@ -7630,45 +7631,13 @@ bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
SDValue Loc = LD->getOperand(1);
SDValue BaseLoc = Base->getOperand(1);
- if (Loc.getOpcode() == ISD::FrameIndex) {
- if (BaseLoc.getOpcode() != ISD::FrameIndex)
- return false;
- const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
- int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
- int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
- int FS = MFI.getObjectSize(FI);
- int BFS = MFI.getObjectSize(BFI);
- if (FS != BFS || FS != (int)Bytes) return false;
- return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
- }
-
- // Handle X + C.
- if (isBaseWithConstantOffset(Loc)) {
- int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
- if (Loc.getOperand(0) == BaseLoc) {
- // If the base location is a simple address with no offset itself, then
- // the second load's first add operand should be the base address.
- if (LocOffset == Dist * (int)Bytes)
- return true;
- } else if (isBaseWithConstantOffset(BaseLoc)) {
- // The base location itself has an offset, so subtract that value from the
- // second load's offset before comparing to distance * size.
- int64_t BOffset =
- cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
- if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
- if ((LocOffset - BOffset) == Dist * (int)Bytes)
- return true;
- }
- }
- }
- const GlobalValue *GV1 = nullptr;
- const GlobalValue *GV2 = nullptr;
- int64_t Offset1 = 0;
- int64_t Offset2 = 0;
- bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
- bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
- if (isGA1 && isGA2 && GV1 == GV2)
- return Offset1 == (Offset2 + Dist*Bytes);
+
+ auto BaseLocDecomp = BaseIndexOffset::match(BaseLoc, *this);
+ auto LocDecomp = BaseIndexOffset::match(Loc, *this);
+
+ int64_t Offset = 0;
+ if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
+ return (Dist * Bytes == Offset);
return false;
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
index 4e899ae6668e..0d69441ebb7f 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
@@ -37,13 +37,13 @@ bool BaseIndexOffset::equalBaseIndex(BaseIndexOffset &Other,
const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
- // Match non-equal FrameIndexes - a FrameIndex stemming from an
- // alloca will not have it's ObjectOffset set until post-DAG and
- // as such we must assume the two framesIndices are incomparable.
+ // Match non-equal FrameIndexes - if both frame indices are fixed,
+ // we know their relative offsets and can compare them. Otherwise
+ // we must be conservative.
if (auto *A = dyn_cast<FrameIndexSDNode>(Base))
if (auto *B = dyn_cast<FrameIndexSDNode>(Other.Base))
- if (!MFI.getObjectAllocation(A->getIndex()) &&
- !MFI.getObjectAllocation(B->getIndex())) {
+ if (MFI.isFixedObjectIndex(A->getIndex()) &&
+ MFI.isFixedObjectIndex(B->getIndex())) {
Off += MFI.getObjectOffset(B->getIndex()) -
MFI.getObjectOffset(A->getIndex());
return true;
@@ -60,12 +60,18 @@ BaseIndexOffset BaseIndexOffset::match(SDValue Ptr, const SelectionDAG &DAG) {
int64_t Offset = 0;
bool IsIndexSignExt = false;
- // Consume constant adds
- while (Base->getOpcode() == ISD::ADD &&
- isa<ConstantSDNode>(Base->getOperand(1))) {
- int64_t POffset = cast<ConstantSDNode>(Base->getOperand(1))->getSExtValue();
- Offset += POffset;
- Base = Base->getOperand(0);
+ // Consume constant adds & ors with appropriate masking.
+ while (Base->getOpcode() == ISD::ADD || Base->getOpcode() == ISD::OR) {
+ if (auto *C = dyn_cast<ConstantSDNode>(Base->getOperand(1))) {
+ // Only consider ORs which act as adds.
+ if (Base->getOpcode() == ISD::OR &&
+ !DAG.MaskedValueIsZero(Base->getOperand(0), C->getAPIntValue()))
+ break;
+ Offset += C->getSExtValue();
+ Base = Base->getOperand(0);
+ continue;
+ }
+ break;
}
if (Base->getOpcode() == ISD::ADD) {
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index acf68fbbdedf..41c3f5f235ea 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3220,7 +3220,13 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
setValue(&I, DAG.getBuildVector(VT, DL, Ops));
}
-void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
+void SelectionDAGBuilder::visitInsertValue(const User &I) {
+ ArrayRef<unsigned> Indices;
+ if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
+ Indices = IV->getIndices();
+ else
+ Indices = cast<ConstantExpr>(&I)->getIndices();
+
const Value *Op0 = I.getOperand(0);
const Value *Op1 = I.getOperand(1);
Type *AggTy = I.getType();
@@ -3228,7 +3234,7 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
bool IntoUndef = isa<UndefValue>(Op0);
bool FromUndef = isa<UndefValue>(Op1);
- unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
+ unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 4> AggValueVTs;
@@ -3268,13 +3274,19 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
DAG.getVTList(AggValueVTs), Values));
}
-void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
+void SelectionDAGBuilder::visitExtractValue(const User &I) {
+ ArrayRef<unsigned> Indices;
+ if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
+ Indices = EV->getIndices();
+ else
+ Indices = cast<ConstantExpr>(&I)->getIndices();
+
const Value *Op0 = I.getOperand(0);
Type *AggTy = Op0->getType();
Type *ValTy = I.getType();
bool OutOfUndef = isa<UndefValue>(Op0);
- unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
+ unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 4> ValValueVTs;
@@ -3559,6 +3571,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
MMOFlags |= MachineMemOperand::MOInvariant;
if (isDereferenceable)
MMOFlags |= MachineMemOperand::MODereferenceable;
+ MMOFlags |= TLI.getMMOFlags(I);
SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
MachinePointerInfo(SV, Offsets[i]), Alignment,
@@ -3688,6 +3701,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
MMOFlags |= MachineMemOperand::MOVolatile;
if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
MMOFlags |= MachineMemOperand::MONonTemporal;
+ MMOFlags |= TLI.getMMOFlags(I);
// An aggregate load cannot wrap around the address space, so offsets to its
// parts don't wrap either.
@@ -3978,7 +3992,7 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
SDLoc dl = getCurSDLoc();
AtomicOrdering SuccessOrder = I.getSuccessOrdering();
AtomicOrdering FailureOrder = I.getFailureOrdering();
- SynchronizationScope Scope = I.getSynchScope();
+ SyncScope::ID SSID = I.getSyncScopeID();
SDValue InChain = getRoot();
@@ -3988,7 +4002,7 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
- /*Alignment=*/ 0, SuccessOrder, FailureOrder, Scope);
+ /*Alignment=*/ 0, SuccessOrder, FailureOrder, SSID);
SDValue OutChain = L.getValue(2);
@@ -4014,7 +4028,7 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
}
AtomicOrdering Order = I.getOrdering();
- SynchronizationScope Scope = I.getSynchScope();
+ SyncScope::ID SSID = I.getSyncScopeID();
SDValue InChain = getRoot();
@@ -4025,7 +4039,7 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
getValue(I.getPointerOperand()),
getValue(I.getValOperand()),
I.getPointerOperand(),
- /* Alignment=*/ 0, Order, Scope);
+ /* Alignment=*/ 0, Order, SSID);
SDValue OutChain = L.getValue(1);
@@ -4040,7 +4054,7 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) {
Ops[0] = getRoot();
Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
TLI.getFenceOperandTy(DAG.getDataLayout()));
- Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
+ Ops[2] = DAG.getConstant(I.getSyncScopeID(), dl,
TLI.getFenceOperandTy(DAG.getDataLayout()));
DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
}
@@ -4048,7 +4062,7 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) {
void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
SDLoc dl = getCurSDLoc();
AtomicOrdering Order = I.getOrdering();
- SynchronizationScope Scope = I.getSynchScope();
+ SyncScope::ID SSID = I.getSyncScopeID();
SDValue InChain = getRoot();
@@ -4066,7 +4080,7 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
VT.getStoreSize(),
I.getAlignment() ? I.getAlignment() :
DAG.getEVTAlignment(VT),
- AAMDNodes(), nullptr, Scope, Order);
+ AAMDNodes(), nullptr, SSID, Order);
InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
SDValue L =
@@ -4083,7 +4097,7 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
SDLoc dl = getCurSDLoc();
AtomicOrdering Order = I.getOrdering();
- SynchronizationScope Scope = I.getSynchScope();
+ SyncScope::ID SSID = I.getSyncScopeID();
SDValue InChain = getRoot();
@@ -4100,7 +4114,7 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
getValue(I.getPointerOperand()),
getValue(I.getValueOperand()),
I.getPointerOperand(), I.getAlignment(),
- Order, Scope);
+ Order, SSID);
DAG.setRoot(OutChain);
}
@@ -4982,6 +4996,83 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.setRoot(CallResult.second);
return nullptr;
}
+ case Intrinsic::memmove_element_unordered_atomic: {
+ auto &MI = cast<ElementUnorderedAtomicMemMoveInst>(I);
+ SDValue Dst = getValue(MI.getRawDest());
+ SDValue Src = getValue(MI.getRawSource());
+ SDValue Length = getValue(MI.getLength());
+
+ // Emit a library call.
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
+ Entry.Node = Dst;
+ Args.push_back(Entry);
+
+ Entry.Node = Src;
+ Args.push_back(Entry);
+
+ Entry.Ty = MI.getLength()->getType();
+ Entry.Node = Length;
+ Args.push_back(Entry);
+
+ uint64_t ElementSizeConstant = MI.getElementSizeInBytes();
+ RTLIB::Libcall LibraryCall =
+ RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElementSizeConstant);
+ if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
+ report_fatal_error("Unsupported element size");
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
+ TLI.getLibcallCallingConv(LibraryCall),
+ Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol(TLI.getLibcallName(LibraryCall),
+ TLI.getPointerTy(DAG.getDataLayout())),
+ std::move(Args));
+
+ std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
+ DAG.setRoot(CallResult.second);
+ return nullptr;
+ }
+ case Intrinsic::memset_element_unordered_atomic: {
+ auto &MI = cast<ElementUnorderedAtomicMemSetInst>(I);
+ SDValue Dst = getValue(MI.getRawDest());
+ SDValue Val = getValue(MI.getValue());
+ SDValue Length = getValue(MI.getLength());
+
+ // Emit a library call.
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
+ Entry.Node = Dst;
+ Args.push_back(Entry);
+
+ Entry.Ty = Type::getInt8Ty(*DAG.getContext());
+ Entry.Node = Val;
+ Args.push_back(Entry);
+
+ Entry.Ty = MI.getLength()->getType();
+ Entry.Node = Length;
+ Args.push_back(Entry);
+
+ uint64_t ElementSizeConstant = MI.getElementSizeInBytes();
+ RTLIB::Libcall LibraryCall =
+ RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElementSizeConstant);
+ if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
+ report_fatal_error("Unsupported element size");
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
+ TLI.getLibcallCallingConv(LibraryCall),
+ Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol(TLI.getLibcallName(LibraryCall),
+ TLI.getPointerTy(DAG.getDataLayout())),
+ std::move(Args));
+
+ std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
+ DAG.setRoot(CallResult.second);
+ return nullptr;
+ }
case Intrinsic::dbg_declare: {
const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
DILocalVariable *Variable = DI.getVariable();
@@ -7842,6 +7933,22 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
auto &DL = CLI.DAG.getDataLayout();
ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
+ if (CLI.IsPostTypeLegalization) {
+ // If we are lowering a libcall after legalization, split the return type.
+ SmallVector<EVT, 4> OldRetTys = std::move(RetTys);
+ SmallVector<uint64_t, 4> OldOffsets = std::move(Offsets);
+ for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
+ EVT RetVT = OldRetTys[i];
+ uint64_t Offset = OldOffsets[i];
+ MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
+ unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
+ unsigned RegisterVTSize = RegisterVT.getSizeInBits();
+ RetTys.append(NumRegs, RegisterVT);
+ for (unsigned j = 0; j != NumRegs; ++j)
+ Offsets.push_back(Offset + j * RegisterVTSize);
+ }
+ }
+
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
@@ -7924,6 +8031,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
+ // FIXME: Split arguments if CLI.IsPostTypeLegalization
Type *FinalType = Args[i].Ty;
if (Args[i].IsByVal)
FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 431d52b4b9b9..ac1d6aae65a5 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -38,7 +38,6 @@ class BranchInst;
class CallInst;
class DbgValueInst;
class ExtractElementInst;
-class ExtractValueInst;
class FCmpInst;
class FPExtInst;
class FPToSIInst;
@@ -53,7 +52,6 @@ class IntToPtrInst;
class IndirectBrInst;
class InvokeInst;
class InsertElementInst;
-class InsertValueInst;
class Instruction;
class LoadInst;
class MachineBasicBlock;
@@ -859,8 +857,8 @@ private:
void visitInsertElement(const User &I);
void visitShuffleVector(const User &I);
- void visitExtractValue(const ExtractValueInst &I);
- void visitInsertValue(const InsertValueInst &I);
+ void visitExtractValue(const User &I);
+ void visitInsertValue(const User &I);
void visitLandingPad(const LandingPadInst &I);
void visitGetElementPtr(const User &I);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index f711ca71f79f..bdf57e805842 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -1483,7 +1483,6 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Try to select the instruction with FastISel.
if (FastIS->selectInstruction(Inst)) {
- FastISelFailed = true;
--NumFastIselRemaining;
++NumFastIselSuccess;
// If fast isel succeeded, skip over all the folded instructions, and
@@ -1506,8 +1505,14 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
continue;
}
+ FastISelFailed = true;
+
// Then handle certain instructions as single-LLVM-Instruction blocks.
- if (isa<CallInst>(Inst)) {
+ // We cannot separate out GC relocates into their own blocks since we need
+ // to keep track of gc-relocates for a particular gc-statepoint. This is
+ // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
+ // visitGCRelocate.
+ if (isa<CallInst>(Inst) && !isStatepoint(Inst) && !isGCRelocate(Inst)) {
OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
Inst->getDebugLoc(), LLVMBB);
diff --git a/lib/CodeGen/SjLjEHPrepare.cpp b/lib/CodeGen/SjLjEHPrepare.cpp
index 7886737b879c..17a3a84ecda5 100644
--- a/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/lib/CodeGen/SjLjEHPrepare.cpp
@@ -125,8 +125,11 @@ static void MarkBlocksLiveIn(BasicBlock *BB,
if (!LiveBBs.insert(BB).second)
return; // already been here.
- for (BasicBlock *PredBB : predecessors(BB))
- MarkBlocksLiveIn(PredBB, LiveBBs);
+ df_iterator_default_set<BasicBlock*> Visited;
+
+ for (BasicBlock *B : inverse_depth_first_ext(BB, Visited))
+ LiveBBs.insert(B);
+
}
/// substituteLPadValues - Substitute the values returned by the landingpad
diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp
index 008b984dd961..323045fd2aaa 100644
--- a/lib/CodeGen/SplitKit.cpp
+++ b/lib/CodeGen/SplitKit.cpp
@@ -53,10 +53,10 @@ InsertPointAnalysis::computeLastInsertPoint(const LiveInterval &CurLI,
std::pair<SlotIndex, SlotIndex> &LIP = LastInsertPoint[Num];
SlotIndex MBBEnd = LIS.getMBBEndIdx(&MBB);
- SmallVector<const MachineBasicBlock *, 1> EHPadSucessors;
+ SmallVector<const MachineBasicBlock *, 1> EHPadSuccessors;
for (const MachineBasicBlock *SMBB : MBB.successors())
if (SMBB->isEHPad())
- EHPadSucessors.push_back(SMBB);
+ EHPadSuccessors.push_back(SMBB);
// Compute insert points on the first call. The pair is independent of the
// current live interval.
@@ -68,7 +68,7 @@ InsertPointAnalysis::computeLastInsertPoint(const LiveInterval &CurLI,
LIP.first = LIS.getInstructionIndex(*FirstTerm);
// If there is a landing pad successor, also find the call instruction.
- if (EHPadSucessors.empty())
+ if (EHPadSuccessors.empty())
return LIP.first;
// There may not be a call instruction (?) in which case we ignore LPad.
LIP.second = LIP.first;
@@ -87,7 +87,7 @@ InsertPointAnalysis::computeLastInsertPoint(const LiveInterval &CurLI,
if (!LIP.second)
return LIP.first;
- if (none_of(EHPadSucessors, [&](const MachineBasicBlock *EHPad) {
+ if (none_of(EHPadSuccessors, [&](const MachineBasicBlock *EHPad) {
return LIS.isLiveInToMBB(CurLI, EHPad);
}))
return LIP.first;
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index e9d38c10c860..3914ee514712 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -384,6 +384,26 @@ static void InitLibcallNames(const char **Names, const Triple &TT) {
"__llvm_memcpy_element_unordered_atomic_8";
Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_16] =
"__llvm_memcpy_element_unordered_atomic_16";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1] =
+ "__llvm_memmove_element_unordered_atomic_1";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2] =
+ "__llvm_memmove_element_unordered_atomic_2";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4] =
+ "__llvm_memmove_element_unordered_atomic_4";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8] =
+ "__llvm_memmove_element_unordered_atomic_8";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16] =
+ "__llvm_memmove_element_unordered_atomic_16";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_1] =
+ "__llvm_memset_element_unordered_atomic_1";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_2] =
+ "__llvm_memset_element_unordered_atomic_2";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_4] =
+ "__llvm_memset_element_unordered_atomic_4";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_8] =
+ "__llvm_memset_element_unordered_atomic_8";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_16] =
+ "__llvm_memset_element_unordered_atomic_16";
Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
@@ -803,6 +823,40 @@ RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
}
}
+RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
+ switch (ElementSize) {
+ case 1:
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
+ case 2:
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
+ case 4:
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
+ case 8:
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
+ case 16:
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
+ default:
+ return UNKNOWN_LIBCALL;
+ }
+}
+
+RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
+ switch (ElementSize) {
+ case 1:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
+ case 2:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
+ case 4:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
+ case 8:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
+ case 16:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
+ default:
+ return UNKNOWN_LIBCALL;
+ }
+}
+
/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
diff --git a/lib/DebugInfo/CodeView/SymbolDumper.cpp b/lib/DebugInfo/CodeView/SymbolDumper.cpp
index b9fa9b6a6ad7..c2c02f8de03f 100644
--- a/lib/DebugInfo/CodeView/SymbolDumper.cpp
+++ b/lib/DebugInfo/CodeView/SymbolDumper.cpp
@@ -62,6 +62,18 @@ private:
};
}
+static StringRef getSymbolKindName(SymbolKind Kind) {
+ switch (Kind) {
+#define SYMBOL_RECORD(EnumName, EnumVal, Name) \
+ case EnumName: \
+ return #Name;
+#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
+ default:
+ break;
+ }
+ return "UnknownSym";
+}
+
void CVSymbolDumperImpl::printLocalVariableAddrRange(
const LocalVariableAddrRange &Range, uint32_t RelocationOffset) {
DictScope S(W, "LocalVariableAddrRange");
@@ -86,18 +98,23 @@ void CVSymbolDumperImpl::printTypeIndex(StringRef FieldName, TypeIndex TI) {
}
Error CVSymbolDumperImpl::visitSymbolBegin(CVSymbol &CVR) {
+ W.startLine() << getSymbolKindName(CVR.Type);
+ W.getOStream() << " {\n";
+ W.indent();
+ W.printEnum("Kind", unsigned(CVR.Type), getSymbolTypeNames());
return Error::success();
}
Error CVSymbolDumperImpl::visitSymbolEnd(CVSymbol &CVR) {
if (PrintRecordBytes && ObjDelegate)
ObjDelegate->printBinaryBlockWithRelocs("SymData", CVR.content());
+
+ W.unindent();
+ W.startLine() << "}\n";
return Error::success();
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, BlockSym &Block) {
- DictScope S(W, "BlockStart");
-
StringRef LinkageName;
W.printHex("PtrParent", Block.Parent);
W.printHex("PtrEnd", Block.End);
@@ -113,7 +130,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, BlockSym &Block) {
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, Thunk32Sym &Thunk) {
- DictScope S(W, "Thunk32");
W.printNumber("Parent", Thunk.Parent);
W.printNumber("End", Thunk.End);
W.printNumber("Next", Thunk.Next);
@@ -126,7 +142,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, Thunk32Sym &Thunk) {
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
TrampolineSym &Tramp) {
- DictScope S(W, "Trampoline");
W.printEnum("Type", uint16_t(Tramp.Type), getTrampolineNames());
W.printNumber("Size", Tramp.Size);
W.printNumber("ThunkOff", Tramp.ThunkOffset);
@@ -137,7 +152,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, SectionSym &Section) {
- DictScope S(W, "Section");
W.printNumber("SectionNumber", Section.SectionNumber);
W.printNumber("Alignment", Section.Alignment);
W.printNumber("Rva", Section.Rva);
@@ -152,7 +166,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, SectionSym &Section) {
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
CoffGroupSym &CoffGroup) {
- DictScope S(W, "COFF Group");
W.printNumber("Size", CoffGroup.Size);
W.printFlags("Characteristics", CoffGroup.Characteristics,
getImageSectionCharacteristicNames(),
@@ -165,8 +178,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
BPRelativeSym &BPRel) {
- DictScope S(W, "BPRelativeSym");
-
W.printNumber("Offset", BPRel.Offset);
printTypeIndex("Type", BPRel.Type);
W.printString("VarName", BPRel.Name);
@@ -175,16 +186,12 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
BuildInfoSym &BuildInfo) {
- DictScope S(W, "BuildInfo");
-
W.printNumber("BuildId", BuildInfo.BuildId);
return Error::success();
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
CallSiteInfoSym &CallSiteInfo) {
- DictScope S(W, "CallSiteInfo");
-
StringRef LinkageName;
if (ObjDelegate) {
ObjDelegate->printRelocatedField("CodeOffset",
@@ -200,8 +207,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
EnvBlockSym &EnvBlock) {
- DictScope S(W, "EnvBlock");
-
ListScope L(W, "Entries");
for (auto Entry : EnvBlock.Fields) {
W.printString(Entry);
@@ -211,7 +216,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
FileStaticSym &FileStatic) {
- DictScope S(W, "FileStatic");
printTypeIndex("Index", FileStatic.Index);
W.printNumber("ModFilenameOffset", FileStatic.ModFilenameOffset);
W.printFlags("Flags", uint16_t(FileStatic.Flags), getLocalFlagNames());
@@ -220,7 +224,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, ExportSym &Export) {
- DictScope S(W, "Export");
W.printNumber("Ordinal", Export.Ordinal);
W.printFlags("Flags", uint16_t(Export.Flags), getExportSymFlagNames());
W.printString("Name", Export.Name);
@@ -229,8 +232,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, ExportSym &Export) {
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Compile2Sym &Compile2) {
- DictScope S(W, "CompilerFlags2");
-
W.printEnum("Language", Compile2.getLanguage(), getSourceLanguageNames());
W.printFlags("Flags", Compile2.getFlags(), getCompileSym2FlagNames());
W.printEnum("Machine", unsigned(Compile2.Machine), getCPUTypeNames());
@@ -254,8 +255,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Compile3Sym &Compile3) {
- DictScope S(W, "CompilerFlags3");
-
W.printEnum("Language", Compile3.getLanguage(), getSourceLanguageNames());
W.printFlags("Flags", Compile3.getFlags(), getCompileSym3FlagNames());
W.printEnum("Machine", unsigned(Compile3.Machine), getCPUTypeNames());
@@ -281,8 +280,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
ConstantSym &Constant) {
- DictScope S(W, "Constant");
-
printTypeIndex("Type", Constant.Type);
W.printNumber("Value", Constant.Value);
W.printString("Name", Constant.Name);
@@ -290,9 +287,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, DataSym &Data) {
- DictScope S(W, "DataSym");
-
- W.printEnum("Kind", uint16_t(CVR.kind()), getSymbolTypeNames());
StringRef LinkageName;
if (ObjDelegate) {
ObjDelegate->printRelocatedField("DataOffset", Data.getRelocationOffset(),
@@ -308,15 +302,12 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, DataSym &Data) {
Error CVSymbolDumperImpl::visitKnownRecord(
CVSymbol &CVR,
DefRangeFramePointerRelFullScopeSym &DefRangeFramePointerRelFullScope) {
- DictScope S(W, "DefRangeFramePointerRelFullScope");
W.printNumber("Offset", DefRangeFramePointerRelFullScope.Offset);
return Error::success();
}
Error CVSymbolDumperImpl::visitKnownRecord(
CVSymbol &CVR, DefRangeFramePointerRelSym &DefRangeFramePointerRel) {
- DictScope S(W, "DefRangeFramePointerRel");
-
W.printNumber("Offset", DefRangeFramePointerRel.Offset);
printLocalVariableAddrRange(DefRangeFramePointerRel.Range,
DefRangeFramePointerRel.getRelocationOffset());
@@ -326,8 +317,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(
Error CVSymbolDumperImpl::visitKnownRecord(
CVSymbol &CVR, DefRangeRegisterRelSym &DefRangeRegisterRel) {
- DictScope S(W, "DefRangeRegisterRel");
-
W.printNumber("BaseRegister", DefRangeRegisterRel.Hdr.Register);
W.printBoolean("HasSpilledUDTMember",
DefRangeRegisterRel.hasSpilledUDTMember());
@@ -341,8 +330,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(
Error CVSymbolDumperImpl::visitKnownRecord(
CVSymbol &CVR, DefRangeRegisterSym &DefRangeRegister) {
- DictScope S(W, "DefRangeRegister");
-
W.printNumber("Register", DefRangeRegister.Hdr.Register);
W.printNumber("MayHaveNoName", DefRangeRegister.Hdr.MayHaveNoName);
printLocalVariableAddrRange(DefRangeRegister.Range,
@@ -353,8 +340,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(
Error CVSymbolDumperImpl::visitKnownRecord(
CVSymbol &CVR, DefRangeSubfieldRegisterSym &DefRangeSubfieldRegister) {
- DictScope S(W, "DefRangeSubfieldRegister");
-
W.printNumber("Register", DefRangeSubfieldRegister.Hdr.Register);
W.printNumber("MayHaveNoName", DefRangeSubfieldRegister.Hdr.MayHaveNoName);
W.printNumber("OffsetInParent", DefRangeSubfieldRegister.Hdr.OffsetInParent);
@@ -366,8 +351,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(
Error CVSymbolDumperImpl::visitKnownRecord(
CVSymbol &CVR, DefRangeSubfieldSym &DefRangeSubfield) {
- DictScope S(W, "DefRangeSubfield");
-
if (ObjDelegate) {
DebugStringTableSubsectionRef Strings = ObjDelegate->getStringTable();
auto ExpectedProgram = Strings.getString(DefRangeSubfield.Program);
@@ -387,8 +370,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
DefRangeSym &DefRange) {
- DictScope S(W, "DefRange");
-
if (ObjDelegate) {
DebugStringTableSubsectionRef Strings = ObjDelegate->getStringTable();
auto ExpectedProgram = Strings.getString(DefRange.Program);
@@ -406,8 +387,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
FrameCookieSym &FrameCookie) {
- DictScope S(W, "FrameCookie");
-
StringRef LinkageName;
if (ObjDelegate) {
ObjDelegate->printRelocatedField("CodeOffset",
@@ -423,8 +402,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
FrameProcSym &FrameProc) {
- DictScope S(W, "FrameProc");
-
W.printHex("TotalFrameBytes", FrameProc.TotalFrameBytes);
W.printHex("PaddingFrameBytes", FrameProc.PaddingFrameBytes);
W.printHex("OffsetToPadding", FrameProc.OffsetToPadding);
@@ -440,8 +417,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(
CVSymbol &CVR, HeapAllocationSiteSym &HeapAllocSite) {
- DictScope S(W, "HeapAllocationSite");
-
StringRef LinkageName;
if (ObjDelegate) {
ObjDelegate->printRelocatedField("CodeOffset",
@@ -458,8 +433,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
InlineSiteSym &InlineSite) {
- DictScope S(W, "InlineSite");
-
W.printHex("PtrParent", InlineSite.Parent);
W.printHex("PtrEnd", InlineSite.End);
printTypeIndex("Inlinee", InlineSite.Inlinee);
@@ -515,7 +488,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
RegisterSym &Register) {
- DictScope S(W, "RegisterSym");
printTypeIndex("Type", Register.Index);
W.printEnum("Seg", uint16_t(Register.Register), getRegisterNames());
W.printString("Name", Register.Name);
@@ -523,7 +495,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, PublicSym32 &Public) {
- DictScope S(W, "PublicSym");
W.printFlags("Flags", uint32_t(Public.Flags), getPublicSymFlagNames());
W.printNumber("Seg", Public.Segment);
W.printNumber("Off", Public.Offset);
@@ -532,7 +503,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, PublicSym32 &Public) {
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, ProcRefSym &ProcRef) {
- DictScope S(W, "ProcRef");
W.printNumber("SumName", ProcRef.SumName);
W.printNumber("SymOffset", ProcRef.SymOffset);
W.printNumber("Mod", ProcRef.Module);
@@ -541,8 +511,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, ProcRefSym &ProcRef) {
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, LabelSym &Label) {
- DictScope S(W, "Label");
-
StringRef LinkageName;
if (ObjDelegate) {
ObjDelegate->printRelocatedField("CodeOffset", Label.getRelocationOffset(),
@@ -558,8 +526,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, LabelSym &Label) {
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, LocalSym &Local) {
- DictScope S(W, "Local");
-
printTypeIndex("Type", Local.Type);
W.printFlags("Flags", uint16_t(Local.Flags), getLocalFlagNames());
W.printString("VarName", Local.Name);
@@ -567,16 +533,12 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, LocalSym &Local) {
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, ObjNameSym &ObjName) {
- DictScope S(W, "ObjectName");
-
W.printHex("Signature", ObjName.Signature);
W.printString("ObjectName", ObjName.Name);
return Error::success();
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, ProcSym &Proc) {
- DictScope S(W, "ProcStart");
-
if (InFunctionScope)
return llvm::make_error<CodeViewError>(
"Visiting a ProcSym while inside function scope!");
@@ -584,7 +546,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, ProcSym &Proc) {
InFunctionScope = true;
StringRef LinkageName;
- W.printEnum("Kind", uint16_t(CVR.kind()), getSymbolTypeNames());
W.printHex("PtrParent", Proc.Parent);
W.printHex("PtrEnd", Proc.End);
W.printHex("PtrNext", Proc.Next);
@@ -607,13 +568,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, ProcSym &Proc) {
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
ScopeEndSym &ScopeEnd) {
- if (CVR.kind() == SymbolKind::S_END)
- DictScope S(W, "BlockEnd");
- else if (CVR.kind() == SymbolKind::S_PROC_ID_END)
- DictScope S(W, "ProcEnd");
- else if (CVR.kind() == SymbolKind::S_INLINESITE_END)
- DictScope S(W, "InlineSiteEnd");
-
InFunctionScope = false;
return Error::success();
}
@@ -627,8 +581,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, CallerSym &Caller) {
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
RegRelativeSym &RegRel) {
- DictScope S(W, "RegRelativeSym");
-
W.printHex("Offset", RegRel.Offset);
printTypeIndex("Type", RegRel.Type);
W.printEnum("Register", uint16_t(RegRel.Register), getRegisterNames());
@@ -638,8 +590,6 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
ThreadLocalDataSym &Data) {
- DictScope S(W, "ThreadLocalDataSym");
-
StringRef LinkageName;
if (ObjDelegate) {
ObjDelegate->printRelocatedField("DataOffset", Data.getRelocationOffset(),
@@ -653,15 +603,12 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
}
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, UDTSym &UDT) {
- DictScope S(W, "UDT");
printTypeIndex("Type", UDT.Type);
W.printString("UDTName", UDT.Name);
return Error::success();
}
Error CVSymbolDumperImpl::visitUnknownSymbol(CVSymbol &CVR) {
- DictScope S(W, "UnknownSym");
- W.printEnum("Kind", uint16_t(CVR.kind()), getSymbolTypeNames());
W.printNumber("Length", CVR.length());
return Error::success();
}
diff --git a/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp b/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp
index 72cb9e2e3544..0d935c4472ae 100644
--- a/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp
+++ b/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp
@@ -382,6 +382,13 @@ static bool discoverTypeIndices(ArrayRef<uint8_t> Content, SymbolKind Kind,
case SymbolKind::S_BUILDINFO:
Refs.push_back({TiRefKind::IndexRef, 0, 1}); // Compile flags
break;
+ case SymbolKind::S_LTHREAD32:
+ case SymbolKind::S_GTHREAD32:
+ Refs.push_back({TiRefKind::TypeRef, 0, 1}); // Type
+ break;
+ case SymbolKind::S_FILESTATIC:
+ Refs.push_back({TiRefKind::TypeRef, 0, 1}); // Type
+ break;
case SymbolKind::S_LOCAL:
Refs.push_back({TiRefKind::TypeRef, 0, 1}); // Type
break;
@@ -403,6 +410,10 @@ static bool discoverTypeIndices(ArrayRef<uint8_t> Content, SymbolKind Kind,
case SymbolKind::S_INLINESITE:
Refs.push_back({TiRefKind::IndexRef, 8, 1}); // ID of inlinee
break;
+ case SymbolKind::S_HEAPALLOCSITE:
+ // FIXME: It's not clear if this is a type or item reference.
+ Refs.push_back({TiRefKind::IndexRef, 8, 1}); // signature
+ break;
// Defranges don't have types, just registers and code offsets.
case SymbolKind::S_DEFRANGE_REGISTER:
@@ -419,6 +430,7 @@ static bool discoverTypeIndices(ArrayRef<uint8_t> Content, SymbolKind Kind,
case SymbolKind::S_COMPILE:
case SymbolKind::S_COMPILE2:
case SymbolKind::S_COMPILE3:
+ case SymbolKind::S_ENVBLOCK:
case SymbolKind::S_BLOCK32:
case SymbolKind::S_FRAMEPROC:
break;
diff --git a/lib/DebugInfo/DWARF/DWARFContext.cpp b/lib/DebugInfo/DWARF/DWARFContext.cpp
index a18d4efec07a..495e09fbae35 100644
--- a/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -591,10 +591,10 @@ void DWARFContext::parseCompileUnits() {
void DWARFContext::parseTypeUnits() {
if (!TUs.empty())
return;
- for (const auto &I : getTypesSections()) {
+ forEachTypesSections([&](const DWARFSection &S) {
TUs.emplace_back();
- TUs.back().parse(*this, I.second);
- }
+ TUs.back().parse(*this, S);
+ });
}
void DWARFContext::parseDWOCompileUnits() {
@@ -604,10 +604,10 @@ void DWARFContext::parseDWOCompileUnits() {
void DWARFContext::parseDWOTypeUnits() {
if (!DWOTUs.empty())
return;
- for (const auto &I : getTypesDWOSections()) {
+ forEachTypesDWOSections([&](const DWARFSection &S) {
DWOTUs.emplace_back();
- DWOTUs.back().parseDWO(*this, I.second);
- }
+ DWOTUs.back().parseDWO(*this, S);
+ });
}
DWARFCompileUnit *DWARFContext::getCompileUnitForOffset(uint32_t Offset) {
@@ -937,27 +937,23 @@ DWARFContextInMemory::DWARFContextInMemory(
: FileName(Obj.getFileName()), IsLittleEndian(Obj.isLittleEndian()),
AddressSize(Obj.getBytesInAddress()) {
for (const SectionRef &Section : Obj.sections()) {
- StringRef name;
- Section.getName(name);
+ StringRef Name;
+ Section.getName(Name);
// Skip BSS and Virtual sections, they aren't interesting.
- bool IsBSS = Section.isBSS();
- if (IsBSS)
- continue;
- bool IsVirtual = Section.isVirtual();
- if (IsVirtual)
+ if (Section.isBSS() || Section.isVirtual())
continue;
- StringRef data;
+ StringRef Data;
section_iterator RelocatedSection = Section.getRelocatedSection();
// Try to obtain an already relocated version of this section.
// Else use the unrelocated section from the object file. We'll have to
// apply relocations ourselves later.
- if (!L || !L->getLoadedSectionContents(*RelocatedSection, data))
- Section.getContents(data);
+ if (!L || !L->getLoadedSectionContents(*RelocatedSection, Data))
+ Section.getContents(Data);
- if (auto Err = maybeDecompress(Section, name, data)) {
+ if (auto Err = maybeDecompress(Section, Name, Data)) {
ErrorPolicy EP = HandleError(
- createError("failed to decompress '" + name + "', ", std::move(Err)));
+ createError("failed to decompress '" + Name + "', ", std::move(Err)));
if (EP == ErrorPolicy::Halt)
return;
continue;
@@ -965,27 +961,27 @@ DWARFContextInMemory::DWARFContextInMemory(
// Compressed sections names in GNU style starts from ".z",
// at this point section is decompressed and we drop compression prefix.
- name = name.substr(
- name.find_first_not_of("._z")); // Skip ".", "z" and "_" prefixes.
+ Name = Name.substr(
+ Name.find_first_not_of("._z")); // Skip ".", "z" and "_" prefixes.
+
+ // Map platform specific debug section names to DWARF standard section
+ // names.
+ Name = Obj.mapDebugSectionName(Name);
- if (StringRef *SectionData = MapSectionToMember(name)) {
- *SectionData = data;
- if (name == "debug_ranges") {
+ if (StringRef *SectionData = mapSectionToMember(Name)) {
+ *SectionData = Data;
+ if (Name == "debug_ranges") {
// FIXME: Use the other dwo range section when we emit it.
- RangeDWOSection.Data = data;
+ RangeDWOSection.Data = Data;
}
- } else if (name == "debug_types") {
+ } else if (Name == "debug_types") {
// Find debug_types data by section rather than name as there are
// multiple, comdat grouped, debug_types sections.
- TypesSections[Section].Data = data;
- } else if (name == "debug_types.dwo") {
- TypesDWOSections[Section].Data = data;
+ TypesSections[Section].Data = Data;
+ } else if (Name == "debug_types.dwo") {
+ TypesDWOSections[Section].Data = Data;
}
- // Map platform specific debug section names to DWARF standard section
- // names.
- name = Obj.mapDebugSectionName(name);
-
if (RelocatedSection == Obj.section_end())
continue;
@@ -1012,21 +1008,8 @@ DWARFContextInMemory::DWARFContextInMemory(
// TODO: Add support for relocations in other sections as needed.
// Record relocations for the debug_info and debug_line sections.
- RelocAddrMap *Map =
- StringSwitch<RelocAddrMap *>(RelSecName)
- .Case("debug_info", &InfoSection.Relocs)
- .Case("debug_loc", &LocSection.Relocs)
- .Case("debug_info.dwo", &InfoDWOSection.Relocs)
- .Case("debug_line", &LineSection.Relocs)
- .Case("debug_str_offsets", &StringOffsetSection.Relocs)
- .Case("debug_ranges", &RangeSection.Relocs)
- .Case("debug_addr", &AddrSection.Relocs)
- .Case("apple_names", &AppleNamesSection.Relocs)
- .Case("apple_types", &AppleTypesSection.Relocs)
- .Case("apple_namespaces", &AppleNamespacesSection.Relocs)
- .Case("apple_namespac", &AppleNamespacesSection.Relocs)
- .Case("apple_objc", &AppleObjCSection.Relocs)
- .Default(nullptr);
+ DWARFSection *Sec = mapNameToDWARFSection(RelSecName);
+ RelocAddrMap *Map = Sec ? &Sec->Relocs : nullptr;
if (!Map) {
// Find debug_types relocs by section rather than name as there are
// multiple, comdat grouped, debug_types sections.
@@ -1059,10 +1042,10 @@ DWARFContextInMemory::DWARFContextInMemory(
object::RelocVisitor V(Obj);
uint64_t Val = V.visit(Reloc.getType(), Reloc, SymInfoOrErr->Address);
if (V.error()) {
- SmallString<32> Name;
- Reloc.getTypeName(Name);
+ SmallString<32> Type;
+ Reloc.getTypeName(Type);
ErrorPolicy EP = HandleError(
- createError("failed to compute relocation: " + Name + ", ",
+ createError("failed to compute relocation: " + Type + ", ",
errorCodeToError(object_error::parse_failed)));
if (EP == ErrorPolicy::Halt)
return;
@@ -1079,40 +1062,47 @@ DWARFContextInMemory::DWARFContextInMemory(
bool isLittleEndian)
: IsLittleEndian(isLittleEndian), AddressSize(AddrSize) {
for (const auto &SecIt : Sections) {
- if (StringRef *SectionData = MapSectionToMember(SecIt.first()))
+ if (StringRef *SectionData = mapSectionToMember(SecIt.first()))
*SectionData = SecIt.second->getBuffer();
}
}
-StringRef *DWARFContextInMemory::MapSectionToMember(StringRef Name) {
+DWARFSection *DWARFContextInMemory::mapNameToDWARFSection(StringRef Name) {
+ return StringSwitch<DWARFSection *>(Name)
+ .Case("debug_info", &InfoSection)
+ .Case("debug_loc", &LocSection)
+ .Case("debug_line", &LineSection)
+ .Case("debug_str_offsets", &StringOffsetSection)
+ .Case("debug_ranges", &RangeSection)
+ .Case("debug_info.dwo", &InfoDWOSection)
+ .Case("debug_loc.dwo", &LocDWOSection)
+ .Case("debug_line.dwo", &LineDWOSection)
+ .Case("debug_str_offsets.dwo", &StringOffsetDWOSection)
+ .Case("debug_addr", &AddrSection)
+ .Case("apple_names", &AppleNamesSection)
+ .Case("apple_types", &AppleTypesSection)
+ .Case("apple_namespaces", &AppleNamespacesSection)
+ .Case("apple_namespac", &AppleNamespacesSection)
+ .Case("apple_objc", &AppleObjCSection)
+ .Default(nullptr);
+}
+
+StringRef *DWARFContextInMemory::mapSectionToMember(StringRef Name) {
+ if (DWARFSection *Sec = mapNameToDWARFSection(Name))
+ return &Sec->Data;
return StringSwitch<StringRef *>(Name)
- .Case("debug_info", &InfoSection.Data)
.Case("debug_abbrev", &AbbrevSection)
- .Case("debug_loc", &LocSection.Data)
- .Case("debug_line", &LineSection.Data)
.Case("debug_aranges", &ARangeSection)
.Case("debug_frame", &DebugFrameSection)
.Case("eh_frame", &EHFrameSection)
.Case("debug_str", &StringSection)
- .Case("debug_str_offsets", &StringOffsetSection.Data)
- .Case("debug_ranges", &RangeSection.Data)
.Case("debug_macinfo", &MacinfoSection)
.Case("debug_pubnames", &PubNamesSection)
.Case("debug_pubtypes", &PubTypesSection)
.Case("debug_gnu_pubnames", &GnuPubNamesSection)
.Case("debug_gnu_pubtypes", &GnuPubTypesSection)
- .Case("debug_info.dwo", &InfoDWOSection.Data)
.Case("debug_abbrev.dwo", &AbbrevDWOSection)
- .Case("debug_loc.dwo", &LocDWOSection.Data)
- .Case("debug_line.dwo", &LineDWOSection.Data)
.Case("debug_str.dwo", &StringDWOSection)
- .Case("debug_str_offsets.dwo", &StringOffsetDWOSection.Data)
- .Case("debug_addr", &AddrSection.Data)
- .Case("apple_names", &AppleNamesSection.Data)
- .Case("apple_types", &AppleTypesSection.Data)
- .Case("apple_namespaces", &AppleNamespacesSection.Data)
- .Case("apple_namespac", &AppleNamespacesSection.Data)
- .Case("apple_objc", &AppleObjCSection.Data)
.Case("debug_cu_index", &CUIndexSection)
.Case("debug_tu_index", &TUIndexSection)
.Case("gdb_index", &GdbIndexSection)
diff --git a/lib/DebugInfo/DWARF/DWARFDie.cpp b/lib/DebugInfo/DWARF/DWARFDie.cpp
index ef416f72ad17..111f0bbd4444 100644
--- a/lib/DebugInfo/DWARF/DWARFDie.cpp
+++ b/lib/DebugInfo/DWARF/DWARFDie.cpp
@@ -395,7 +395,7 @@ DWARFDie::attribute_iterator::attribute_iterator(DWARFDie D, bool End) :
void DWARFDie::attribute_iterator::updateForIndex(
const DWARFAbbreviationDeclaration &AbbrDecl, uint32_t I) {
Index = I;
- // AbbrDecl must be valid befor calling this function.
+ // AbbrDecl must be valid before calling this function.
auto NumAttrs = AbbrDecl.getNumAttributes();
if (Index < NumAttrs) {
AttrValue.Attr = AbbrDecl.getAttrByIndex(Index);
diff --git a/lib/DebugInfo/PDB/CMakeLists.txt b/lib/DebugInfo/PDB/CMakeLists.txt
index e9fd29ccc4ca..ff01c948e099 100644
--- a/lib/DebugInfo/PDB/CMakeLists.txt
+++ b/lib/DebugInfo/PDB/CMakeLists.txt
@@ -41,6 +41,7 @@ add_pdb_impl_folder(Native
Native/InfoStream.cpp
Native/InfoStreamBuilder.cpp
Native/ModuleDebugStream.cpp
+ Native/NativeBuiltinSymbol.cpp
Native/NativeCompilandSymbol.cpp
Native/NativeEnumModules.cpp
Native/NativeExeSymbol.cpp
@@ -53,6 +54,7 @@ add_pdb_impl_folder(Native
Native/PDBStringTableBuilder.cpp
Native/PDBTypeServerHandler.cpp
Native/PublicsStream.cpp
+ Native/PublicsStreamBuilder.cpp
Native/RawError.cpp
Native/SymbolStream.cpp
Native/TpiHashing.cpp
diff --git a/lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp b/lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp
index 745dd742aadc..897f78c51032 100644
--- a/lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp
+++ b/lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp
@@ -65,6 +65,10 @@ void DbiModuleDescriptorBuilder::setObjFileName(StringRef Name) {
ObjFileName = Name;
}
+void DbiModuleDescriptorBuilder::setPdbFilePathNI(uint32_t NI) {
+ PdbFilePathNI = NI;
+}
+
void DbiModuleDescriptorBuilder::addSymbol(CVSymbol Symbol) {
Symbols.push_back(Symbol);
// Symbols written to a PDB file are required to be 4 byte aligned. The same
@@ -111,7 +115,7 @@ void DbiModuleDescriptorBuilder::finalize() {
(void)Layout.Mod; // Set in constructor
(void)Layout.ModDiStream; // Set in finalizeMsfLayout
Layout.NumFiles = SourceFiles.size();
- Layout.PdbFilePathNI = 0;
+ Layout.PdbFilePathNI = PdbFilePathNI;
Layout.SrcFileNameNI = 0;
// This value includes both the signature field as well as the record bytes
diff --git a/lib/DebugInfo/PDB/Native/DbiStream.cpp b/lib/DebugInfo/PDB/Native/DbiStream.cpp
index a1f0671dec3e..0eeac7e4c084 100644
--- a/lib/DebugInfo/PDB/Native/DbiStream.cpp
+++ b/lib/DebugInfo/PDB/Native/DbiStream.cpp
@@ -225,6 +225,10 @@ void DbiStream::visitSectionContributions(
}
}
+Expected<StringRef> DbiStream::getECName(uint32_t NI) const {
+ return ECNames.getStringForID(NI);
+}
+
Error DbiStream::initializeSectionContributionData() {
if (SecContrSubstream.empty())
return Error::success();
@@ -248,6 +252,9 @@ Error DbiStream::initializeSectionHeadersData() {
return Error::success();
uint32_t StreamNum = getDebugStreamIndex(DbgHeaderType::SectionHdr);
+ if (StreamNum == kInvalidStreamIndex)
+ return Error::success();
+
if (StreamNum >= Pdb.getNumStreams())
return make_error<RawError>(raw_error_code::no_stream);
diff --git a/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp b/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
index aad247ea185f..25076e40fc98 100644
--- a/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
+++ b/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
@@ -49,9 +49,17 @@ void DbiStreamBuilder::setSectionMap(ArrayRef<SecMapEntry> SecMap) {
SectionMap = SecMap;
}
+void DbiStreamBuilder::setSymbolRecordStreamIndex(uint32_t Index) {
+ SymRecordStreamIndex = Index;
+}
+
+void DbiStreamBuilder::setPublicsStreamIndex(uint32_t Index) {
+ PublicsStreamIndex = Index;
+}
+
Error DbiStreamBuilder::addDbgStream(pdb::DbgHeaderType Type,
ArrayRef<uint8_t> Data) {
- if (DbgStreams[(int)Type].StreamNumber)
+ if (DbgStreams[(int)Type].StreamNumber != kInvalidStreamIndex)
return make_error<RawError>(raw_error_code::duplicate_entry,
"The specified stream type already exists");
auto ExpectedIndex = Msf.addStream(Data.size());
@@ -63,11 +71,16 @@ Error DbiStreamBuilder::addDbgStream(pdb::DbgHeaderType Type,
return Error::success();
}
+uint32_t DbiStreamBuilder::addECName(StringRef Name) {
+ return ECNamesBuilder.insert(Name);
+}
+
uint32_t DbiStreamBuilder::calculateSerializedLength() const {
// For now we only support serializing the header.
return sizeof(DbiStreamHeader) + calculateFileInfoSubstreamSize() +
calculateModiSubstreamSize() + calculateSectionContribsStreamSize() +
- calculateSectionMapStreamSize() + calculateDbgStreamsSize();
+ calculateSectionMapStreamSize() + calculateDbgStreamsSize() +
+ ECNamesBuilder.calculateSerializedSize();
}
Expected<DbiModuleDescriptorBuilder &>
@@ -247,15 +260,15 @@ Error DbiStreamBuilder::finalize() {
H->PdbDllVersion = PdbDllVersion;
H->MachineType = static_cast<uint16_t>(MachineType);
- H->ECSubstreamSize = 0;
+ H->ECSubstreamSize = ECNamesBuilder.calculateSerializedSize();
H->FileInfoSize = FileInfoBuffer.getLength();
H->ModiSubstreamSize = calculateModiSubstreamSize();
H->OptionalDbgHdrSize = DbgStreams.size() * sizeof(uint16_t);
H->SecContrSubstreamSize = calculateSectionContribsStreamSize();
H->SectionMapSize = calculateSectionMapStreamSize();
H->TypeServerSize = 0;
- H->SymRecordStreamIndex = kInvalidStreamIndex;
- H->PublicSymbolStreamIndex = kInvalidStreamIndex;
+ H->SymRecordStreamIndex = SymRecordStreamIndex;
+ H->PublicSymbolStreamIndex = PublicsStreamIndex;
H->MFCTypeServerIndex = kInvalidStreamIndex;
H->GlobalSymbolStreamIndex = kInvalidStreamIndex;
@@ -383,6 +396,9 @@ Error DbiStreamBuilder::commit(const msf::MSFLayout &Layout,
if (auto EC = Writer.writeStreamRef(FileInfoBuffer))
return EC;
+ if (auto EC = ECNamesBuilder.commit(Writer))
+ return EC;
+
for (auto &Stream : DbgStreams)
if (auto EC = Writer.writeInteger(Stream.StreamNumber))
return EC;
diff --git a/lib/DebugInfo/PDB/Native/NamedStreamMap.cpp b/lib/DebugInfo/PDB/Native/NamedStreamMap.cpp
index 354b8c0e07ff..6cdf6dde04d9 100644
--- a/lib/DebugInfo/PDB/Native/NamedStreamMap.cpp
+++ b/lib/DebugInfo/PDB/Native/NamedStreamMap.cpp
@@ -86,7 +86,8 @@ Error NamedStreamMap::commit(BinaryStreamWriter &Writer) const {
for (const auto &Name : OrderedStreamNames) {
auto Item = Mapping.find(Name);
- assert(Item != Mapping.end());
+ if (Item == Mapping.end())
+ continue;
if (auto EC = Writer.writeCString(Item->getKey()))
return EC;
}
@@ -108,7 +109,8 @@ uint32_t NamedStreamMap::finalize() {
for (const auto &Name : OrderedStreamNames) {
auto Item = Mapping.find(Name);
- assert(Item != Mapping.end());
+ if (Item == Mapping.end())
+ continue;
FinalizedHashTable.set(FinalizedInfo->StringDataBytes, Item->getValue());
FinalizedInfo->StringDataBytes += Item->getKeyLength() + 1;
}
diff --git a/lib/DebugInfo/PDB/Native/NativeBuiltinSymbol.cpp b/lib/DebugInfo/PDB/Native/NativeBuiltinSymbol.cpp
new file mode 100644
index 000000000000..60416f69e137
--- /dev/null
+++ b/lib/DebugInfo/PDB/Native/NativeBuiltinSymbol.cpp
@@ -0,0 +1,48 @@
+//===- NativeBuiltinSymbol.cpp ------------------------------------ C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h"
+
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+
+NativeBuiltinSymbol::NativeBuiltinSymbol(NativeSession &PDBSession,
+ SymIndexId Id, PDB_BuiltinType T,
+ uint64_t L)
+ : NativeRawSymbol(PDBSession, Id), Session(PDBSession), Type(T), Length(L) {
+}
+
+NativeBuiltinSymbol::~NativeBuiltinSymbol() {}
+
+std::unique_ptr<NativeRawSymbol> NativeBuiltinSymbol::clone() const {
+ return llvm::make_unique<NativeBuiltinSymbol>(Session, SymbolId, Type, Length);
+}
+
+void NativeBuiltinSymbol::dump(raw_ostream &OS, int Indent) const {
+ // TODO: Apparently nothing needs this yet.
+}
+
+PDB_SymType NativeBuiltinSymbol::getSymTag() const {
+ return PDB_SymType::BuiltinType;
+}
+
+PDB_BuiltinType NativeBuiltinSymbol::getBuiltinType() const { return Type; }
+
+bool NativeBuiltinSymbol::isConstType() const { return false; }
+
+uint64_t NativeBuiltinSymbol::getLength() const { return Length; }
+
+bool NativeBuiltinSymbol::isUnalignedType() const { return false; }
+
+bool NativeBuiltinSymbol::isVolatileType() const { return false; }
+
+} // namespace pdb
+} // namespace llvm
diff --git a/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp b/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp
index 180c169ec209..7132a99a9f16 100644
--- a/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp
+++ b/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp
@@ -15,7 +15,7 @@ namespace llvm {
namespace pdb {
NativeCompilandSymbol::NativeCompilandSymbol(NativeSession &Session,
- uint32_t SymbolId,
+ SymIndexId SymbolId,
DbiModuleDescriptor MI)
: NativeRawSymbol(Session, SymbolId), Module(MI) {}
diff --git a/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp b/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp
index 6206155b9fb6..cb0830f453c8 100644
--- a/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp
+++ b/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp
@@ -18,7 +18,7 @@
namespace llvm {
namespace pdb {
-NativeExeSymbol::NativeExeSymbol(NativeSession &Session, uint32_t SymbolId)
+NativeExeSymbol::NativeExeSymbol(NativeSession &Session, SymIndexId SymbolId)
: NativeRawSymbol(Session, SymbolId), File(Session.getPDBFile()) {}
std::unique_ptr<NativeRawSymbol> NativeExeSymbol::clone() const {
diff --git a/lib/DebugInfo/PDB/Native/NativeRawSymbol.cpp b/lib/DebugInfo/PDB/Native/NativeRawSymbol.cpp
index b4f5c96ce66b..92612bcea4ac 100644
--- a/lib/DebugInfo/PDB/Native/NativeRawSymbol.cpp
+++ b/lib/DebugInfo/PDB/Native/NativeRawSymbol.cpp
@@ -13,7 +13,7 @@
using namespace llvm;
using namespace llvm::pdb;
-NativeRawSymbol::NativeRawSymbol(NativeSession &PDBSession, uint32_t SymbolId)
+NativeRawSymbol::NativeRawSymbol(NativeSession &PDBSession, SymIndexId SymbolId)
: Session(PDBSession), SymbolId(SymbolId) {}
void NativeRawSymbol::dump(raw_ostream &OS, int Indent) const {}
diff --git a/lib/DebugInfo/PDB/Native/NativeSession.cpp b/lib/DebugInfo/PDB/Native/NativeSession.cpp
index 93d43d9ef341..76de0d8f9e7e 100644
--- a/lib/DebugInfo/PDB/Native/NativeSession.cpp
+++ b/lib/DebugInfo/PDB/Native/NativeSession.cpp
@@ -10,9 +10,11 @@
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/PDB/GenericError.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"
+#include "llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeExeSymbol.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
@@ -33,6 +35,28 @@ using namespace llvm;
using namespace llvm::msf;
using namespace llvm::pdb;
+namespace {
+// Maps codeview::SimpleTypeKind of a built-in type to the parameters necessary
+// to instantiate a NativeBuiltinSymbol for that type.
+static const struct BuiltinTypeEntry {
+ codeview::SimpleTypeKind Kind;
+ PDB_BuiltinType Type;
+ uint32_t Size;
+} BuiltinTypes[] = {
+ {codeview::SimpleTypeKind::Int32, PDB_BuiltinType::Int, 4},
+ {codeview::SimpleTypeKind::UInt32, PDB_BuiltinType::UInt, 4},
+ {codeview::SimpleTypeKind::UInt32Long, PDB_BuiltinType::UInt, 4},
+ {codeview::SimpleTypeKind::UInt64Quad, PDB_BuiltinType::UInt, 8},
+ {codeview::SimpleTypeKind::NarrowCharacter, PDB_BuiltinType::Char, 1},
+ {codeview::SimpleTypeKind::SignedCharacter, PDB_BuiltinType::Char, 1},
+ {codeview::SimpleTypeKind::UnsignedCharacter, PDB_BuiltinType::UInt, 1},
+ {codeview::SimpleTypeKind::UInt16Short, PDB_BuiltinType::UInt, 2},
+ {codeview::SimpleTypeKind::Boolean8, PDB_BuiltinType::Bool, 1}
+ // This table can be grown as necessary, but these are the only types we've
+ // needed so far.
+};
+} // namespace
+
NativeSession::NativeSession(std::unique_ptr<PDBFile> PdbFile,
std::unique_ptr<BumpPtrAllocator> Allocator)
: Pdb(std::move(PdbFile)), Allocator(std::move(Allocator)) {}
@@ -71,19 +95,51 @@ Error NativeSession::createFromExe(StringRef Path,
std::unique_ptr<PDBSymbolCompiland>
NativeSession::createCompilandSymbol(DbiModuleDescriptor MI) {
- const auto Id = static_cast<uint32_t>(SymbolCache.size());
+ const auto Id = static_cast<SymIndexId>(SymbolCache.size());
SymbolCache.push_back(
llvm::make_unique<NativeCompilandSymbol>(*this, Id, MI));
return llvm::make_unique<PDBSymbolCompiland>(
*this, std::unique_ptr<IPDBRawSymbol>(SymbolCache[Id]->clone()));
}
+SymIndexId NativeSession::findSymbolByTypeIndex(codeview::TypeIndex Index) {
+ // First see if it's already in our cache.
+ const auto Entry = TypeIndexToSymbolId.find(Index);
+ if (Entry != TypeIndexToSymbolId.end())
+ return Entry->second;
+
+ // Symbols for built-in types are created on the fly.
+ if (Index.isSimple()) {
+ // FIXME: We will eventually need to handle pointers to other simple types,
+ // which are still simple types in the world of CodeView TypeIndexes.
+ if (Index.getSimpleMode() != codeview::SimpleTypeMode::Direct)
+ return 0;
+ const auto Kind = Index.getSimpleKind();
+ const auto It =
+ std::find_if(std::begin(BuiltinTypes), std::end(BuiltinTypes),
+ [Kind](const BuiltinTypeEntry &Builtin) {
+ return Builtin.Kind == Kind;
+ });
+ if (It == std::end(BuiltinTypes))
+ return 0;
+ SymIndexId Id = SymbolCache.size();
+ SymbolCache.emplace_back(
+ llvm::make_unique<NativeBuiltinSymbol>(*this, Id, It->Type, It->Size));
+ TypeIndexToSymbolId[Index] = Id;
+ return Id;
+ }
+
+ // TODO: Look up PDB type by type index
+
+ return 0;
+}
+
uint64_t NativeSession::getLoadAddress() const { return 0; }
void NativeSession::setLoadAddress(uint64_t Address) {}
std::unique_ptr<PDBSymbolExe> NativeSession::getGlobalScope() {
- const auto Id = static_cast<uint32_t>(SymbolCache.size());
+ const auto Id = static_cast<SymIndexId>(SymbolCache.size());
SymbolCache.push_back(llvm::make_unique<NativeExeSymbol>(*this, Id));
auto RawSymbol = SymbolCache[Id]->clone();
auto PdbSymbol(PDBSymbol::create(*this, std::move(RawSymbol)));
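A minimal standalone sketch of the fast path added above, assuming only the LLVM headers this patch already includes; the helper name and the reduced case set are illustrative and not part of the change:

#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include <cstdint>

// Returns true and fills Type/Size when Index is a directly-addressed
// built-in kind covered by the table; mirrors the lookup performed by
// NativeSession::findSymbolByTypeIndex before it creates a NativeBuiltinSymbol.
static bool classifySimpleIndex(llvm::codeview::TypeIndex Index,
                                llvm::pdb::PDB_BuiltinType &Type,
                                uint32_t &Size) {
  using namespace llvm::codeview;
  if (!Index.isSimple() || Index.getSimpleMode() != SimpleTypeMode::Direct)
    return false;
  switch (Index.getSimpleKind()) {
  case SimpleTypeKind::Int32:
    Type = llvm::pdb::PDB_BuiltinType::Int;  Size = 4; return true;
  case SimpleTypeKind::UInt64Quad:
    Type = llvm::pdb::PDB_BuiltinType::UInt; Size = 8; return true;
  default:
    return false; // the patch returns SymIndexId 0 ("no symbol") in this case
  }
}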
diff --git a/lib/DebugInfo/PDB/Native/PDBFile.cpp b/lib/DebugInfo/PDB/Native/PDBFile.cpp
index 4f6ebb0cb342..0b6492efc70f 100644
--- a/lib/DebugInfo/PDB/Native/PDBFile.cpp
+++ b/lib/DebugInfo/PDB/Native/PDBFile.cpp
@@ -385,8 +385,11 @@ bool PDBFile::hasPDBDbiStream() const { return StreamDBI < getNumStreams(); }
bool PDBFile::hasPDBGlobalsStream() {
auto DbiS = getPDBDbiStream();
- if (!DbiS)
+ if (!DbiS) {
+ consumeError(DbiS.takeError());
return false;
+ }
+
return DbiS->getGlobalSymbolStreamIndex() < getNumStreams();
}
@@ -396,8 +399,10 @@ bool PDBFile::hasPDBIpiStream() const { return StreamIPI < getNumStreams(); }
bool PDBFile::hasPDBPublicsStream() {
auto DbiS = getPDBDbiStream();
- if (!DbiS)
+ if (!DbiS) {
+ consumeError(DbiS.takeError());
return false;
+ }
return DbiS->getPublicSymbolStreamIndex() < getNumStreams();
}
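The two hunks above exist because an Expected<T> that holds an Error must have that error consumed before it is destroyed; otherwise the checked-Error machinery aborts in assertion-enabled builds. A minimal sketch of the pattern, not taken from the patch:

#include "llvm/Support/Error.h"

// Testing an Expected<T> marks it checked; if it carries an error, that error
// must still be consumed. consumeError() marks it handled when only the
// success/failure answer matters, which is all hasPDBGlobalsStream() needs.
static bool succeeded(llvm::Expected<int> E) {
  if (!E) {
    llvm::consumeError(E.takeError());
    return false;
  }
  return true;
}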
diff --git a/lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp b/lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp
index 12b0c3b36c1d..9f35fd73629c 100644
--- a/lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp
+++ b/lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp
@@ -18,6 +18,7 @@
#include "llvm/DebugInfo/PDB/Native/InfoStream.h"
#include "llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
+#include "llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h"
#include "llvm/DebugInfo/PDB/Native/RawError.h"
#include "llvm/DebugInfo/PDB/Native/TpiStream.h"
#include "llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h"
@@ -33,6 +34,8 @@ using namespace llvm::support;
PDBFileBuilder::PDBFileBuilder(BumpPtrAllocator &Allocator)
: Allocator(Allocator) {}
+PDBFileBuilder::~PDBFileBuilder() {}
+
Error PDBFileBuilder::initialize(uint32_t BlockSize) {
auto ExpectedMsf = MSFBuilder::create(Allocator, BlockSize);
if (!ExpectedMsf)
@@ -71,6 +74,12 @@ PDBStringTableBuilder &PDBFileBuilder::getStringTableBuilder() {
return Strings;
}
+PublicsStreamBuilder &PDBFileBuilder::getPublicsBuilder() {
+ if (!Publics)
+ Publics = llvm::make_unique<PublicsStreamBuilder>(*Msf);
+ return *Publics;
+}
+
Error PDBFileBuilder::addNamedStream(StringRef Name, uint32_t Size) {
auto ExpectedStream = Msf->addStream(Size);
if (!ExpectedStream)
@@ -96,8 +105,6 @@ Expected<msf::MSFLayout> PDBFileBuilder::finalizeMsfLayout() {
return std::move(EC);
if (auto EC = addNamedStream("/LinkInfo", 0))
return std::move(EC);
- if (auto EC = addNamedStream("/src/headerblock", 0))
- return std::move(EC);
if (Info) {
if (auto EC = Info->finalizeMsfLayout())
@@ -115,6 +122,14 @@ Expected<msf::MSFLayout> PDBFileBuilder::finalizeMsfLayout() {
if (auto EC = Ipi->finalizeMsfLayout())
return std::move(EC);
}
+ if (Publics) {
+ if (auto EC = Publics->finalizeMsfLayout())
+ return std::move(EC);
+ if (Dbi) {
+ Dbi->setPublicsStreamIndex(Publics->getStreamIndex());
+ Dbi->setSymbolRecordStreamIndex(Publics->getRecordStreamIdx());
+ }
+ }
return Msf->build();
}
@@ -194,5 +209,13 @@ Error PDBFileBuilder::commit(StringRef Filename) {
return EC;
}
+ if (Publics) {
+ auto PS = WritableMappedBlockStream::createIndexedStream(
+ Layout, Buffer, Publics->getStreamIndex(), Allocator);
+ BinaryStreamWriter PSWriter(*PS);
+ if (auto EC = Publics->commit(PSWriter))
+ return EC;
+ }
+
return Buffer.commit();
}
diff --git a/lib/DebugInfo/PDB/Native/PDBStringTable.cpp b/lib/DebugInfo/PDB/Native/PDBStringTable.cpp
index f9f8ac219d35..acd45f7a6219 100644
--- a/lib/DebugInfo/PDB/Native/PDBStringTable.cpp
+++ b/lib/DebugInfo/PDB/Native/PDBStringTable.cpp
@@ -21,7 +21,7 @@ using namespace llvm;
using namespace llvm::support;
using namespace llvm::pdb;
-uint32_t PDBStringTable::getByteSize() const { return ByteSize; }
+uint32_t PDBStringTable::getByteSize() const { return Header->ByteSize; }
uint32_t PDBStringTable::getNameCount() const { return NameCount; }
uint32_t PDBStringTable::getHashVersion() const { return Header->HashVersion; }
uint32_t PDBStringTable::getSignature() const { return Header->Signature; }
diff --git a/lib/DebugInfo/PDB/Native/PublicsStream.cpp b/lib/DebugInfo/PDB/Native/PublicsStream.cpp
index 8f3474b9ce19..9c3e654f808b 100644
--- a/lib/DebugInfo/PDB/Native/PublicsStream.cpp
+++ b/lib/DebugInfo/PDB/Native/PublicsStream.cpp
@@ -41,19 +41,6 @@ using namespace llvm::msf;
using namespace llvm::support;
using namespace llvm::pdb;
-// This is PSGSIHDR struct defined in
-// https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/dbi/gsi.h
-struct PublicsStream::HeaderInfo {
- ulittle32_t SymHash;
- ulittle32_t AddrMap;
- ulittle32_t NumThunks;
- ulittle32_t SizeOfThunk;
- ulittle16_t ISectThunkTable;
- char Padding[2];
- ulittle32_t OffThunkTable;
- ulittle32_t NumSections;
-};
-
PublicsStream::PublicsStream(PDBFile &File,
std::unique_ptr<MappedBlockStream> Stream)
: Pdb(File), Stream(std::move(Stream)) {}
@@ -72,7 +59,8 @@ Error PublicsStream::reload() {
BinaryStreamReader Reader(*Stream);
// Check stream size.
- if (Reader.bytesRemaining() < sizeof(HeaderInfo) + sizeof(GSIHashHeader))
+ if (Reader.bytesRemaining() <
+ sizeof(PublicsStreamHeader) + sizeof(GSIHashHeader))
return make_error<RawError>(raw_error_code::corrupt_file,
"Publics Stream does not contain a header.");
diff --git a/lib/DebugInfo/PDB/Native/PublicsStreamBuilder.cpp b/lib/DebugInfo/PDB/Native/PublicsStreamBuilder.cpp
new file mode 100644
index 000000000000..28c4a8fc35d9
--- /dev/null
+++ b/lib/DebugInfo/PDB/Native/PublicsStreamBuilder.cpp
@@ -0,0 +1,89 @@
+//===- PublicsStreamBuilder.cpp - PDB Publics Stream Creation --*- C++ -*-===//

+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h"
+
+#include "llvm/DebugInfo/MSF/MSFBuilder.h"
+#include "llvm/DebugInfo/MSF/MSFCommon.h"
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+
+#include "GSI.h"
+
+using namespace llvm;
+using namespace llvm::msf;
+using namespace llvm::pdb;
+
+PublicsStreamBuilder::PublicsStreamBuilder(msf::MSFBuilder &Msf) : Msf(Msf) {}
+
+PublicsStreamBuilder::~PublicsStreamBuilder() {}
+
+uint32_t PublicsStreamBuilder::calculateSerializedLength() const {
+ uint32_t Size = 0;
+ Size += sizeof(PublicsStreamHeader);
+ Size += sizeof(GSIHashHeader);
+ Size += HashRecords.size() * sizeof(PSHashRecord);
+ size_t BitmapSizeInBits = alignTo(IPHR_HASH + 1, 32);
+ uint32_t NumBitmapEntries = BitmapSizeInBits / 8;
+ Size += NumBitmapEntries;
+
+ // FIXME: Account for hash buckets. For now, since we write a zero bitmap
+ // indicating that no hash buckets are valid, we also write zero bytes of
+ // hash bucket data.
+ Size += 0;
+ return Size;
+}
+
+Error PublicsStreamBuilder::finalizeMsfLayout() {
+ Expected<uint32_t> Idx = Msf.addStream(calculateSerializedLength());
+ if (!Idx)
+ return Idx.takeError();
+ StreamIdx = *Idx;
+
+ Expected<uint32_t> RecordIdx = Msf.addStream(0);
+ if (!RecordIdx)
+ return RecordIdx.takeError();
+ RecordStreamIdx = *RecordIdx;
+ return Error::success();
+}
+
+Error PublicsStreamBuilder::commit(BinaryStreamWriter &PublicsWriter) {
+ PublicsStreamHeader PSH;
+ GSIHashHeader GSH;
+
+ // FIXME: Figure out what to put for these values.
+ PSH.AddrMap = 0;
+ PSH.ISectThunkTable = 0;
+ PSH.NumSections = 0;
+ PSH.NumThunks = 0;
+ PSH.OffThunkTable = 0;
+ PSH.SizeOfThunk = 0;
+ PSH.SymHash = 0;
+
+ GSH.VerSignature = GSIHashHeader::HdrSignature;
+ GSH.VerHdr = GSIHashHeader::HdrVersion;
+ GSH.HrSize = 0;
+ GSH.NumBuckets = 0;
+
+ if (auto EC = PublicsWriter.writeObject(PSH))
+ return EC;
+ if (auto EC = PublicsWriter.writeObject(GSH))
+ return EC;
+ if (auto EC = PublicsWriter.writeArray(makeArrayRef(HashRecords)))
+ return EC;
+
+ size_t BitmapSizeInBits = alignTo(IPHR_HASH + 1, 32);
+ uint32_t NumBitmapEntries = BitmapSizeInBits / 8;
+ std::vector<uint8_t> BitmapData(NumBitmapEntries);
+ // FIXME: Build an actual bitmap
+ if (auto EC = PublicsWriter.writeBytes(makeArrayRef(BitmapData)))
+ return EC;
+
+ // FIXME: Write actual hash buckets.
+ return Error::success();
+}
diff --git a/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index ff8749fbfed4..1164d60ffc10 100644
--- a/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -317,7 +317,13 @@ uint64_t MCJIT::getSymbolAddress(const std::string &Name,
raw_string_ostream MangledNameStream(MangledName);
Mangler::getNameWithPrefix(MangledNameStream, Name, getDataLayout());
}
- return findSymbol(MangledName, CheckFunctionsOnly).getAddress();
+ if (auto Sym = findSymbol(MangledName, CheckFunctionsOnly)) {
+ if (auto AddrOrErr = Sym.getAddress())
+ return *AddrOrErr;
+ else
+ report_fatal_error(AddrOrErr.takeError());
+ } else
+ report_fatal_error(Sym.takeError());
}
JITSymbol MCJIT::findSymbol(const std::string &Name,
@@ -599,11 +605,12 @@ GenericValue MCJIT::runFunction(Function *F, ArrayRef<GenericValue> ArgValues) {
void *MCJIT::getPointerToNamedFunction(StringRef Name, bool AbortOnFailure) {
if (!isSymbolSearchingDisabled()) {
- void *ptr =
- reinterpret_cast<void*>(
- static_cast<uintptr_t>(Resolver.findSymbol(Name).getAddress()));
- if (ptr)
- return ptr;
+ if (auto Sym = Resolver.findSymbol(Name)) {
+ if (auto AddrOrErr = Sym.getAddress())
+ return reinterpret_cast<void*>(
+ static_cast<uintptr_t>(*AddrOrErr));
+ } else if (auto Err = Sym.takeError())
+ report_fatal_error(std::move(Err));
}
/// If a LazyFunctionCreator is installed, use it to get/create the function.
diff --git a/lib/ExecutionEngine/Orc/OrcCBindings.cpp b/lib/ExecutionEngine/Orc/OrcCBindings.cpp
index 5fe259f80b6f..de80cb1d0dd4 100644
--- a/lib/ExecutionEngine/Orc/OrcCBindings.cpp
+++ b/lib/ExecutionEngine/Orc/OrcCBindings.cpp
@@ -60,12 +60,13 @@ void LLVMOrcGetMangledSymbol(LLVMOrcJITStackRef JITStack, char **MangledName,
void LLVMOrcDisposeMangledSymbol(char *MangledName) { delete[] MangledName; }
-LLVMOrcTargetAddress
+LLVMOrcErrorCode
LLVMOrcCreateLazyCompileCallback(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
LLVMOrcLazyCompileCallbackFn Callback,
void *CallbackCtx) {
OrcCBindingsStack &J = *unwrap(JITStack);
- return J.createLazyCompileCallback(Callback, CallbackCtx);
+ return J.createLazyCompileCallback(*RetAddr, Callback, CallbackCtx);
}
LLVMOrcErrorCode LLVMOrcCreateIndirectStub(LLVMOrcJITStackRef JITStack,
@@ -82,38 +83,44 @@ LLVMOrcErrorCode LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
return J.setIndirectStubPointer(StubName, NewAddr);
}
-LLVMOrcModuleHandle
+LLVMOrcErrorCode
LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
LLVMSharedModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx) {
OrcCBindingsStack &J = *unwrap(JITStack);
std::shared_ptr<Module> *M(unwrap(Mod));
- return J.addIRModuleEager(*M, SymbolResolver, SymbolResolverCtx);
+ return J.addIRModuleEager(*RetHandle, *M, SymbolResolver, SymbolResolverCtx);
}
-LLVMOrcModuleHandle
+LLVMOrcErrorCode
LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
LLVMSharedModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx) {
OrcCBindingsStack &J = *unwrap(JITStack);
std::shared_ptr<Module> *M(unwrap(Mod));
- return J.addIRModuleLazy(*M, SymbolResolver, SymbolResolverCtx);
+ return J.addIRModuleLazy(*RetHandle, *M, SymbolResolver, SymbolResolverCtx);
}
-void LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack, LLVMOrcModuleHandle H) {
+LLVMOrcErrorCode LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle H) {
OrcCBindingsStack &J = *unwrap(JITStack);
- J.removeModule(H);
+ return J.removeModule(H);
}
-LLVMOrcTargetAddress LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
- const char *SymbolName) {
+LLVMOrcErrorCode LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
+ const char *SymbolName) {
OrcCBindingsStack &J = *unwrap(JITStack);
- auto Sym = J.findSymbol(SymbolName, true);
- return Sym.getAddress();
+ return J.findSymbolAddress(*RetAddr, SymbolName, true);
}
-void LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack) {
- delete unwrap(JITStack);
+LLVMOrcErrorCode LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack) {
+ auto *J = unwrap(JITStack);
+ auto Err = J->shutdown();
+ delete J;
+ return Err;
}
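With these changes every entry point that used to return a handle or address now reports an LLVMOrcErrorCode and passes its result through an out parameter. A hedged usage sketch of the revised call pattern follows; the JIT stack, shared module, and resolver are assumed to be created elsewhere, and the function name is invented for illustration:

#include "llvm-c/OrcBindings.h"
#include <cstdio>

static int addAndRun(LLVMOrcJITStackRef JIT, LLVMSharedModuleRef Mod,
                     LLVMOrcSymbolResolverFn Resolve, void *Ctx) {
  LLVMOrcModuleHandle Handle;
  if (LLVMOrcAddEagerlyCompiledIR(JIT, &Handle, Mod, Resolve, Ctx) !=
      LLVMOrcErrSuccess) {
    std::fprintf(stderr, "addModule: %s\n", LLVMOrcGetErrorMsg(JIT));
    return 1;
  }

  // A successful lookup of a missing symbol leaves Addr at 0; a failed lookup
  // is now reported as an error code instead of being silently dropped.
  LLVMOrcTargetAddress Addr = 0;
  if (LLVMOrcGetSymbolAddress(JIT, &Addr, "main") != LLVMOrcErrSuccess) {
    std::fprintf(stderr, "lookup: %s\n", LLVMOrcGetErrorMsg(JIT));
    return 1;
  }

  LLVMOrcRemoveModule(JIT, Handle);   // also returns LLVMOrcErrorCode now
  return LLVMOrcDisposeInstance(JIT) == LLVMOrcErrSuccess ? 0 : 1;
}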
diff --git a/lib/ExecutionEngine/Orc/OrcCBindingsStack.h b/lib/ExecutionEngine/Orc/OrcCBindingsStack.h
index 931d0a9eb2ad..e38decf94f3e 100644
--- a/lib/ExecutionEngine/Orc/OrcCBindingsStack.h
+++ b/lib/ExecutionEngine/Orc/OrcCBindingsStack.h
@@ -70,7 +70,7 @@ private:
virtual JITSymbol findSymbolIn(const std::string &Name,
bool ExportedSymbolsOnly) = 0;
- virtual void removeModule() = 0;
+ virtual Error removeModule() = 0;
};
template <typename LayerT> class GenericHandleImpl : public GenericHandle {
@@ -83,7 +83,7 @@ private:
return Layer.findSymbolIn(Handle, Name, ExportedSymbolsOnly);
}
- void removeModule() override { return Layer.removeModule(Handle); }
+ Error removeModule() override { return Layer.removeModule(Handle); }
private:
LayerT &Layer;
@@ -105,6 +105,10 @@ public:
IndirectStubsManagerBuilder IndirectStubsMgrBuilder)
: DL(TM.createDataLayout()), IndirectStubsMgr(IndirectStubsMgrBuilder()),
CCMgr(std::move(CCMgr)),
+ ObjectLayer(
+ []() {
+ return std::make_shared<SectionMemoryManager>();
+ }),
CompileLayer(ObjectLayer, orc::SimpleCompiler(TM)),
CODLayer(CompileLayer,
[](Function &F) { return std::set<Function *>({&F}); },
@@ -112,12 +116,14 @@ public:
CXXRuntimeOverrides(
[this](const std::string &S) { return mangle(S); }) {}
- ~OrcCBindingsStack() {
+ LLVMOrcErrorCode shutdown() {
// Run any destructors registered with __cxa_atexit.
CXXRuntimeOverrides.runDestructors();
// Run any IR destructors.
for (auto &DtorRunner : IRStaticDestructorRunners)
- DtorRunner.runViaLayer(*this);
+ if (auto Err = DtorRunner.runViaLayer(*this))
+ return mapError(std::move(Err));
+ return LLVMOrcErrSuccess;
}
std::string mangle(StringRef Name) {
@@ -134,14 +140,17 @@ public:
return reinterpret_cast<PtrTy>(static_cast<uintptr_t>(Addr));
}
- JITTargetAddress
- createLazyCompileCallback(LLVMOrcLazyCompileCallbackFn Callback,
+
+ LLVMOrcErrorCode
+ createLazyCompileCallback(JITTargetAddress &RetAddr,
+ LLVMOrcLazyCompileCallbackFn Callback,
void *CallbackCtx) {
auto CCInfo = CCMgr->getCompileCallback();
CCInfo.setCompileAction([=]() -> JITTargetAddress {
return Callback(wrap(this), CallbackCtx);
});
- return CCInfo.getAddress();
+ RetAddr = CCInfo.getAddress();
+ return LLVMOrcErrSuccess;
}
LLVMOrcErrorCode createIndirectStub(StringRef StubName,
@@ -155,12 +164,12 @@ public:
return mapError(IndirectStubsMgr->updatePointer(Name, Addr));
}
- std::unique_ptr<JITSymbolResolver>
+ std::shared_ptr<JITSymbolResolver>
createResolver(LLVMOrcSymbolResolverFn ExternalResolver,
void *ExternalResolverCtx) {
return orc::createLambdaResolver(
[this, ExternalResolver, ExternalResolverCtx](const std::string &Name)
- -> JITSymbol {
+ -> JITSymbol {
// Search order:
// 1. JIT'd symbols.
// 2. Runtime overrides.
@@ -168,6 +177,9 @@ public:
if (auto Sym = CODLayer.findSymbol(Name, true))
return Sym;
+ else if (auto Err = Sym.takeError())
+ return Sym.takeError();
+
if (auto Sym = CXXRuntimeOverrides.searchOverrides(Name))
return Sym;
@@ -178,16 +190,19 @@ public:
return JITSymbol(nullptr);
},
- [](const std::string &Name) {
+ [](const std::string &Name) -> JITSymbol {
return JITSymbol(nullptr);
});
}
template <typename LayerT>
- ModuleHandleT addIRModule(LayerT &Layer, std::shared_ptr<Module> M,
- std::unique_ptr<RuntimeDyld::MemoryManager> MemMgr,
- LLVMOrcSymbolResolverFn ExternalResolver,
- void *ExternalResolverCtx) {
+ LLVMOrcErrorCode
+ addIRModule(ModuleHandleT &RetHandle, LayerT &Layer,
+ std::shared_ptr<Module> M,
+ std::unique_ptr<RuntimeDyld::MemoryManager> MemMgr,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+
// Attach a data-layout if one isn't already present.
if (M->getDataLayout().isDefault())
M->setDataLayout(DL);
@@ -204,43 +219,52 @@ public:
auto Resolver = createResolver(ExternalResolver, ExternalResolverCtx);
// Add the module to the JIT.
- auto LH = Layer.addModule(std::move(M), std::move(MemMgr),
- std::move(Resolver));
- ModuleHandleT H = createHandle(Layer, LH);
+ ModuleHandleT H;
+ if (auto LHOrErr = Layer.addModule(std::move(M), std::move(Resolver)))
+ H = createHandle(Layer, *LHOrErr);
+ else
+ return mapError(LHOrErr.takeError());
// Run the static constructors, and save the static destructor runner for
// execution when the JIT is torn down.
orc::CtorDtorRunner<OrcCBindingsStack> CtorRunner(std::move(CtorNames), H);
- CtorRunner.runViaLayer(*this);
+ if (auto Err = CtorRunner.runViaLayer(*this))
+ return mapError(std::move(Err));
IRStaticDestructorRunners.emplace_back(std::move(DtorNames), H);
- return H;
+ RetHandle = H;
+ return LLVMOrcErrSuccess;
}
- ModuleHandleT addIRModuleEager(std::shared_ptr<Module> M,
- LLVMOrcSymbolResolverFn ExternalResolver,
- void *ExternalResolverCtx) {
- return addIRModule(CompileLayer, std::move(M),
+ LLVMOrcErrorCode addIRModuleEager(ModuleHandleT &RetHandle,
+ std::shared_ptr<Module> M,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+ return addIRModule(RetHandle, CompileLayer, std::move(M),
llvm::make_unique<SectionMemoryManager>(),
std::move(ExternalResolver), ExternalResolverCtx);
}
- ModuleHandleT addIRModuleLazy(std::shared_ptr<Module> M,
- LLVMOrcSymbolResolverFn ExternalResolver,
- void *ExternalResolverCtx) {
- return addIRModule(CODLayer, std::move(M),
+ LLVMOrcErrorCode addIRModuleLazy(ModuleHandleT &RetHandle,
+ std::shared_ptr<Module> M,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+ return addIRModule(RetHandle, CODLayer, std::move(M),
llvm::make_unique<SectionMemoryManager>(),
std::move(ExternalResolver), ExternalResolverCtx);
}
- void removeModule(ModuleHandleT H) {
- GenericHandles[H]->removeModule();
+ LLVMOrcErrorCode removeModule(ModuleHandleT H) {
+ if (auto Err = GenericHandles[H]->removeModule())
+ return mapError(std::move(Err));
GenericHandles[H] = nullptr;
FreeHandleIndexes.push_back(H);
+ return LLVMOrcErrSuccess;
}
- JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ JITSymbol findSymbol(const std::string &Name,
+ bool ExportedSymbolsOnly) {
if (auto Sym = IndirectStubsMgr->findStub(Name, ExportedSymbolsOnly))
return Sym;
return CODLayer.findSymbol(mangle(Name), ExportedSymbolsOnly);
@@ -251,6 +275,26 @@ public:
return GenericHandles[H]->findSymbolIn(Name, ExportedSymbolsOnly);
}
+ LLVMOrcErrorCode findSymbolAddress(JITTargetAddress &RetAddr,
+ const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ RetAddr = 0;
+ if (auto Sym = findSymbol(Name, ExportedSymbolsOnly)) {
+ // Successful lookup, non-null symbol:
+ if (auto AddrOrErr = Sym.getAddress()) {
+ RetAddr = *AddrOrErr;
+ return LLVMOrcErrSuccess;
+ } else
+ return mapError(AddrOrErr.takeError());
+ } else if (auto Err = Sym.takeError()) {
+ // Lookup failure - report error.
+ return mapError(std::move(Err));
+ }
+ // Otherwise we had a successful lookup but got a null result. We already
+ // set RetAddr to '0' above, so just return success.
+ return LLVMOrcErrSuccess;
+ }
+
const std::string &getErrorMessage() const { return ErrMsg; }
private:
diff --git a/lib/ExecutionEngine/Orc/OrcError.cpp b/lib/ExecutionEngine/Orc/OrcError.cpp
index 9e70c4ac1dbf..df2d320e0f7a 100644
--- a/lib/ExecutionEngine/Orc/OrcError.cpp
+++ b/lib/ExecutionEngine/Orc/OrcError.cpp
@@ -45,6 +45,8 @@ public:
return "Could not negotiate RPC function";
case OrcErrorCode::RPCResponseAbandoned:
return "RPC response abandoned";
+ case OrcErrorCode::JITSymbolNotFound:
+ return "JIT symbol not found";
case OrcErrorCode::UnexpectedRPCCall:
return "Unexpected RPC call";
case OrcErrorCode::UnexpectedRPCResponse:
@@ -63,10 +65,29 @@ static ManagedStatic<OrcErrorCategory> OrcErrCat;
namespace llvm {
namespace orc {
+char JITSymbolNotFound::ID = 0;
+
std::error_code orcError(OrcErrorCode ErrCode) {
typedef std::underlying_type<OrcErrorCode>::type UT;
return std::error_code(static_cast<UT>(ErrCode), *OrcErrCat);
}
+JITSymbolNotFound::JITSymbolNotFound(std::string SymbolName)
+ : SymbolName(std::move(SymbolName)) {}
+
+std::error_code JITSymbolNotFound::convertToErrorCode() const {
+ typedef std::underlying_type<OrcErrorCode>::type UT;
+ return std::error_code(static_cast<UT>(OrcErrorCode::JITSymbolNotFound),
+ *OrcErrCat);
+}
+
+void JITSymbolNotFound::log(raw_ostream &OS) const {
+ OS << "Could not find symbol '" << SymbolName << "'";
+}
+
+const std::string &JITSymbolNotFound::getSymbolName() const {
+ return SymbolName;
+}
+
}
}
diff --git a/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h b/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
index 690276232a6f..346a40405ff1 100644
--- a/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
+++ b/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
@@ -172,10 +172,13 @@ public:
std::shared_ptr<JITSymbolResolver> ClientResolver,
std::unique_ptr<TargetMachine> TM)
: ExecutionEngine(TM->createDataLayout()), TM(std::move(TM)),
- MemMgr(*this, std::move(MemMgr)), Resolver(*this),
+ MemMgr(std::make_shared<MCJITReplacementMemMgr>(*this,
+ std::move(MemMgr))),
+ Resolver(std::make_shared<LinkingResolver>(*this)),
ClientResolver(std::move(ClientResolver)), NotifyObjectLoaded(*this),
NotifyFinalized(*this),
- ObjectLayer(NotifyObjectLoaded, NotifyFinalized),
+ ObjectLayer([this]() { return this->MemMgr; }, NotifyObjectLoaded,
+ NotifyFinalized),
CompileLayer(ObjectLayer, SimpleCompiler(*this->TM)),
LazyEmitLayer(CompileLayer) {}
@@ -199,20 +202,20 @@ public:
delete Mod;
};
LocalModules.push_back(std::shared_ptr<Module>(MPtr, std::move(Deleter)));
- LazyEmitLayer.addModule(LocalModules.back(), &MemMgr, &Resolver);
+ cantFail(LazyEmitLayer.addModule(LocalModules.back(), Resolver));
}
void addObjectFile(std::unique_ptr<object::ObjectFile> O) override {
auto Obj =
std::make_shared<object::OwningBinary<object::ObjectFile>>(std::move(O),
nullptr);
- ObjectLayer.addObject(std::move(Obj), &MemMgr, &Resolver);
+ cantFail(ObjectLayer.addObject(std::move(Obj), Resolver));
}
void addObjectFile(object::OwningBinary<object::ObjectFile> O) override {
auto Obj =
std::make_shared<object::OwningBinary<object::ObjectFile>>(std::move(O));
- ObjectLayer.addObject(std::move(Obj), &MemMgr, &Resolver);
+ cantFail(ObjectLayer.addObject(std::move(Obj), Resolver));
}
void addArchive(object::OwningBinary<object::Archive> A) override {
@@ -231,7 +234,7 @@ public:
}
uint64_t getSymbolAddress(StringRef Name) {
- return findSymbol(Name).getAddress();
+ return cantFail(findSymbol(Name).getAddress());
}
JITSymbol findSymbol(StringRef Name) {
@@ -320,7 +323,7 @@ private:
auto Obj =
std::make_shared<object::OwningBinary<object::ObjectFile>>(
std::move(ChildObj), nullptr);
- ObjectLayer.addObject(std::move(Obj), &MemMgr, &Resolver);
+ cantFail(ObjectLayer.addObject(std::move(Obj), Resolver));
if (auto Sym = ObjectLayer.findSymbol(Name, true))
return Sym;
}
@@ -341,7 +344,7 @@ private:
const LoadedObjectInfo &Info) const {
M.UnfinalizedSections[H] = std::move(M.SectionsAllocatedSinceLastLoad);
M.SectionsAllocatedSinceLastLoad = SectionAddrSet();
- M.MemMgr.notifyObjectLoaded(&M, *Obj->getBinary());
+ M.MemMgr->notifyObjectLoaded(&M, *Obj->getBinary());
}
private:
OrcMCJITReplacement &M;
@@ -373,8 +376,8 @@ private:
using LazyEmitLayerT = LazyEmittingLayer<CompileLayerT>;
std::unique_ptr<TargetMachine> TM;
- MCJITReplacementMemMgr MemMgr;
- LinkingResolver Resolver;
+ std::shared_ptr<MCJITReplacementMemMgr> MemMgr;
+ std::shared_ptr<LinkingResolver> Resolver;
std::shared_ptr<JITSymbolResolver> ClientResolver;
Mangler Mang;
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index 2b69f1a0269f..8198836f7a0c 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -128,7 +128,10 @@ void RuntimeDyldImpl::resolveRelocations() {
);
// First, resolve relocations associated with external symbols.
- resolveExternalSymbols();
+ if (auto Err = resolveExternalSymbols()) {
+ HasError = true;
+ ErrorStr = toString(std::move(Err));
+ }
// Iterate over all outstanding relocations
for (auto it = Relocations.begin(), e = Relocations.end(); it != e; ++it) {
@@ -243,9 +246,11 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
continue;
// Then check the symbol resolver to see if there's a definition
// elsewhere in this logical dylib.
- if (auto Sym = Resolver.findSymbolInLogicalDylib(Name))
+ if (auto Sym = Resolver.findSymbolInLogicalDylib(Name)) {
if (Sym.getFlags().isStrongDefinition())
continue;
+ } else if (auto Err = Sym.takeError())
+ return std::move(Err);
// else
JITSymFlags &= ~JITSymbolFlags::Weak;
}
@@ -953,7 +958,7 @@ void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
}
}
-void RuntimeDyldImpl::resolveExternalSymbols() {
+Error RuntimeDyldImpl::resolveExternalSymbols() {
while (!ExternalSymbolRelocations.empty()) {
StringMap<RelocationList>::iterator i = ExternalSymbolRelocations.begin();
@@ -971,10 +976,24 @@ void RuntimeDyldImpl::resolveExternalSymbols() {
// This is an external symbol, try to get its address from the symbol
// resolver.
// First search for the symbol in this logical dylib.
- Addr = Resolver.findSymbolInLogicalDylib(Name.data()).getAddress();
+ if (auto Sym = Resolver.findSymbolInLogicalDylib(Name.data())) {
+ if (auto AddrOrErr = Sym.getAddress())
+ Addr = *AddrOrErr;
+ else
+ return AddrOrErr.takeError();
+ } else if (auto Err = Sym.takeError())
+ return Err;
+
// If that fails, try searching for an external symbol.
- if (!Addr)
- Addr = Resolver.findSymbol(Name.data()).getAddress();
+ if (!Addr) {
+ if (auto Sym = Resolver.findSymbol(Name.data())) {
+ if (auto AddrOrErr = Sym.getAddress())
+ Addr = *AddrOrErr;
+ else
+ return AddrOrErr.takeError();
+ } else if (auto Err = Sym.takeError())
+ return Err;
+ }
// The call to getSymbolAddress may have caused additional modules to
// be loaded, which may have added new entries to the
// ExternalSymbolRelocations map. Consequently, we need to update our
@@ -1009,6 +1028,8 @@ void RuntimeDyldImpl::resolveExternalSymbols() {
ExternalSymbolRelocations.erase(i);
}
+
+ return Error::success();
}
//===----------------------------------------------------------------------===//
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
index 1bd28ef37ed1..1c54ad6fb03f 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
@@ -27,9 +27,12 @@ using namespace llvm::object;
namespace {
class LoadedCOFFObjectInfo final
- : public RuntimeDyld::LoadedObjectInfoHelper<LoadedCOFFObjectInfo> {
+ : public LoadedObjectInfoHelper<LoadedCOFFObjectInfo,
+ RuntimeDyld::LoadedObjectInfo> {
public:
- LoadedCOFFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
+ LoadedCOFFObjectInfo(
+ RuntimeDyldImpl &RTDyld,
+ RuntimeDyld::LoadedObjectInfo::ObjSectionToIDMap ObjSecToIDMap)
: LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
OwningBinary<ObjectFile>
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
index e45fdc7aee18..5bc7434e703f 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -742,7 +742,7 @@ uint64_t RuntimeDyldCheckerImpl::getSymbolLocalAddr(StringRef Symbol) const {
uint64_t RuntimeDyldCheckerImpl::getSymbolRemoteAddr(StringRef Symbol) const {
if (auto InternalSymbol = getRTDyld().getSymbol(Symbol))
return InternalSymbol.getAddress();
- return getRTDyld().Resolver.findSymbol(Symbol).getAddress();
+ return cantFail(getRTDyld().Resolver.findSymbol(Symbol).getAddress());
}
uint64_t RuntimeDyldCheckerImpl::readMemoryAtAddr(uint64_t SrcAddr,
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index 8b6f9bef66df..77c968401c16 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -123,7 +123,8 @@ void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
}
class LoadedELFObjectInfo final
- : public RuntimeDyld::LoadedObjectInfoHelper<LoadedELFObjectInfo> {
+ : public LoadedObjectInfoHelper<LoadedELFObjectInfo,
+ RuntimeDyld::LoadedObjectInfo> {
public:
LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
: LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
index 5268bc5a1868..95b04fd93251 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -417,7 +417,7 @@ protected:
StubMap &Stubs) = 0;
/// \brief Resolve relocations to external symbols.
- void resolveExternalSymbols();
+ Error resolveExternalSymbols();
// \brief Compute an upper bound of the memory that is required to load all
// sections
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
index 00541e8c06fe..80e9c7ac18aa 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -27,7 +27,8 @@ using namespace llvm::object;
namespace {
class LoadedMachOObjectInfo final
- : public RuntimeDyld::LoadedObjectInfoHelper<LoadedMachOObjectInfo> {
+ : public LoadedObjectInfoHelper<LoadedMachOObjectInfo,
+ RuntimeDyld::LoadedObjectInfo> {
public:
LoadedMachOObjectInfo(RuntimeDyldImpl &RTDyld,
ObjSectionToIDMap ObjSecToIDMap)
diff --git a/lib/Fuzzer/CMakeLists.txt b/lib/Fuzzer/CMakeLists.txt
index b886021aee3f..fa743c280e86 100644
--- a/lib/Fuzzer/CMakeLists.txt
+++ b/lib/Fuzzer/CMakeLists.txt
@@ -13,6 +13,7 @@ if( APPLE )
endif()
endif()
+set(LIBFUZZER_FLAGS_BASE "${CMAKE_CXX_FLAGS}")
if( LLVM_USE_SANITIZE_COVERAGE )
if(NOT "${LLVM_USE_SANITIZER}" STREQUAL "Address")
message(FATAL_ERROR
@@ -20,7 +21,6 @@ if( LLVM_USE_SANITIZE_COVERAGE )
"LLVM_USE_SANITIZE_COVERAGE=YES to be set."
)
endif()
- set(LIBFUZZER_FLAGS_BASE "${CMAKE_CXX_FLAGS}")
# Disable the coverage and sanitizer instrumentation for the fuzzer itself.
set(CMAKE_CXX_FLAGS "${LIBFUZZER_FLAGS_BASE} -fno-sanitize-coverage=trace-pc-guard,edge,trace-cmp,indirect-calls,8bit-counters -Werror")
diff --git a/lib/Fuzzer/FuzzerCorpus.h b/lib/Fuzzer/FuzzerCorpus.h
index 0f0573994a03..218ae5b6ac4d 100644
--- a/lib/Fuzzer/FuzzerCorpus.h
+++ b/lib/Fuzzer/FuzzerCorpus.h
@@ -34,6 +34,7 @@ struct InputInfo {
size_t NumExecutedMutations = 0;
size_t NumSuccessfullMutations = 0;
bool MayDeleteFile = false;
+ std::vector<uint32_t> FeatureSet;
};
class InputCorpus {
@@ -68,24 +69,84 @@ class InputCorpus {
}
bool empty() const { return Inputs.empty(); }
const Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; }
- void AddToCorpus(const Unit &U, size_t NumFeatures,
- bool MayDeleteFile = false) {
+ void AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,
+ const std::vector<uint32_t> &FeatureSet) {
assert(!U.empty());
- uint8_t Hash[kSHA1NumBytes];
if (FeatureDebug)
Printf("ADD_TO_CORPUS %zd NF %zd\n", Inputs.size(), NumFeatures);
- ComputeSHA1(U.data(), U.size(), Hash);
- Hashes.insert(Sha1ToString(Hash));
Inputs.push_back(new InputInfo());
InputInfo &II = *Inputs.back();
II.U = U;
II.NumFeatures = NumFeatures;
II.MayDeleteFile = MayDeleteFile;
- memcpy(II.Sha1, Hash, kSHA1NumBytes);
+ II.FeatureSet = FeatureSet;
+ ComputeSHA1(U.data(), U.size(), II.Sha1);
+ Hashes.insert(Sha1ToString(II.Sha1));
UpdateCorpusDistribution();
+ PrintCorpus();
// ValidateFeatureSet();
}
+ // Debug-only
+ void PrintUnit(const Unit &U) {
+ if (!FeatureDebug) return;
+ for (uint8_t C : U) {
+ if (C != 'F' && C != 'U' && C != 'Z')
+ C = '.';
+ Printf("%c", C);
+ }
+ }
+
+ // Debug-only
+ void PrintFeatureSet(const std::vector<uint32_t> &FeatureSet) {
+ if (!FeatureDebug) return;
+ Printf("{");
+ for (uint32_t Feature: FeatureSet)
+ Printf("%u,", Feature);
+ Printf("}");
+ }
+
+ // Debug-only
+ void PrintCorpus() {
+ if (!FeatureDebug) return;
+ Printf("======= CORPUS:\n");
+ int i = 0;
+ for (auto II : Inputs) {
+ if (std::find(II->U.begin(), II->U.end(), 'F') != II->U.end()) {
+ Printf("[%2d] ", i);
+ Printf("%s sz=%zd ", Sha1ToString(II->Sha1).c_str(), II->U.size());
+ PrintUnit(II->U);
+ Printf(" ");
+ PrintFeatureSet(II->FeatureSet);
+ Printf("\n");
+ }
+ i++;
+ }
+ }
+
+ // If FeatureSet is the same as in II, replace II->U with {Data,Size}.
+ bool TryToReplace(InputInfo *II, const uint8_t *Data, size_t Size,
+ const std::vector<uint32_t> &FeatureSet) {
+ if (II->U.size() > Size && II->FeatureSet.size() &&
+ II->FeatureSet == FeatureSet) {
+ if (FeatureDebug)
+ Printf("Replace: %zd => %zd\n", II->U.size(), Size);
+ Replace(II, {Data, Data + Size});
+ PrintCorpus();
+ return true;
+ }
+ return false;
+ }
+
+ void Replace(InputInfo *II, const Unit &U) {
+ assert(II->U.size());
+ Hashes.erase(Sha1ToString(II->Sha1));
+ DeleteFile(*II);
+ ComputeSHA1(U.data(), U.size(), II->Sha1);
+ Hashes.insert(Sha1ToString(II->Sha1));
+ II->U = U;
+ }
+
bool HasUnit(const Unit &U) { return Hashes.count(Hash(U)); }
bool HasUnit(const std::string &H) { return Hashes.count(H); }
InputInfo &ChooseUnitToMutate(Random &Rand) {
@@ -124,10 +185,14 @@ class InputCorpus {
Printf("\n");
}
- void DeleteInput(size_t Idx) {
- InputInfo &II = *Inputs[Idx];
+ void DeleteFile(const InputInfo &II) {
if (!OutputCorpus.empty() && II.MayDeleteFile)
RemoveFile(DirPlusFile(OutputCorpus, Sha1ToString(II.Sha1)));
+ }
+
+ void DeleteInput(size_t Idx) {
+ InputInfo &II = *Inputs[Idx];
+ DeleteFile(II);
Unit().swap(II.U);
if (FeatureDebug)
Printf("EVICTED %zd\n", Idx);
diff --git a/lib/Fuzzer/FuzzerDriver.cpp b/lib/Fuzzer/FuzzerDriver.cpp
index 0453a7f443b5..87968893853e 100644
--- a/lib/Fuzzer/FuzzerDriver.cpp
+++ b/lib/Fuzzer/FuzzerDriver.cpp
@@ -265,7 +265,7 @@ int RunOneTest(Fuzzer *F, const char *InputFilePath, size_t MaxLen) {
Unit U = FileToVector(InputFilePath);
if (MaxLen && MaxLen < U.size())
U.resize(MaxLen);
- F->RunOne(U.data(), U.size());
+ F->ExecuteCallback(U.data(), U.size());
F->TryDetectingAMemoryLeak(U.data(), U.size(), true);
return 0;
}
@@ -441,7 +441,6 @@ int MinimizeCrashInputInternalStep(Fuzzer *F, InputCorpus *Corpus) {
Printf("INFO: The input is small enough, exiting\n");
exit(0);
}
- Corpus->AddToCorpus(U, 0);
F->SetMaxInputLen(U.size());
F->SetMaxMutationLen(U.size() - 1);
F->MinimizeCrashLoop(U);
@@ -572,6 +571,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
Options.UseCmp = Flags.use_cmp;
Options.UseValueProfile = Flags.use_value_profile;
Options.Shrink = Flags.shrink;
+ Options.ReduceInputs = Flags.reduce_inputs;
Options.ShuffleAtStartUp = Flags.shuffle;
Options.PreferSmall = Flags.prefer_small;
Options.ReloadIntervalSec = Flags.reload;
@@ -657,7 +657,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
size_t Size = SMR.ReadByteArraySize();
SMR.WriteByteArray(nullptr, 0);
const Unit tmp(SMR.GetByteArray(), SMR.GetByteArray() + Size);
- F->RunOne(tmp.data(), tmp.size());
+ F->ExecuteCallback(tmp.data(), tmp.size());
SMR.PostServer();
}
return 0;
diff --git a/lib/Fuzzer/FuzzerExtFunctionsWeak.cpp b/lib/Fuzzer/FuzzerExtFunctionsWeak.cpp
index 7b02b6f0b701..503f0395cf8f 100644
--- a/lib/Fuzzer/FuzzerExtFunctionsWeak.cpp
+++ b/lib/Fuzzer/FuzzerExtFunctionsWeak.cpp
@@ -41,7 +41,8 @@ namespace fuzzer {
ExternalFunctions::ExternalFunctions() {
#define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
this->NAME = ::NAME; \
- CheckFnPtr((void *)::NAME, #NAME, WARN);
+ CheckFnPtr(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(::NAME)), \
+ #NAME, WARN);
#include "FuzzerExtFunctions.def"
diff --git a/lib/Fuzzer/FuzzerFlags.def b/lib/Fuzzer/FuzzerFlags.def
index 7ff196c8fa96..5e70cbad3cf1 100644
--- a/lib/Fuzzer/FuzzerFlags.def
+++ b/lib/Fuzzer/FuzzerFlags.def
@@ -65,7 +65,9 @@ FUZZER_FLAG_INT(use_memmem, 1,
FUZZER_FLAG_INT(use_value_profile, 0,
"Experimental. Use value profile to guide fuzzing.")
FUZZER_FLAG_INT(use_cmp, 1, "Use CMP traces to guide mutations")
-FUZZER_FLAG_INT(shrink, 0, "Experimental. Try to shrink corpus elements.")
+FUZZER_FLAG_INT(shrink, 0, "Experimental. Try to shrink corpus inputs.")
+FUZZER_FLAG_INT(reduce_inputs, 0, "Experimental. "
+ "Try to reduce the size of inputs wile preserving their full feature sets")
FUZZER_FLAG_UNSIGNED(jobs, 0, "Number of jobs to run. If jobs >= 1 we spawn"
" this number of jobs in separate worker processes"
" with stdout/stderr redirected to fuzz-JOB.log.")
diff --git a/lib/Fuzzer/FuzzerIOWindows.cpp b/lib/Fuzzer/FuzzerIOWindows.cpp
index 75d4e3a06071..742520267b73 100644
--- a/lib/Fuzzer/FuzzerIOWindows.cpp
+++ b/lib/Fuzzer/FuzzerIOWindows.cpp
@@ -182,7 +182,7 @@ static size_t ParseFileName(const std::string &FileName, const size_t Offset) {
return Pos - Offset;
}
-// Parse a directory ending in separator, like: SomeDir\
+// Parse a directory ending in separator, like: `SomeDir\`
// Returns number of characters considered if successful.
static size_t ParseDir(const std::string &FileName, const size_t Offset) {
size_t Pos = Offset;
@@ -197,7 +197,7 @@ static size_t ParseDir(const std::string &FileName, const size_t Offset) {
return Pos - Offset;
}
-// Parse a servername and share, like: SomeServer\SomeShare\
+// Parse a servername and share, like: `SomeServer\SomeShare\`
// Returns number of characters considered if successful.
static size_t ParseServerAndShare(const std::string &FileName,
const size_t Offset) {
diff --git a/lib/Fuzzer/FuzzerInternal.h b/lib/Fuzzer/FuzzerInternal.h
index 5f184c2316e2..a732f895375e 100644
--- a/lib/Fuzzer/FuzzerInternal.h
+++ b/lib/Fuzzer/FuzzerInternal.h
@@ -65,7 +65,8 @@ public:
static void StaticFileSizeExceedCallback();
void ExecuteCallback(const uint8_t *Data, size_t Size);
- size_t RunOne(const uint8_t *Data, size_t Size);
+ bool RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile = false,
+ InputInfo *II = nullptr);
// Merge Corpora[1:] into Corpora[0].
void Merge(const std::vector<std::string> &Corpora);
@@ -95,13 +96,12 @@ private:
void InterruptCallback();
void MutateAndTestOne();
void ReportNewCoverage(InputInfo *II, const Unit &U);
- size_t RunOne(const Unit &U) { return RunOne(U.data(), U.size()); }
+ void PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size);
void WriteToOutputCorpus(const Unit &U);
void WriteUnitToFileWithPrefix(const Unit &U, const char *Prefix);
void PrintStats(const char *Where, const char *End = "\n", size_t Units = 0);
void PrintStatusForNewUnit(const Unit &U);
void ShuffleCorpus(UnitVector *V);
- void AddToCorpus(const Unit &U);
void CheckExitOnSrcPosOrItem();
// Trace-based fuzzing: we run a unit with some kind of tracing
@@ -142,6 +142,8 @@ private:
size_t MaxInputLen = 0;
size_t MaxMutationLen = 0;
+ std::vector<uint32_t> FeatureSetTmp;
+
// Need to know our own thread.
static thread_local bool IsMyThread;
};
diff --git a/lib/Fuzzer/FuzzerLoop.cpp b/lib/Fuzzer/FuzzerLoop.cpp
index fbf18357ede6..6816f3af8a6f 100644
--- a/lib/Fuzzer/FuzzerLoop.cpp
+++ b/lib/Fuzzer/FuzzerLoop.cpp
@@ -22,9 +22,6 @@
#include <set>
#if defined(__has_include)
-#if __has_include(<sanitizer / coverage_interface.h>)
-#include <sanitizer/coverage_interface.h>
-#endif
#if __has_include(<sanitizer / lsan_interface.h>)
#include <sanitizer/lsan_interface.h>
#endif
@@ -348,11 +345,8 @@ void Fuzzer::RereadOutputCorpus(size_t MaxSize) {
if (U.size() > MaxSize)
U.resize(MaxSize);
if (!Corpus.HasUnit(U)) {
- if (size_t NumFeatures = RunOne(U)) {
- CheckExitOnSrcPosOrItem();
- Corpus.AddToCorpus(U, NumFeatures);
+ if (RunOne(U.data(), U.size()))
Reloaded = true;
- }
}
}
if (Reloaded)
@@ -377,10 +371,7 @@ void Fuzzer::ShuffleAndMinimize(UnitVector *InitialCorpus) {
ExecuteCallback(&dummy, 0);
for (const auto &U : *InitialCorpus) {
- if (size_t NumFeatures = RunOne(U)) {
- CheckExitOnSrcPosOrItem();
- Corpus.AddToCorpus(U, NumFeatures);
- }
+ RunOne(U.data(), U.size());
TryDetectingAMemoryLeak(U.data(), U.size(),
/*DuringInitialCorpusExecution*/ true);
}
@@ -392,18 +383,7 @@ void Fuzzer::ShuffleAndMinimize(UnitVector *InitialCorpus) {
}
}
-size_t Fuzzer::RunOne(const uint8_t *Data, size_t Size) {
- if (!Size) return 0;
- TotalNumberOfRuns++;
-
- ExecuteCallback(Data, Size);
-
- size_t NumUpdatesBefore = Corpus.NumFeatureUpdates();
- TPC.CollectFeatures([&](size_t Feature) {
- Corpus.AddFeature(Feature, Size, Options.Shrink);
- });
- size_t NumUpdatesAfter = Corpus.NumFeatureUpdates();
-
+void Fuzzer::PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size) {
auto TimeOfUnit =
duration_cast<seconds>(UnitStopTime - UnitStartTime).count();
if (!(TotalNumberOfRuns & (TotalNumberOfRuns - 1)) &&
@@ -415,7 +395,34 @@ size_t Fuzzer::RunOne(const uint8_t *Data, size_t Size) {
Printf("Slowest unit: %zd s:\n", TimeOfLongestUnitInSeconds);
WriteUnitToFileWithPrefix({Data, Data + Size}, "slow-unit-");
}
- return NumUpdatesAfter - NumUpdatesBefore;
+}
+
+bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
+ InputInfo *II) {
+ if (!Size) return false;
+
+ ExecuteCallback(Data, Size);
+
+ FeatureSetTmp.clear();
+ size_t NumUpdatesBefore = Corpus.NumFeatureUpdates();
+ TPC.CollectFeatures([&](size_t Feature) {
+ Corpus.AddFeature(Feature, Size, Options.Shrink);
+ if (Options.ReduceInputs)
+ FeatureSetTmp.push_back(Feature);
+ });
+ PrintPulseAndReportSlowInput(Data, Size);
+ size_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore;
+ if (NumNewFeatures) {
+ Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures, MayDeleteFile,
+ FeatureSetTmp);
+ CheckExitOnSrcPosOrItem();
+ return true;
+ }
+ if (II && Corpus.TryToReplace(II, Data, Size, FeatureSetTmp)) {
+ CheckExitOnSrcPosOrItem();
+ return true;
+ }
+ return false;
}
size_t Fuzzer::GetCurrentUnitInFuzzingThead(const uint8_t **Data) const {
@@ -443,6 +450,7 @@ static bool LooseMemeq(const uint8_t *A, const uint8_t *B, size_t Size) {
}
void Fuzzer::ExecuteCallback(const uint8_t *Data, size_t Size) {
+ TotalNumberOfRuns++;
assert(InFuzzingThread());
if (SMR.IsClient())
SMR.WriteByteArray(Data, Size);
@@ -595,12 +603,9 @@ void Fuzzer::MutateAndTestOne() {
if (i == 0)
StartTraceRecording();
II.NumExecutedMutations++;
- if (size_t NumFeatures = RunOne(CurrentUnitData, Size)) {
- Corpus.AddToCorpus({CurrentUnitData, CurrentUnitData + Size}, NumFeatures,
- /*MayDeleteFile=*/true);
+ if (RunOne(CurrentUnitData, Size, /*MayDeleteFile=*/true, &II))
ReportNewCoverage(&II, {CurrentUnitData, CurrentUnitData + Size});
- CheckExitOnSrcPosOrItem();
- }
+
StopTraceRecording();
TryDetectingAMemoryLeak(CurrentUnitData, Size,
/*DuringInitialCorpusExecution*/ false);
@@ -638,7 +643,8 @@ void Fuzzer::MinimizeCrashLoop(const Unit &U) {
for (int i = 0; i < Options.MutateDepth; i++) {
size_t NewSize = MD.Mutate(CurrentUnitData, U.size(), MaxMutationLen);
assert(NewSize > 0 && NewSize <= MaxMutationLen);
- RunOne(CurrentUnitData, NewSize);
+ ExecuteCallback(CurrentUnitData, NewSize);
+ PrintPulseAndReportSlowInput(CurrentUnitData, NewSize);
TryDetectingAMemoryLeak(CurrentUnitData, NewSize,
/*DuringInitialCorpusExecution*/ false);
}
diff --git a/lib/Fuzzer/FuzzerOptions.h b/lib/Fuzzer/FuzzerOptions.h
index b1366789be00..9500235e2b1f 100644
--- a/lib/Fuzzer/FuzzerOptions.h
+++ b/lib/Fuzzer/FuzzerOptions.h
@@ -32,6 +32,7 @@ struct FuzzingOptions {
bool UseCmp = false;
bool UseValueProfile = false;
bool Shrink = false;
+ bool ReduceInputs = false;
int ReloadIntervalSec = 1;
bool ShuffleAtStartUp = true;
bool PreferSmall = true;
diff --git a/lib/Fuzzer/FuzzerUtilDarwin.cpp b/lib/Fuzzer/FuzzerUtilDarwin.cpp
index 9674368c355e..2df4872a9206 100644
--- a/lib/Fuzzer/FuzzerUtilDarwin.cpp
+++ b/lib/Fuzzer/FuzzerUtilDarwin.cpp
@@ -15,6 +15,8 @@
#include <mutex>
#include <signal.h>
#include <spawn.h>
+#include <stdlib.h>
+#include <string.h>
#include <sys/wait.h>
// There is no header for this on macOS so declare here
@@ -97,11 +99,16 @@ int ExecuteCommand(const std::string &Command) {
pid_t Pid;
char **Environ = environ; // Read from global
const char *CommandCStr = Command.c_str();
- const char *Argv[] = {"sh", "-c", CommandCStr, NULL};
+ char *const Argv[] = {
+ strdup("sh"),
+ strdup("-c"),
+ strdup(CommandCStr),
+ NULL
+ };
int ErrorCode = 0, ProcessStatus = 0;
// FIXME: We probably shouldn't hardcode the shell path.
ErrorCode = posix_spawn(&Pid, "/bin/sh", NULL, &SpawnAttributes,
- (char *const *)Argv, Environ);
+ Argv, Environ);
(void)posix_spawnattr_destroy(&SpawnAttributes);
if (!ErrorCode) {
pid_t SavedPid = Pid;
@@ -120,6 +127,8 @@ int ExecuteCommand(const std::string &Command) {
// Shell execution failure.
ProcessStatus = W_EXITCODE(127, 0);
}
+ for (unsigned i = 0, n = sizeof(Argv) / sizeof(Argv[0]); i < n; ++i)
+ free(Argv[i]);
// Restore the signal handlers of the current process when the last thread
// using this function finishes.
diff --git a/lib/Fuzzer/test/CMakeLists.txt b/lib/Fuzzer/test/CMakeLists.txt
index 1cf6c9502a2b..30566bdc87ae 100644
--- a/lib/Fuzzer/test/CMakeLists.txt
+++ b/lib/Fuzzer/test/CMakeLists.txt
@@ -118,6 +118,7 @@ set(Tests
SingleStrncmpTest
SpamyTest
ShrinkControlFlowTest
+ ShrinkControlFlowSimpleTest
ShrinkValueProfileTest
StrcmpTest
StrncmpOOBTest
@@ -271,5 +272,5 @@ add_lit_testsuite(check-fuzzer "Running Fuzzer tests"
# Don't add dependencies on Windows. The linker step would fail on Windows,
# since cmake will use link.exe for linking and won't include compiler-rt libs.
if(NOT MSVC)
- add_dependencies(check-fuzzer FileCheck sancov not)
+ add_dependencies(check-fuzzer FileCheck sancov not llvm-symbolizer)
endif()
diff --git a/lib/Fuzzer/test/FuzzerUnittest.cpp b/lib/Fuzzer/test/FuzzerUnittest.cpp
index 812894fd947f..1053c28527bf 100644
--- a/lib/Fuzzer/test/FuzzerUnittest.cpp
+++ b/lib/Fuzzer/test/FuzzerUnittest.cpp
@@ -5,6 +5,9 @@
// with ASan) involving C++ standard library types when using libcxx.
#define _LIBCPP_HAS_NO_ASAN
+// Do not attempt to use LLVM ostream from gtest.
+#define GTEST_NO_LLVM_RAW_OSTREAM 1
+
#include "FuzzerCorpus.h"
#include "FuzzerDictionary.h"
#include "FuzzerInternal.h"
@@ -590,7 +593,7 @@ TEST(Corpus, Distribution) {
size_t N = 10;
size_t TriesPerUnit = 1<<16;
for (size_t i = 0; i < N; i++)
- C->AddToCorpus(Unit{ static_cast<uint8_t>(i) }, 0);
+ C->AddToCorpus(Unit{ static_cast<uint8_t>(i) }, 0, false, {});
std::vector<size_t> Hist(N);
for (size_t i = 0; i < N * TriesPerUnit; i++) {
diff --git a/lib/Fuzzer/test/ShrinkControlFlowSimpleTest.cpp b/lib/Fuzzer/test/ShrinkControlFlowSimpleTest.cpp
new file mode 100644
index 000000000000..0afd26df23a0
--- /dev/null
+++ b/lib/Fuzzer/test/ShrinkControlFlowSimpleTest.cpp
@@ -0,0 +1,19 @@
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+
+// Test that we can find the minimal item in the corpus (3 bytes: "FUZ").
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+static volatile int Sink;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+ if (Size < 2) return 0;
+ if (Data[0] == 'F' && Data[Size / 2] == 'U' && Data[Size - 1] == 'Z')
+ Sink++;
+ return 0;
+}
+
diff --git a/lib/Fuzzer/test/reduce_inputs.test b/lib/Fuzzer/test/reduce_inputs.test
new file mode 100644
index 000000000000..a4a5c57123d3
--- /dev/null
+++ b/lib/Fuzzer/test/reduce_inputs.test
@@ -0,0 +1,13 @@
+# Test -reduce_inputs=1
+
+RUN: rm -rf %t/C
+RUN: mkdir -p %t/C
+RUN: LLVMFuzzer-ShrinkControlFlowSimpleTest -exit_on_item=0eb8e4ed029b774d80f2b66408203801cb982a60 -reduce_inputs=1 -runs=1000000 %t/C 2>&1 | FileCheck %s
+CHECK: INFO: found item with checksum '0eb8e4ed029b774d80f2b66408203801cb982a60'
+
+# Test that reduce_inputs deletes redundant files in the corpus.
+RUN: LLVMFuzzer-ShrinkControlFlowSimpleTest -runs=0 %t/C 2>&1 | FileCheck %s --check-prefix=COUNT
+COUNT: READ units: 3
+
+
+
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index c7f112887a30..80371780fb6d 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -2119,6 +2119,8 @@ class AssemblyWriter {
bool ShouldPreserveUseListOrder;
UseListOrderStack UseListOrders;
SmallVector<StringRef, 8> MDNames;
+ /// Synchronization scope names registered with LLVMContext.
+ SmallVector<StringRef, 8> SSNs;
public:
/// Construct an AssemblyWriter with an external SlotTracker
@@ -2134,10 +2136,15 @@ public:
void writeOperand(const Value *Op, bool PrintType);
void writeParamOperand(const Value *Operand, AttributeSet Attrs);
void writeOperandBundles(ImmutableCallSite CS);
- void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope);
- void writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ void writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID);
+ void writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID);
+ void writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
void writeAllMDNodes();
void writeMDNode(unsigned Slot, const MDNode *Node);
@@ -2199,30 +2206,42 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
}
-void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
- if (Ordering == AtomicOrdering::NotAtomic)
- return;
+void AssemblyWriter::writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID) {
+ switch (SSID) {
+ case SyncScope::System: {
+ break;
+ }
+ default: {
+ if (SSNs.empty())
+ Context.getSyncScopeNames(SSNs);
- switch (SynchScope) {
- case SingleThread: Out << " singlethread"; break;
- case CrossThread: break;
+ Out << " syncscope(\"";
+ PrintEscapedString(SSNs[SSID], Out);
+ Out << "\")";
+ break;
+ }
}
+}
+
+void AssemblyWriter::writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID) {
+ if (Ordering == AtomicOrdering::NotAtomic)
+ return;
+ writeSyncScope(Context, SSID);
Out << " " << toIRString(Ordering);
}
-void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+void AssemblyWriter::writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
FailureOrdering != AtomicOrdering::NotAtomic);
- switch (SynchScope) {
- case SingleThread: Out << " singlethread"; break;
- case CrossThread: break;
- }
-
+ writeSyncScope(Context, SSID);
Out << " " << toIRString(SuccessOrdering);
Out << " " << toIRString(FailureOrdering);
}
@@ -3215,21 +3234,22 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print atomic ordering/alignment for memory operations
if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (LI->isAtomic())
- writeAtomic(LI->getOrdering(), LI->getSynchScope());
+ writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID());
if (LI->getAlignment())
Out << ", align " << LI->getAlignment();
} else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
if (SI->isAtomic())
- writeAtomic(SI->getOrdering(), SI->getSynchScope());
+ writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID());
if (SI->getAlignment())
Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
- writeAtomicCmpXchg(CXI->getSuccessOrdering(), CXI->getFailureOrdering(),
- CXI->getSynchScope());
+ writeAtomicCmpXchg(CXI->getContext(), CXI->getSuccessOrdering(),
+ CXI->getFailureOrdering(), CXI->getSyncScopeID());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
- writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
+ writeAtomic(RMWI->getContext(), RMWI->getOrdering(),
+ RMWI->getSyncScopeID());
} else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
- writeAtomic(FI->getOrdering(), FI->getSynchScope());
+ writeAtomic(FI->getContext(), FI->getOrdering(), FI->getSyncScopeID());
}
// Print Metadata info.
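The net effect on printed IR: the old singlethread keyword becomes a quoted scope name, and the default (system) scope continues to print nothing. A hand-written illustration, assuming the scope names registered by default with LLVMContext:

  store atomic i32 %v, i32* %p seq_cst, align 4                            ; SyncScope::System
  store atomic i32 %v, i32* %p syncscope("singlethread") seq_cst, align 4  ; SyncScope::SingleThread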
diff --git a/lib/IR/CMakeLists.txt b/lib/IR/CMakeLists.txt
index 11259cbe1815..1cc229d68bfc 100644
--- a/lib/IR/CMakeLists.txt
+++ b/lib/IR/CMakeLists.txt
@@ -43,6 +43,7 @@ add_llvm_library(LLVMCore
Pass.cpp
PassManager.cpp
PassRegistry.cpp
+ SafepointIRVerifier.cpp
ProfileSummary.cpp
Statepoint.cpp
Type.cpp
diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp
index 3469026ad7ed..23ccd8d4cf42 100644
--- a/lib/IR/ConstantFold.cpp
+++ b/lib/IR/ConstantFold.cpp
@@ -242,7 +242,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
// X | -1 -> -1.
if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS))
- if (RHSC->isAllOnesValue())
+ if (RHSC->isMinusOne())
return RHSC;
Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
@@ -1015,33 +1015,33 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
switch (Opcode) {
case Instruction::Add:
- if (CI2->equalsInt(0)) return C1; // X + 0 == X
+ if (CI2->isZero()) return C1; // X + 0 == X
break;
case Instruction::Sub:
- if (CI2->equalsInt(0)) return C1; // X - 0 == X
+ if (CI2->isZero()) return C1; // X - 0 == X
break;
case Instruction::Mul:
- if (CI2->equalsInt(0)) return C2; // X * 0 == 0
- if (CI2->equalsInt(1))
+ if (CI2->isZero()) return C2; // X * 0 == 0
+ if (CI2->isOne())
return C1; // X * 1 == X
break;
case Instruction::UDiv:
case Instruction::SDiv:
- if (CI2->equalsInt(1))
+ if (CI2->isOne())
return C1; // X / 1 == X
- if (CI2->equalsInt(0))
+ if (CI2->isZero())
return UndefValue::get(CI2->getType()); // X / 0 == undef
break;
case Instruction::URem:
case Instruction::SRem:
- if (CI2->equalsInt(1))
+ if (CI2->isOne())
return Constant::getNullValue(CI2->getType()); // X % 1 == 0
- if (CI2->equalsInt(0))
+ if (CI2->isZero())
return UndefValue::get(CI2->getType()); // X % 0 == undef
break;
case Instruction::And:
if (CI2->isZero()) return C2; // X & 0 == 0
- if (CI2->isAllOnesValue())
+ if (CI2->isMinusOne())
return C1; // X & -1 == X
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
@@ -1078,12 +1078,12 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
}
break;
case Instruction::Or:
- if (CI2->equalsInt(0)) return C1; // X | 0 == X
- if (CI2->isAllOnesValue())
+ if (CI2->isZero()) return C1; // X | 0 == X
+ if (CI2->isMinusOne())
return C2; // X | -1 == -1
break;
case Instruction::Xor:
- if (CI2->equalsInt(0)) return C1; // X ^ 0 == X
+ if (CI2->isZero()) return C1; // X ^ 0 == X
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
switch (CE1->getOpcode()) {
@@ -1091,7 +1091,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
case Instruction::ICmp:
case Instruction::FCmp:
// cmp pred ^ true -> cmp !pred
- assert(CI2->equalsInt(1));
+ assert(CI2->isOne());
CmpInst::Predicate pred = (CmpInst::Predicate)CE1->getPredicate();
pred = CmpInst::getInversePredicate(pred);
return ConstantExpr::getCompare(pred, CE1->getOperand(0),
@@ -1126,18 +1126,18 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
case Instruction::Mul:
return ConstantInt::get(CI1->getContext(), C1V * C2V);
case Instruction::UDiv:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
return ConstantInt::get(CI1->getContext(), C1V.udiv(C2V));
case Instruction::SDiv:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
return UndefValue::get(CI1->getType()); // MIN_INT / -1 -> undef
return ConstantInt::get(CI1->getContext(), C1V.sdiv(C2V));
case Instruction::URem:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
return ConstantInt::get(CI1->getContext(), C1V.urem(C2V));
case Instruction::SRem:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
return UndefValue::get(CI1->getType()); // MIN_INT % -1 -> undef
return ConstantInt::get(CI1->getContext(), C1V.srem(C2V));
@@ -1170,7 +1170,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
case Instruction::LShr:
case Instruction::AShr:
case Instruction::Shl:
- if (CI1->equalsInt(0)) return C1;
+ if (CI1->isZero()) return C1;
break;
default:
break;
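
The hunks above mechanically swap the old ConstantInt::equalsInt(N) / isAllOnesValue() calls for the named predicates. A minimal sketch (not from the patch, using a hypothetical helper) of how a fold reads with the new API:

#include "llvm/IR/Constants.h"
using namespace llvm;

// Hypothetical helper, for illustration only.
static Constant *foldOrWithConstant(Constant *LHS, ConstantInt *RHS) {
  if (RHS->isZero())      // X | 0 == X
    return LHS;
  if (RHS->isMinusOne())  // X | -1 == -1
    return RHS;
  return nullptr;         // no trivial fold
}
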
diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp
index d387a6f0ecb9..e31779c83e3a 100644
--- a/lib/IR/Constants.cpp
+++ b/lib/IR/Constants.cpp
@@ -512,7 +512,7 @@ ConstantInt *ConstantInt::getFalse(LLVMContext &Context) {
}
Constant *ConstantInt::getTrue(Type *Ty) {
- assert(Ty->getScalarType()->isIntegerTy(1) && "Type not i1 or vector of i1.");
+ assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1.");
ConstantInt *TrueC = ConstantInt::getTrue(Ty->getContext());
if (auto *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::getSplat(VTy->getNumElements(), TrueC);
@@ -520,7 +520,7 @@ Constant *ConstantInt::getTrue(Type *Ty) {
}
Constant *ConstantInt::getFalse(Type *Ty) {
- assert(Ty->getScalarType()->isIntegerTy(1) && "Type not i1 or vector of i1.");
+ assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1.");
ConstantInt *FalseC = ConstantInt::getFalse(Ty->getContext());
if (auto *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::getSplat(VTy->getNumElements(), FalseC);
@@ -1635,9 +1635,9 @@ Constant *ConstantExpr::getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced) {
Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
bool OnlyIfReduced) {
- assert(C->getType()->getScalarType()->isPointerTy() &&
+ assert(C->getType()->isPtrOrPtrVectorTy() &&
"PtrToInt source must be pointer or pointer vector");
- assert(DstTy->getScalarType()->isIntegerTy() &&
+ assert(DstTy->isIntOrIntVectorTy() &&
"PtrToInt destination must be integer or integer vector");
assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
if (isa<VectorType>(C->getType()))
@@ -1648,9 +1648,9 @@ Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy,
bool OnlyIfReduced) {
- assert(C->getType()->getScalarType()->isIntegerTy() &&
+ assert(C->getType()->isIntOrIntVectorTy() &&
"IntToPtr source must be integer or integer vector");
- assert(DstTy->getScalarType()->isPointerTy() &&
+ assert(DstTy->isPtrOrPtrVectorTy() &&
"IntToPtr destination must be a pointer or pointer vector");
assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
if (isa<VectorType>(C->getType()))
@@ -1914,8 +1914,8 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
Constant *RHS, bool OnlyIfReduced) {
assert(LHS->getType() == RHS->getType());
- assert(pred >= ICmpInst::FIRST_ICMP_PREDICATE &&
- pred <= ICmpInst::LAST_ICMP_PREDICATE && "Invalid ICmp Predicate");
+ assert(CmpInst::isIntPredicate((CmpInst::Predicate)pred) &&
+ "Invalid ICmp Predicate");
if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
return FC; // Fold a few common cases...
@@ -1939,7 +1939,8 @@ Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
Constant *ConstantExpr::getFCmp(unsigned short pred, Constant *LHS,
Constant *RHS, bool OnlyIfReduced) {
assert(LHS->getType() == RHS->getType());
- assert(pred <= FCmpInst::LAST_FCMP_PREDICATE && "Invalid FCmp Predicate");
+ assert(CmpInst::isFPPredicate((CmpInst::Predicate)pred) &&
+ "Invalid FCmp Predicate");
if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
return FC; // Fold a few common cases...
@@ -2379,32 +2380,32 @@ void ConstantDataSequential::destroyConstantImpl() {
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint8_t> Elts) {
Type *Ty = ArrayType::get(Type::getInt8Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 1), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
Type *Ty = ArrayType::get(Type::getInt16Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
Type *Ty = ArrayType::get(Type::getInt32Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
Type *Ty = ArrayType::get(Type::getInt64Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<float> Elts) {
Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<double> Elts) {
Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
/// getFP() constructors - Return a constant with array type with an element
@@ -2416,27 +2417,26 @@ Constant *ConstantDataArray::getFP(LLVMContext &Context,
ArrayRef<uint16_t> Elts) {
Type *Ty = ArrayType::get(Type::getHalfTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataArray::getFP(LLVMContext &Context,
ArrayRef<uint32_t> Elts) {
Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataArray::getFP(LLVMContext &Context,
ArrayRef<uint64_t> Elts) {
Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataArray::getString(LLVMContext &Context,
StringRef Str, bool AddNull) {
if (!AddNull) {
const uint8_t *Data = reinterpret_cast<const uint8_t *>(Str.data());
- return get(Context, makeArrayRef(const_cast<uint8_t *>(Data),
- Str.size()));
+ return get(Context, makeArrayRef(Data, Str.size()));
}
SmallVector<uint8_t, 64> ElementVals;
@@ -2451,32 +2451,32 @@ Constant *ConstantDataArray::getString(LLVMContext &Context,
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint8_t> Elts){
Type *Ty = VectorType::get(Type::getInt8Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 1), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
Type *Ty = VectorType::get(Type::getInt16Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
Type *Ty = VectorType::get(Type::getInt32Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
Type *Ty = VectorType::get(Type::getInt64Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<float> Elts) {
Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<double> Elts) {
Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
/// getFP() constructors - Return a constant with vector type with an element
@@ -2488,19 +2488,19 @@ Constant *ConstantDataVector::getFP(LLVMContext &Context,
ArrayRef<uint16_t> Elts) {
Type *Ty = VectorType::get(Type::getHalfTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataVector::getFP(LLVMContext &Context,
ArrayRef<uint32_t> Elts) {
Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataVector::getFP(LLVMContext &Context,
ArrayRef<uint64_t> Elts) {
Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) {
@@ -2555,13 +2555,13 @@ uint64_t ConstantDataSequential::getElementAsInteger(unsigned Elt) const {
switch (getElementType()->getIntegerBitWidth()) {
default: llvm_unreachable("Invalid bitwidth for CDS");
case 8:
- return *const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(EltPtr));
+ return *reinterpret_cast<const uint8_t *>(EltPtr);
case 16:
- return *const_cast<uint16_t *>(reinterpret_cast<const uint16_t *>(EltPtr));
+ return *reinterpret_cast<const uint16_t *>(EltPtr);
case 32:
- return *const_cast<uint32_t *>(reinterpret_cast<const uint32_t *>(EltPtr));
+ return *reinterpret_cast<const uint32_t *>(EltPtr);
case 64:
- return *const_cast<uint64_t *>(reinterpret_cast<const uint64_t *>(EltPtr));
+ return *reinterpret_cast<const uint64_t *>(EltPtr);
}
}
@@ -2589,16 +2589,13 @@ APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const {
float ConstantDataSequential::getElementAsFloat(unsigned Elt) const {
assert(getElementType()->isFloatTy() &&
"Accessor can only be used when element is a 'float'");
- const float *EltPtr = reinterpret_cast<const float *>(getElementPointer(Elt));
- return *const_cast<float *>(EltPtr);
+ return *reinterpret_cast<const float *>(getElementPointer(Elt));
}
double ConstantDataSequential::getElementAsDouble(unsigned Elt) const {
assert(getElementType()->isDoubleTy() &&
"Accessor can only be used when element is a 'float'");
- const double *EltPtr =
- reinterpret_cast<const double *>(getElementPointer(Elt));
- return *const_cast<double *>(EltPtr);
+ return *reinterpret_cast<const double *>(getElementPointer(Elt));
}
Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const {
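
The ConstantData{Array,Vector}::get() changes only drop redundant const_casts; the public API is unchanged. A minimal usage sketch, assuming a live LLVMContext:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include <cstdint>
using namespace llvm;

Constant *makeI32Array(LLVMContext &Ctx) {
  uint32_t Vals[] = {1, 2, 3, 4};
  // Produces a [4 x i32] constant; the element data is copied into the
  // uniquing table, so the local buffer need not outlive the call.
  return ConstantDataArray::get(Ctx, makeArrayRef(Vals));
}
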
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index 4ff0261a7f08..2165ae5a9470 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -50,6 +50,7 @@ void llvm::initializeCore(PassRegistry &Registry) {
initializePrintModulePassWrapperPass(Registry);
initializePrintFunctionPassWrapperPass(Registry);
initializePrintBasicBlockPassPass(Registry);
+ initializeSafepointIRVerifierPass(Registry);
initializeVerifierLegacyPassPass(Registry);
}
@@ -2755,11 +2756,14 @@ static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
llvm_unreachable("Invalid AtomicOrdering value!");
}
+// TODO: Should this and other atomic instructions support building with
+// "syncscope"?
LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering,
LLVMBool isSingleThread, const char *Name) {
return wrap(
unwrap(B)->CreateFence(mapFromLLVMOrdering(Ordering),
- isSingleThread ? SingleThread : CrossThread,
+ isSingleThread ? SyncScope::SingleThread
+ : SyncScope::System,
Name));
}
@@ -3041,7 +3045,8 @@ LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,LLVMAtomicRMWBinOp op,
case LLVMAtomicRMWBinOpUMin: intop = AtomicRMWInst::UMin; break;
}
return wrap(unwrap(B)->CreateAtomicRMW(intop, unwrap(PTR), unwrap(Val),
- mapFromLLVMOrdering(ordering), singleThread ? SingleThread : CrossThread));
+ mapFromLLVMOrdering(ordering), singleThread ? SyncScope::SingleThread
+ : SyncScope::System));
}
LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
@@ -3053,7 +3058,7 @@ LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
return wrap(unwrap(B)->CreateAtomicCmpXchg(unwrap(Ptr), unwrap(Cmp),
unwrap(New), mapFromLLVMOrdering(SuccessOrdering),
mapFromLLVMOrdering(FailureOrdering),
- singleThread ? SingleThread : CrossThread));
+ singleThread ? SyncScope::SingleThread : SyncScope::System));
}
@@ -3061,17 +3066,18 @@ LLVMBool LLVMIsAtomicSingleThread(LLVMValueRef AtomicInst) {
Value *P = unwrap<Value>(AtomicInst);
if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
- return I->getSynchScope() == SingleThread;
- return cast<AtomicCmpXchgInst>(P)->getSynchScope() == SingleThread;
+ return I->getSyncScopeID() == SyncScope::SingleThread;
+ return cast<AtomicCmpXchgInst>(P)->getSyncScopeID() ==
+ SyncScope::SingleThread;
}
void LLVMSetAtomicSingleThread(LLVMValueRef AtomicInst, LLVMBool NewValue) {
Value *P = unwrap<Value>(AtomicInst);
- SynchronizationScope Sync = NewValue ? SingleThread : CrossThread;
+ SyncScope::ID SSID = NewValue ? SyncScope::SingleThread : SyncScope::System;
if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
- return I->setSynchScope(Sync);
- return cast<AtomicCmpXchgInst>(P)->setSynchScope(Sync);
+ return I->setSyncScopeID(SSID);
+ return cast<AtomicCmpXchgInst>(P)->setSyncScopeID(SSID);
}
LLVMAtomicOrdering LLVMGetCmpXchgSuccessOrdering(LLVMValueRef CmpXchgInst) {
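
At the C API level the scope is still exposed as a boolean, now translated to SyncScope::SingleThread or SyncScope::System internally. A small sketch, assuming RMW was created earlier with LLVMBuildAtomicRMW:

#include "llvm-c/Core.h"

void forceSingleThread(LLVMValueRef RMW) {
  // Query and set the single-thread flag through the stable C API; the
  // SyncScope::ID plumbing stays an implementation detail of the C++ layer.
  if (!LLVMIsAtomicSingleThread(RMW))
    LLVMSetAtomicSingleThread(RMW, 1);
}
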
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index 3dd653d2d047..365cb019aec4 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -362,13 +362,13 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
(LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
IgnoreAlignment) &&
LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
- LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope();
+ LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
(SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
IgnoreAlignment) &&
SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
- SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();
+ SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
if (const CallInst *CI = dyn_cast<CallInst>(I1))
@@ -386,7 +386,7 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
- FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
+ FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
@@ -394,12 +394,13 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
CXI->getFailureOrdering() ==
cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
- CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
+ CXI->getSyncScopeID() ==
+ cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
- RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope();
+ RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
return true;
}
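
With haveSameSpecialState comparing SyncScope::IDs, two otherwise identical atomics that differ only in scope are no longer considered identical. A minimal sketch (not from the patch):

#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void compareFences(LLVMContext &Ctx) {
  FenceInst *A = new FenceInst(Ctx, AtomicOrdering::Acquire, SyncScope::System);
  FenceInst *B = new FenceInst(Ctx, AtomicOrdering::Acquire, SyncScope::SingleThread);
  bool Same = A->isIdenticalTo(B); // false: sync scope IDs differ
  (void)Same;
  A->deleteValue();
  B->deleteValue();
}
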
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index a79b00be4ffe..2c49564e328b 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -1304,34 +1304,34 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, Instruction *InsertBef)
: LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertBef) {}
+ SyncScope::System, InsertBef) {}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, BasicBlock *InsertAE)
: LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertAE) {}
+ SyncScope::System, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope, Instruction *InsertBef)
+ SyncScope::ID SSID, Instruction *InsertBef)
: UnaryInstruction(Ty, Load, Ptr, InsertBef) {
assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
setName(Name);
}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAE)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
setName(Name);
}
@@ -1419,16 +1419,16 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
Instruction *InsertBefore)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertBefore) {}
+ SyncScope::System, InsertBefore) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
BasicBlock *InsertAtEnd)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertAtEnd) {}
+ SyncScope::System, InsertAtEnd) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -1438,13 +1438,13 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -1454,7 +1454,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
}
@@ -1474,13 +1474,13 @@ void StoreInst::setAlignment(unsigned Align) {
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
Op<0>() = Ptr;
Op<1>() = Cmp;
Op<2>() = NewVal;
setSuccessOrdering(SuccessOrdering);
setFailureOrdering(FailureOrdering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
assert(getOperand(0) && getOperand(1) && getOperand(2) &&
"All operands must be non-null!");
@@ -1507,25 +1507,25 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(
StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(
StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}
//===----------------------------------------------------------------------===//
@@ -1534,12 +1534,12 @@ AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
Op<0>() = Ptr;
Op<1>() = Val;
setOperation(Operation);
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
assert(getOperand(0) && getOperand(1) &&
"All operands must be non-null!");
@@ -1554,24 +1554,24 @@ void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Val->getType(), AtomicRMW,
OperandTraits<AtomicRMWInst>::op_begin(this),
OperandTraits<AtomicRMWInst>::operands(this),
InsertBefore) {
- Init(Operation, Ptr, Val, Ordering, SynchScope);
+ Init(Operation, Ptr, Val, Ordering, SSID);
}
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Val->getType(), AtomicRMW,
OperandTraits<AtomicRMWInst>::op_begin(this),
OperandTraits<AtomicRMWInst>::operands(this),
InsertAtEnd) {
- Init(Operation, Ptr, Val, Ordering, SynchScope);
+ Init(Operation, Ptr, Val, Ordering, SSID);
}
//===----------------------------------------------------------------------===//
@@ -1579,19 +1579,19 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
//===----------------------------------------------------------------------===//
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
//===----------------------------------------------------------------------===//
@@ -3064,16 +3064,14 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
return false;
- return SrcTy->getScalarType()->isPointerTy() &&
- DstTy->getScalarType()->isIntegerTy();
+ return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
case Instruction::IntToPtr:
if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
return false;
if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
return false;
- return SrcTy->getScalarType()->isIntegerTy() &&
- DstTy->getScalarType()->isPointerTy();
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
case Instruction::BitCast: {
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
@@ -3797,12 +3795,12 @@ AllocaInst *AllocaInst::cloneImpl() const {
LoadInst *LoadInst::cloneImpl() const {
return new LoadInst(getOperand(0), Twine(), isVolatile(),
- getAlignment(), getOrdering(), getSynchScope());
+ getAlignment(), getOrdering(), getSyncScopeID());
}
StoreInst *StoreInst::cloneImpl() const {
return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
- getAlignment(), getOrdering(), getSynchScope());
+ getAlignment(), getOrdering(), getSyncScopeID());
}
@@ -3810,7 +3808,7 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
AtomicCmpXchgInst *Result =
new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
getSuccessOrdering(), getFailureOrdering(),
- getSynchScope());
+ getSyncScopeID());
Result->setVolatile(isVolatile());
Result->setWeak(isWeak());
return Result;
@@ -3818,14 +3816,14 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
AtomicRMWInst *Result =
- new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1),
- getOrdering(), getSynchScope());
+ new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
+ getOrdering(), getSyncScopeID());
Result->setVolatile(isVolatile());
return Result;
}
FenceInst *FenceInst::cloneImpl() const {
- return new FenceInst(getContext(), getOrdering(), getSynchScope());
+ return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}
TruncInst *TruncInst::cloneImpl() const {
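
All the atomic instruction constructors and setters now take a SyncScope::ID in place of SynchronizationScope. A minimal sketch of marking an existing load atomic under the new signature, assuming LI points at a LoadInst with a legal atomic access size:

// LI is assumed to be a LoadInst * obtained elsewhere.
LI->setAtomic(AtomicOrdering::Acquire, SyncScope::SingleThread);
LI->setAlignment(4);
assert(LI->getSyncScopeID() == SyncScope::SingleThread);
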
diff --git a/lib/IR/LLVMContext.cpp b/lib/IR/LLVMContext.cpp
index 2e13f362344d..c58459d6d5f5 100644
--- a/lib/IR/LLVMContext.cpp
+++ b/lib/IR/LLVMContext.cpp
@@ -81,6 +81,18 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
assert(GCTransitionEntry->second == LLVMContext::OB_gc_transition &&
"gc-transition operand bundle id drifted!");
(void)GCTransitionEntry;
+
+ SyncScope::ID SingleThreadSSID =
+ pImpl->getOrInsertSyncScopeID("singlethread");
+ assert(SingleThreadSSID == SyncScope::SingleThread &&
+ "singlethread synchronization scope ID drifted!");
+ (void)SingleThreadSSID;
+
+ SyncScope::ID SystemSSID =
+ pImpl->getOrInsertSyncScopeID("");
+ assert(SystemSSID == SyncScope::System &&
+ "system synchronization scope ID drifted!");
+ (void)SystemSSID;
}
LLVMContext::~LLVMContext() { delete pImpl; }
@@ -255,6 +267,14 @@ uint32_t LLVMContext::getOperandBundleTagID(StringRef Tag) const {
return pImpl->getOperandBundleTagID(Tag);
}
+SyncScope::ID LLVMContext::getOrInsertSyncScopeID(StringRef SSN) {
+ return pImpl->getOrInsertSyncScopeID(SSN);
+}
+
+void LLVMContext::getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const {
+ pImpl->getSyncScopeNames(SSNs);
+}
+
void LLVMContext::setGC(const Function &Fn, std::string GCName) {
auto It = pImpl->GCNames.find(&Fn);
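
Registration order in the constructor is what pins the pre-defined IDs. A sketch of how a front end might obtain an additional, target-specific scope; the name "agent" is an arbitrary example, not something the patch defines:

#include "llvm/IR/LLVMContext.h"
#include <cassert>
using namespace llvm;

void registerScopes(LLVMContext &Ctx) {
  // Pre-defined scopes always come back with their fixed IDs.
  assert(Ctx.getOrInsertSyncScopeID("singlethread") == SyncScope::SingleThread);
  assert(Ctx.getOrInsertSyncScopeID("") == SyncScope::System);
  // Any other name is interned on first use and returns a stable ID afterwards.
  SyncScope::ID Agent = Ctx.getOrInsertSyncScopeID("agent");
  (void)Agent;
}
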
diff --git a/lib/IR/LLVMContextImpl.cpp b/lib/IR/LLVMContextImpl.cpp
index c19e1be44fdc..57dd08b36fe7 100644
--- a/lib/IR/LLVMContextImpl.cpp
+++ b/lib/IR/LLVMContextImpl.cpp
@@ -205,6 +205,20 @@ uint32_t LLVMContextImpl::getOperandBundleTagID(StringRef Tag) const {
return I->second;
}
+SyncScope::ID LLVMContextImpl::getOrInsertSyncScopeID(StringRef SSN) {
+ auto NewSSID = SSC.size();
+ assert(NewSSID < std::numeric_limits<SyncScope::ID>::max() &&
+ "Hit the maximum number of synchronization scopes allowed!");
+ return SSC.insert(std::make_pair(SSN, SyncScope::ID(NewSSID))).first->second;
+}
+
+void LLVMContextImpl::getSyncScopeNames(
+ SmallVectorImpl<StringRef> &SSNs) const {
+ SSNs.resize(SSC.size());
+ for (const auto &SSE : SSC)
+ SSNs[SSE.second] = SSE.first();
+}
+
/// Singleton instance of the OptBisect class.
///
/// This singleton is accessed via the LLVMContext::getOptBisect() function. It
diff --git a/lib/IR/LLVMContextImpl.h b/lib/IR/LLVMContextImpl.h
index 395beb57fe37..e413a4f34432 100644
--- a/lib/IR/LLVMContextImpl.h
+++ b/lib/IR/LLVMContextImpl.h
@@ -1297,6 +1297,20 @@ public:
void getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const;
uint32_t getOperandBundleTagID(StringRef Tag) const;
+ /// A set of interned synchronization scopes. The StringMap maps
+ /// synchronization scope names to their respective synchronization scope IDs.
+ StringMap<SyncScope::ID> SSC;
+
+ /// getOrInsertSyncScopeID - Maps synchronization scope name to
+ /// synchronization scope ID. Every synchronization scope registered with
+ /// LLVMContext has a unique ID, except for the pre-defined ones.
+ SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
+
+ /// getSyncScopeNames - Populates a client-supplied SmallVector with
+ /// synchronization scope names registered with LLVMContext. Synchronization
+ /// scope names are ordered by increasing synchronization scope IDs.
+ void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;
+
/// Maintain the GC name for each function.
///
/// This saves allocating an additional word in Function for programs which

diff --git a/lib/IR/Module.cpp b/lib/IR/Module.cpp
index f8853ed169c5..fdc7de6eaa34 100644
--- a/lib/IR/Module.cpp
+++ b/lib/IR/Module.cpp
@@ -88,7 +88,7 @@ Module::~Module() {
delete static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab);
}
-RandomNumberGenerator *Module::createRNG(const Pass* P) const {
+std::unique_ptr<RandomNumberGenerator> Module::createRNG(const Pass* P) const {
SmallString<32> Salt(P->getPassName());
// This RNG is guaranteed to produce the same random stream only
@@ -103,7 +103,7 @@ RandomNumberGenerator *Module::createRNG(const Pass* P) const {
// store salt metadata from the Module constructor.
Salt += sys::path::filename(getModuleIdentifier());
- return new RandomNumberGenerator(Salt);
+ return std::unique_ptr<RandomNumberGenerator>{new RandomNumberGenerator(Salt)};
}
/// getNamedValue - Return the first global value in the module with
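
Module::createRNG now hands ownership to the caller. A minimal usage sketch, assuming M is a Module and P is the requesting Pass:

#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/RandomNumberGenerator.h"
#include <memory>
using namespace llvm;

uint64_t drawSalted(const Module &M, const Pass *P) {
  // The unique_ptr releases the generator automatically; no manual delete.
  std::unique_ptr<RandomNumberGenerator> RNG = M.createRNG(P);
  return (*RNG)();
}
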
diff --git a/lib/IR/SafepointIRVerifier.cpp b/lib/IR/SafepointIRVerifier.cpp
new file mode 100644
index 000000000000..8b328c221da3
--- /dev/null
+++ b/lib/IR/SafepointIRVerifier.cpp
@@ -0,0 +1,437 @@
+//===-- SafepointIRVerifier.cpp - Verify gc.statepoint invariants ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run a sanity check on the IR to ensure that Safepoints - if they've been
+// inserted - were inserted correctly. In particular, look for use of
+// non-relocated values after a safepoint. Its primary use is to check the
+// correctness of safepoint insertion immediately after insertion, but it can
+// also be used to verify that later transforms have not found a way to break
+// safepoint semantics.
+//
+// In its current form, this verifier checks a property which is sufficient, but
+// not necessary for correctness. There are some cases where an unrelocated
+// pointer can be used after the safepoint. Consider this example:
+//
+// a = ...
+// b = ...
+// (a',b') = safepoint(a,b)
+// c = cmp eq a b
+// br c, ..., ....
+//
+// Because it is valid to reorder 'c' above the safepoint, this is legal. In
+// practice, this is a somewhat uncommon transform, but CodeGenPrep does create
+// idioms like this. The verifier knows about these cases and avoids reporting
+// false positives.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/SafepointIRVerifier.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "safepoint-ir-verifier"
+
+using namespace llvm;
+
+/// This option is used for writing test cases. Instead of crashing the program
+/// when verification fails, report a message to the console (for FileCheck
+/// usage) and continue execution as if nothing happened.
+static cl::opt<bool> PrintOnly("safepoint-ir-verifier-print-only",
+ cl::init(false));
+
+static void Verify(const Function &F, const DominatorTree &DT);
+
+struct SafepointIRVerifier : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ DominatorTree DT;
+ SafepointIRVerifier() : FunctionPass(ID) {
+ initializeSafepointIRVerifierPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override {
+ DT.recalculate(F);
+ Verify(F, DT);
+ return false; // no modifications
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ StringRef getPassName() const override { return "safepoint verifier"; }
+};
+
+void llvm::verifySafepointIR(Function &F) {
+ SafepointIRVerifier pass;
+ pass.runOnFunction(F);
+}
+
+char SafepointIRVerifier::ID = 0;
+
+FunctionPass *llvm::createSafepointIRVerifierPass() {
+ return new SafepointIRVerifier();
+}
+
+INITIALIZE_PASS_BEGIN(SafepointIRVerifier, "verify-safepoint-ir",
+ "Safepoint IR Verifier", false, true)
+INITIALIZE_PASS_END(SafepointIRVerifier, "verify-safepoint-ir",
+ "Safepoint IR Verifier", false, true)
+
+static bool isGCPointerType(Type *T) {
+ if (auto *PT = dyn_cast<PointerType>(T))
+ // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
+ // GC managed heap. We know that a pointer into this heap needs to be
+ // updated and that no other pointer does.
+ return (1 == PT->getAddressSpace());
+ return false;
+}
+
+static bool containsGCPtrType(Type *Ty) {
+ if (isGCPointerType(Ty))
+ return true;
+ if (VectorType *VT = dyn_cast<VectorType>(Ty))
+ return isGCPointerType(VT->getScalarType());
+ if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
+ return containsGCPtrType(AT->getElementType());
+ if (StructType *ST = dyn_cast<StructType>(Ty))
+ return std::any_of(ST->subtypes().begin(), ST->subtypes().end(),
+ containsGCPtrType);
+ return false;
+}
+
+// Debugging aid -- prints a [Begin, End) range of values.
+template<typename IteratorTy>
+static void PrintValueSet(raw_ostream &OS, IteratorTy Begin, IteratorTy End) {
+ OS << "[ ";
+ while (Begin != End) {
+ OS << **Begin << " ";
+ ++Begin;
+ }
+ OS << "]";
+}
+
+/// The verifier algorithm is phrased in terms of availability. The set of
+/// values "available" at a given point in the control flow graph is the set of
+/// correctly relocated values at that point, and is a subset of the set of
+/// definitions dominating that point.
+
+/// State we compute and track per basic block.
+struct BasicBlockState {
+ // Set of values available coming in, before the phi nodes
+ DenseSet<const Value *> AvailableIn;
+
+ // Set of values available going out
+ DenseSet<const Value *> AvailableOut;
+
+ // AvailableOut minus AvailableIn.
+ // All elements are Instructions
+ DenseSet<const Value *> Contribution;
+
+ // True if this block contains a safepoint and thus AvailableIn does not
+ // contribute to AvailableOut.
+ bool Cleared = false;
+};
+
+
+/// Gather all the definitions dominating the start of BB into Result. This is
+/// simply the Defs introduced by every dominating basic block and the function
+/// arguments.
+static void GatherDominatingDefs(const BasicBlock *BB,
+ DenseSet<const Value *> &Result,
+ const DominatorTree &DT,
+ DenseMap<const BasicBlock *, BasicBlockState *> &BlockMap) {
+ DomTreeNode *DTN = DT[const_cast<BasicBlock *>(BB)];
+
+ while (DTN->getIDom()) {
+ DTN = DTN->getIDom();
+ const auto &Defs = BlockMap[DTN->getBlock()]->Contribution;
+ Result.insert(Defs.begin(), Defs.end());
+ // If this block is 'Cleared', then nothing LiveIn to this block can be
+ // available after this block completes. Note: This turns out to be
+ // really important for reducing memory consumption of the initial available
+ // sets and thus peak memory usage by this verifier.
+ if (BlockMap[DTN->getBlock()]->Cleared)
+ return;
+ }
+
+ for (const Argument &A : BB->getParent()->args())
+ if (containsGCPtrType(A.getType()))
+ Result.insert(&A);
+}
+
+/// Model the effect of an instruction on the set of available values.
+static void TransferInstruction(const Instruction &I, bool &Cleared,
+ DenseSet<const Value *> &Available) {
+ if (isStatepoint(I)) {
+ Cleared = true;
+ Available.clear();
+ } else if (containsGCPtrType(I.getType()))
+ Available.insert(&I);
+}
+
+/// Compute the AvailableOut set for BB from its BasicBlockState BBS. FirstPass
+/// is set when the verifier runs for the first time, computing the AvailableOut
+/// set for BB.
+static void TransferBlock(const BasicBlock *BB,
+ BasicBlockState &BBS, bool FirstPass) {
+
+ const DenseSet<const Value *> &AvailableIn = BBS.AvailableIn;
+ DenseSet<const Value *> &AvailableOut = BBS.AvailableOut;
+
+ if (BBS.Cleared) {
+ // AvailableOut does not change no matter how the input changes, just
+ // leave it be. We need to force this calculation the first time so that
+ // we have an AvailableOut at all.
+ if (FirstPass) {
+ AvailableOut = BBS.Contribution;
+ }
+ } else {
+ // Otherwise, we need to reduce the AvailableOut set by things which are no
+ // longer in our AvailableIn
+ DenseSet<const Value *> Temp = BBS.Contribution;
+ set_union(Temp, AvailableIn);
+ AvailableOut = std::move(Temp);
+ }
+
+ DEBUG(dbgs() << "Transfered block " << BB->getName() << " from ";
+ PrintValueSet(dbgs(), AvailableIn.begin(), AvailableIn.end());
+ dbgs() << " to ";
+ PrintValueSet(dbgs(), AvailableOut.begin(), AvailableOut.end());
+ dbgs() << "\n";);
+}
+
+/// A given derived pointer can have multiple base pointers through phi/selects.
+/// This type indicates when the base pointer is exclusively constant
+/// (ExclusivelySomeConstant), and if that constant is proven to be exclusively
+/// null, we record that as ExclusivelyNull. In all other cases, the BaseType is
+/// NonConstant.
+enum BaseType {
+ NonConstant = 1, // Base pointer is not exclusively constant.
+ ExclusivelyNull,
+ ExclusivelySomeConstant // Base pointers for a given derived pointer come from a
+ // set of constants, but they are not exclusively
+ // null.
+};
+
+/// Return the baseType for Val which states whether Val is exclusively
+/// derived from constant/null, or not exclusively derived from constant.
+/// Val is exclusively derived from a constant base when all operands of phis
+/// and selects are derived from a constant base.
+static enum BaseType getBaseType(const Value *Val) {
+
+ SmallVector<const Value *, 32> Worklist;
+ DenseSet<const Value *> Visited;
+ bool isExclusivelyDerivedFromNull = true;
+ Worklist.push_back(Val);
+ // Strip through all the bitcasts and geps to get the base pointer. Also check for
+ // the exclusive value when there can be multiple base pointers (through phis
+ // or selects).
+ while(!Worklist.empty()) {
+ const Value *V = Worklist.pop_back_val();
+ if (!Visited.insert(V).second)
+ continue;
+
+ if (const auto *CI = dyn_cast<CastInst>(V)) {
+ Worklist.push_back(CI->stripPointerCasts());
+ continue;
+ }
+ if (const auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
+ Worklist.push_back(GEP->getPointerOperand());
+ continue;
+ }
+ // Push all the incoming values of phi node into the worklist for
+ // processing.
+ if (const auto *PN = dyn_cast<PHINode>(V)) {
+ for (Value *InV: PN->incoming_values())
+ Worklist.push_back(InV);
+ continue;
+ }
+ if (const auto *SI = dyn_cast<SelectInst>(V)) {
+ // Push in the true and false values
+ Worklist.push_back(SI->getTrueValue());
+ Worklist.push_back(SI->getFalseValue());
+ continue;
+ }
+ if (isa<Constant>(V)) {
+ // We found at least one base pointer which is non-null, so this derived
+ // pointer is not exclusively derived from null.
+ if (V != Constant::getNullValue(V->getType()))
+ isExclusivelyDerivedFromNull = false;
+ // Continue processing the remaining values to make sure it's exclusively
+ // constant.
+ continue;
+ }
+ // At this point, we know that the base pointer is not exclusively
+ // constant.
+ return BaseType::NonConstant;
+ }
+ // Now, we know that the base pointer is exclusively constant, but we need to
+ // differentiate between exclusive null constant and non-null constant.
+ return isExclusivelyDerivedFromNull ? BaseType::ExclusivelyNull
+ : BaseType::ExclusivelySomeConstant;
+}
+
+static void Verify(const Function &F, const DominatorTree &DT) {
+ SpecificBumpPtrAllocator<BasicBlockState> BSAllocator;
+ DenseMap<const BasicBlock *, BasicBlockState *> BlockMap;
+
+ DEBUG(dbgs() << "Verifying gc pointers in function: " << F.getName() << "\n");
+ if (PrintOnly)
+ dbgs() << "Verifying gc pointers in function: " << F.getName() << "\n";
+
+
+ for (const BasicBlock &BB : F) {
+ BasicBlockState *BBS = new(BSAllocator.Allocate()) BasicBlockState;
+ for (const auto &I : BB)
+ TransferInstruction(I, BBS->Cleared, BBS->Contribution);
+ BlockMap[&BB] = BBS;
+ }
+
+ for (auto &BBI : BlockMap) {
+ GatherDominatingDefs(BBI.first, BBI.second->AvailableIn, DT, BlockMap);
+ TransferBlock(BBI.first, *BBI.second, true);
+ }
+
+ SetVector<const BasicBlock *> Worklist;
+ for (auto &BBI : BlockMap)
+ Worklist.insert(BBI.first);
+
+ // This loop iterates the AvailableIn and AvailableOut sets to a fixed point.
+ // The AvailableIn and AvailableOut sets decrease as we iterate.
+ while (!Worklist.empty()) {
+ const BasicBlock *BB = Worklist.pop_back_val();
+ BasicBlockState *BBS = BlockMap[BB];
+
+ size_t OldInCount = BBS->AvailableIn.size();
+ for (const BasicBlock *PBB : predecessors(BB))
+ set_intersect(BBS->AvailableIn, BlockMap[PBB]->AvailableOut);
+
+ if (OldInCount == BBS->AvailableIn.size())
+ continue;
+
+ assert(OldInCount > BBS->AvailableIn.size() && "invariant!");
+
+ size_t OldOutCount = BBS->AvailableOut.size();
+ TransferBlock(BB, *BBS, false);
+ if (OldOutCount != BBS->AvailableOut.size()) {
+ assert(OldOutCount > BBS->AvailableOut.size() && "invariant!");
+ Worklist.insert(succ_begin(BB), succ_end(BB));
+ }
+ }
+
+ // We now have all the information we need to decide if the use of a heap
+ // reference is legal or not, given our safepoint semantics.
+
+ bool AnyInvalidUses = false;
+
+ auto ReportInvalidUse = [&AnyInvalidUses](const Value &V,
+ const Instruction &I) {
+ errs() << "Illegal use of unrelocated value found!\n";
+ errs() << "Def: " << V << "\n";
+ errs() << "Use: " << I << "\n";
+ if (!PrintOnly)
+ abort();
+ AnyInvalidUses = true;
+ };
+
+ auto isNotExclusivelyConstantDerived = [](const Value *V) {
+ return getBaseType(V) == BaseType::NonConstant;
+ };
+
+ for (const BasicBlock &BB : F) {
+ // We destructively modify AvailableIn as we traverse the block instruction
+ // by instruction.
+ DenseSet<const Value *> &AvailableSet = BlockMap[&BB]->AvailableIn;
+ for (const Instruction &I : BB) {
+ if (const PHINode *PN = dyn_cast<PHINode>(&I)) {
+ if (containsGCPtrType(PN->getType()))
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ const BasicBlock *InBB = PN->getIncomingBlock(i);
+ const Value *InValue = PN->getIncomingValue(i);
+
+ if (isNotExclusivelyConstantDerived(InValue) &&
+ !BlockMap[InBB]->AvailableOut.count(InValue))
+ ReportInvalidUse(*InValue, *PN);
+ }
+ } else if (isa<CmpInst>(I) &&
+ containsGCPtrType(I.getOperand(0)->getType())) {
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ enum BaseType baseTyLHS = getBaseType(LHS),
+ baseTyRHS = getBaseType(RHS);
+
+ // Returns true if LHS and RHS are unrelocated pointers and they are
+ // valid unrelocated uses.
+ auto hasValidUnrelocatedUse = [&AvailableSet, baseTyLHS, baseTyRHS, &LHS, &RHS] () {
+ // A cmp instruction has valid unrelocated pointer operands only if
+ // both operands are unrelocated pointers.
+ // In the comparison between two pointers, if one is an unrelocated
+ // use, the other *should be* an unrelocated use, for this
+ // instruction to contain valid unrelocated uses. This unrelocated
+ // use can be a null constant as well, or another unrelocated
+ // pointer.
+ if (AvailableSet.count(LHS) || AvailableSet.count(RHS))
+ return false;
+ // Constant pointers (that are not exclusively null) may have
+ // meaning in different VMs, so we cannot reorder the compare
+ // against constant pointers before the safepoint. In other words,
+ // comparison of an unrelocated use against a non-null constant
+ // may be invalid.
+ if ((baseTyLHS == BaseType::ExclusivelySomeConstant &&
+ baseTyRHS == BaseType::NonConstant) ||
+ (baseTyLHS == BaseType::NonConstant &&
+ baseTyRHS == BaseType::ExclusivelySomeConstant))
+ return false;
+ // All other cases are valid cases enumerated below:
+ // 1. Comparison between an exclusively derived null pointer and a
+ // constant base pointer.
+ // 2. Comparison between an exclusively derived null pointer and a
+ // non-constant unrelocated base pointer.
+ // 3. Comparison between 2 unrelocated pointers.
+ return true;
+ };
+ if (!hasValidUnrelocatedUse()) {
+ // Print out all non-constant derived pointers that are unrelocated
+ // uses, which are invalid.
+ if (baseTyLHS == BaseType::NonConstant && !AvailableSet.count(LHS))
+ ReportInvalidUse(*LHS, I);
+ if (baseTyRHS == BaseType::NonConstant && !AvailableSet.count(RHS))
+ ReportInvalidUse(*RHS, I);
+ }
+ } else {
+ for (const Value *V : I.operands())
+ if (containsGCPtrType(V->getType()) &&
+ isNotExclusivelyConstantDerived(V) && !AvailableSet.count(V))
+ ReportInvalidUse(*V, I);
+ }
+
+ bool Cleared = false;
+ TransferInstruction(I, Cleared, AvailableSet);
+ (void)Cleared;
+ }
+ }
+
+ if (PrintOnly && !AnyInvalidUses) {
+ dbgs() << "No illegal uses found by SafepointIRVerifier in: " << F.getName()
+ << "\n";
+ }
+}
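
Both entry points declared by the new file are exported through llvm/IR/SafepointIRVerifier.h. A minimal sketch of invoking the verifier, either directly on a function or as a legacy pass:

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/SafepointIRVerifier.h"
using namespace llvm;

void checkSafepoints(Module &M, Function &F) {
  verifySafepointIR(F);                    // ad-hoc check of one function
  legacy::PassManager PM;                  // or schedule it as a pass
  PM.add(createSafepointIRVerifierPass());
  PM.run(M);
}
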
diff --git a/lib/IR/Type.cpp b/lib/IR/Type.cpp
index 44fe5e48c720..20e9c2b5fff2 100644
--- a/lib/IR/Type.cpp
+++ b/lib/IR/Type.cpp
@@ -538,7 +538,7 @@ bool CompositeType::indexValid(const Value *V) const {
if (auto *STy = dyn_cast<StructType>(this)) {
// Structure indexes require (vectors of) 32-bit integer constants. In the
// vector case all of the indices must be equal.
- if (!V->getType()->getScalarType()->isIntegerTy(32))
+ if (!V->getType()->isIntOrIntVectorTy(32))
return false;
const Constant *C = dyn_cast<Constant>(V);
if (C && V->getType()->isVectorTy())
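
isIntOrIntVectorTy(32) is a direct shorthand for the old getScalarType()->isIntegerTy(32) pattern; both answer the same question for scalars and vectors. A one-line sketch, assuming V is a Value*:

bool OldWay = V->getType()->getScalarType()->isIntegerTy(32);
bool NewWay = V->getType()->isIntOrIntVectorTy(32); // same result, fewer hops
assert(OldWay == NewWay);
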
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index 819f63520c74..454a56a76923 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -2504,15 +2504,13 @@ void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
Type *SrcTy = I.getOperand(0)->getType();
Type *DestTy = I.getType();
- Assert(SrcTy->getScalarType()->isPointerTy(),
- "PtrToInt source must be pointer", &I);
+ Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
Assert(!DL.isNonIntegralPointerType(PTy),
"ptrtoint not supported for non-integral pointers");
- Assert(DestTy->getScalarType()->isIntegerTy(),
- "PtrToInt result must be integral", &I);
+ Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
&I);
@@ -2531,10 +2529,9 @@ void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
Type *SrcTy = I.getOperand(0)->getType();
Type *DestTy = I.getType();
- Assert(SrcTy->getScalarType()->isIntegerTy(),
+ Assert(SrcTy->isIntOrIntVectorTy(),
"IntToPtr source must be an integral", &I);
- Assert(DestTy->getScalarType()->isPointerTy(),
- "IntToPtr result must be a pointer", &I);
+ Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
Assert(!DL.isNonIntegralPointerType(PTy),
@@ -2952,11 +2949,10 @@ void Verifier::visitICmpInst(ICmpInst &IC) {
Assert(Op0Ty == Op1Ty,
"Both operands to ICmp instruction are not of the same type!", &IC);
// Check that the operands are the right type
- Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->getScalarType()->isPointerTy(),
+ Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
"Invalid operand types for ICmp instruction", &IC);
// Check that the predicate is valid.
- Assert(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
- IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE,
+ Assert(IC.isIntPredicate(),
"Invalid predicate in ICmp instruction!", &IC);
visitInstruction(IC);
@@ -2972,8 +2968,7 @@ void Verifier::visitFCmpInst(FCmpInst &FC) {
Assert(Op0Ty->isFPOrFPVectorTy(),
"Invalid operand types for FCmp instruction", &FC);
// Check that the predicate is valid.
- Assert(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE &&
- FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE,
+ Assert(FC.isFPPredicate(),
"Invalid predicate in FCmp instruction!", &FC);
visitInstruction(FC);
@@ -3011,7 +3006,7 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
- Assert(GEP.getType()->getScalarType()->isPointerTy() &&
+ Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
GEP.getResultElementType() == ElTy,
"GEP is not of right type for indices!", &GEP, ElTy);
@@ -3027,7 +3022,7 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
unsigned IndexWidth = IndexTy->getVectorNumElements();
Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
}
- Assert(IndexTy->getScalarType()->isIntegerTy(),
+ Assert(IndexTy->isIntOrIntVectorTy(),
"All GEP indices should be of integer type");
}
}
@@ -3113,7 +3108,7 @@ void Verifier::visitLoadInst(LoadInst &LI) {
ElTy, &LI);
checkAtomicMemAccessSize(ElTy, &LI);
} else {
- Assert(LI.getSynchScope() == CrossThread,
+ Assert(LI.getSyncScopeID() == SyncScope::System,
"Non-atomic load cannot have SynchronizationScope specified", &LI);
}
@@ -3142,7 +3137,7 @@ void Verifier::visitStoreInst(StoreInst &SI) {
ElTy, &SI);
checkAtomicMemAccessSize(ElTy, &SI);
} else {
- Assert(SI.getSynchScope() == CrossThread,
+ Assert(SI.getSyncScopeID() == SyncScope::System,
"Non-atomic store cannot have SynchronizationScope specified", &SI);
}
visitInstruction(SI);
@@ -4049,6 +4044,73 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
"incorrect alignment of the source argument", CS);
break;
}
+ case Intrinsic::memmove_element_unordered_atomic: {
+ auto *MI = cast<ElementUnorderedAtomicMemMoveInst>(CS.getInstruction());
+
+ ConstantInt *ElementSizeCI =
+ dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
+ Assert(ElementSizeCI,
+ "element size of the element-wise unordered atomic memory "
+ "intrinsic must be a constant int",
+ CS);
+ const APInt &ElementSizeVal = ElementSizeCI->getValue();
+ Assert(ElementSizeVal.isPowerOf2(),
+ "element size of the element-wise atomic memory intrinsic "
+ "must be a power of 2",
+ CS);
+
+ if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
+ uint64_t Length = LengthCI->getZExtValue();
+ uint64_t ElementSize = MI->getElementSizeInBytes();
+ Assert((Length % ElementSize) == 0,
+ "constant length must be a multiple of the element size in the "
+ "element-wise atomic memory intrinsic",
+ CS);
+ }
+
+ auto IsValidAlignment = [&](uint64_t Alignment) {
+ return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
+ };
+ uint64_t DstAlignment = CS.getParamAlignment(0),
+ SrcAlignment = CS.getParamAlignment(1);
+ Assert(IsValidAlignment(DstAlignment),
+ "incorrect alignment of the destination argument", CS);
+ Assert(IsValidAlignment(SrcAlignment),
+ "incorrect alignment of the source argument", CS);
+ break;
+ }
+ case Intrinsic::memset_element_unordered_atomic: {
+ auto *MI = cast<ElementUnorderedAtomicMemSetInst>(CS.getInstruction());
+
+ ConstantInt *ElementSizeCI =
+ dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
+ Assert(ElementSizeCI,
+ "element size of the element-wise unordered atomic memory "
+ "intrinsic must be a constant int",
+ CS);
+ const APInt &ElementSizeVal = ElementSizeCI->getValue();
+ Assert(ElementSizeVal.isPowerOf2(),
+ "element size of the element-wise atomic memory intrinsic "
+ "must be a power of 2",
+ CS);
+
+ if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
+ uint64_t Length = LengthCI->getZExtValue();
+ uint64_t ElementSize = MI->getElementSizeInBytes();
+ Assert((Length % ElementSize) == 0,
+ "constant length must be a multiple of the element size in the "
+ "element-wise atomic memory intrinsic",
+ CS);
+ }
+
+ auto IsValidAlignment = [&](uint64_t Alignment) {
+ return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
+ };
+ uint64_t DstAlignment = CS.getParamAlignment(0);
+ Assert(IsValidAlignment(DstAlignment),
+ "incorrect alignment of the destination argument", CS);
+ break;
+ }
case Intrinsic::gcroot:
case Intrinsic::gcwrite:
case Intrinsic::gcread:
@@ -4253,7 +4315,7 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
// relocated pointer. It can be casted to the correct type later if it's
// desired. However, they must have the same address space and 'vectorness'
GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
- Assert(Relocate.getDerivedPtr()->getType()->getScalarType()->isPointerTy(),
+ Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
"gc.relocate: relocated value must be a gc pointer", CS);
auto ResultType = CS.getType();
diff --git a/lib/LTO/LTO.cpp b/lib/LTO/LTO.cpp
index 68b8c9fcb939..19973946ac5a 100644
--- a/lib/LTO/LTO.cpp
+++ b/lib/LTO/LTO.cpp
@@ -665,6 +665,15 @@ Error LTO::addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
auto GUID = GlobalValue::getGUID(GlobalValue::getGlobalIdentifier(
Sym.getIRName(), GlobalValue::ExternalLinkage, ""));
ThinLTO.PrevailingModuleForGUID[GUID] = BM.getModuleIdentifier();
+
+ // For linker redefined symbols (via --wrap or --defsym) we want to
+ // switch the linkage to `weak` to prevent IPOs from happening.
+ // Find the summary in the module for this very GV and record the new
+ // linkage so that we can switch it when we import the GV.
+ if (Res.LinkerRedefined)
+ if (auto S = ThinLTO.CombinedIndex.findSummaryInModule(
+ GUID, BM.getModuleIdentifier()))
+ S->setLinkage(GlobalValue::WeakAnyLinkage);
}
}
}
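// A minimal sketch of how an LTO client (for example, the linker) could mark a
// symbol it redefines via --wrap or --defsym, so the branch above demotes its
// IR definition to weak linkage; the fields are those of lto::SymbolResolution:
lto::SymbolResolution R;
R.Prevailing = true;       // this module still supplies the prevailing definition
R.LinkerRedefined = true;  // the linker substitutes its own definition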
@@ -1021,7 +1030,7 @@ Error LTO::runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache,
// Collect for each module the list of functions it defines (GUID ->
// Summary).
- StringMap<std::map<GlobalValue::GUID, GlobalValueSummary *>>
+ StringMap<GVSummaryMapTy>
ModuleToDefinedGVSummaries(ThinLTO.ModuleMap.size());
ThinLTO.CombinedIndex.collectDefinedGVSummariesPerModule(
ModuleToDefinedGVSummaries);
diff --git a/lib/Linker/IRMover.cpp b/lib/Linker/IRMover.cpp
index defad1904989..f486e525b5e7 100644
--- a/lib/Linker/IRMover.cpp
+++ b/lib/Linker/IRMover.cpp
@@ -1256,6 +1256,18 @@ Error IRLinker::linkModuleFlagsMetadata() {
return Error::success();
}
+/// Return InlineAsm adjusted with target-specific directives if required.
+/// For ARM and Thumb, we have to add directives to select the appropriate ISA
+/// to support mixing module-level inline assembly from ARM and Thumb modules.
+static std::string adjustInlineAsm(const std::string &InlineAsm,
+ const Triple &Triple) {
+ if (Triple.getArch() == Triple::thumb || Triple.getArch() == Triple::thumbeb)
+ return ".text\n.balign 2\n.thumb\n" + InlineAsm;
+ if (Triple.getArch() == Triple::arm || Triple.getArch() == Triple::armeb)
+ return ".text\n.balign 4\n.arm\n" + InlineAsm;
+ return InlineAsm;
+}
+
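// A small illustration of adjustInlineAsm(); the helper is file-static, so this
// is a sketch rather than a usable API, and the asm text is made up:
Triple ThumbTriple("thumbv7-unknown-linux-gnueabi");
std::string Adjusted = adjustInlineAsm("foo: bx lr\n", ThumbTriple);
// Adjusted == ".text\n.balign 2\n.thumb\nfoo: bx lr\n"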
Error IRLinker::run() {
// Ensure metadata materialized before value mapping.
if (SrcM->getMaterializer())
@@ -1293,11 +1305,13 @@ Error IRLinker::run() {
// Append the module inline asm string.
if (!IsPerformingImport && !SrcM->getModuleInlineAsm().empty()) {
+ std::string SrcModuleInlineAsm = adjustInlineAsm(SrcM->getModuleInlineAsm(),
+ SrcTriple);
if (DstM.getModuleInlineAsm().empty())
- DstM.setModuleInlineAsm(SrcM->getModuleInlineAsm());
+ DstM.setModuleInlineAsm(SrcModuleInlineAsm);
else
DstM.setModuleInlineAsm(DstM.getModuleInlineAsm() + "\n" +
- SrcM->getModuleInlineAsm());
+ SrcModuleInlineAsm);
}
// Loop over all of the linked values to compute type mappings.
diff --git a/lib/MC/ELFObjectWriter.cpp b/lib/MC/ELFObjectWriter.cpp
index 30f357826805..c8dd63011943 100644
--- a/lib/MC/ELFObjectWriter.cpp
+++ b/lib/MC/ELFObjectWriter.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
@@ -26,6 +27,7 @@
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
@@ -204,8 +206,7 @@ public:
void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, bool &IsPCRel,
- uint64_t &FixedValue) override;
+ MCValue Target, uint64_t &FixedValue) override;
// Map from a signature symbol to the group section index
using RevGroupMapTy = DenseMap<const MCSymbol *, unsigned>;
@@ -626,7 +627,10 @@ void ELFObjectWriter::recordRelocation(MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
- bool &IsPCRel, uint64_t &FixedValue) {
+ uint64_t &FixedValue) {
+ MCAsmBackend &Backend = Asm.getBackend();
+ bool IsPCRel = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
+ MCFixupKindInfo::FKF_IsPCRel;
const MCSectionELF &FixupSection = cast<MCSectionELF>(*Fragment->getParent());
uint64_t C = Target.getConstant();
uint64_t FixupOffset = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
diff --git a/lib/MC/MCAssembler.cpp b/lib/MC/MCAssembler.cpp
index 0318d916aa49..eaf6f19326eb 100644
--- a/lib/MC/MCAssembler.cpp
+++ b/lib/MC/MCAssembler.cpp
@@ -653,16 +653,14 @@ MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
// Evaluate the fixup.
MCValue Target;
uint64_t FixedValue;
- bool IsPCRel = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
- MCFixupKindInfo::FKF_IsPCRel;
- if (!evaluateFixup(Layout, Fixup, &F, Target, FixedValue)) {
+ bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue);
+ if (!IsResolved) {
// The fixup was unresolved, we need a relocation. Inform the object
// writer of the relocation, and give it an opportunity to adjust the
// fixup value if need be.
- getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, IsPCRel,
- FixedValue);
+ getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
}
- return std::make_tuple(Target, FixedValue, IsPCRel);
+ return std::make_tuple(Target, FixedValue, IsResolved);
}
void MCAssembler::layout(MCAsmLayout &Layout) {
@@ -738,12 +736,12 @@ void MCAssembler::layout(MCAsmLayout &Layout) {
llvm_unreachable("Unknown fragment with fixups!");
for (const MCFixup &Fixup : Fixups) {
uint64_t FixedValue;
- bool IsPCRel;
+ bool IsResolved;
MCValue Target;
- std::tie(Target, FixedValue, IsPCRel) =
+ std::tie(Target, FixedValue, IsResolved) =
handleFixup(Layout, Frag, Fixup);
getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
- IsPCRel);
+ IsResolved);
}
}
}
diff --git a/lib/MC/MachObjectWriter.cpp b/lib/MC/MachObjectWriter.cpp
index c4e7cdbe095e..62bf0a58fdfa 100644
--- a/lib/MC/MachObjectWriter.cpp
+++ b/lib/MC/MachObjectWriter.cpp
@@ -449,7 +449,7 @@ void MachObjectWriter::recordRelocation(MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
- bool &IsPCRel, uint64_t &FixedValue) {
+ uint64_t &FixedValue) {
TargetObjectWriter->recordRelocation(this, Asm, Layout, Fragment, Fixup,
Target, FixedValue);
}
diff --git a/lib/MC/WasmObjectWriter.cpp b/lib/MC/WasmObjectWriter.cpp
index 82352cb50c70..0d31f65c49d9 100644
--- a/lib/MC/WasmObjectWriter.cpp
+++ b/lib/MC/WasmObjectWriter.cpp
@@ -36,8 +36,7 @@
using namespace llvm;
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "reloc-info"
+#define DEBUG_TYPE "mc"
namespace {
@@ -153,7 +152,7 @@ struct WasmRelocationEntry {
}
void print(raw_ostream &Out) const {
- Out << "Off=" << Offset << ", Sym=" << Symbol << ", Addend=" << Addend
+ Out << "Off=" << Offset << ", Sym=" << *Symbol << ", Addend=" << Addend
<< ", Type=" << Type << ", FixupSection=" << FixupSection;
}
@@ -199,6 +198,7 @@ class WasmObjectWriter : public MCObjectWriter {
DenseMap<WasmFunctionType, int32_t, WasmFunctionTypeDenseMapInfo>
FunctionTypeIndices;
+ SmallVector<WasmFunctionType, 4> FunctionTypes;
// TargetObjectWriter wrappers.
bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
@@ -224,6 +224,7 @@ private:
SymbolIndices.clear();
IndirectSymbolIndices.clear();
FunctionTypeIndices.clear();
+ FunctionTypes.clear();
MCObjectWriter::reset();
}
@@ -231,8 +232,7 @@ private:
void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, bool &IsPCRel,
- uint64_t &FixedValue) override;
+ MCValue Target, uint64_t &FixedValue) override;
void executePostLayoutBinding(MCAssembler &Asm,
const MCAsmLayout &Layout) override;
@@ -276,6 +276,8 @@ private:
void writeRelocations(ArrayRef<WasmRelocationEntry> Relocations,
uint64_t HeaderSize);
uint32_t getRelocationIndexValue(const WasmRelocationEntry &RelEntry);
+ uint32_t getFunctionType(const MCSymbolWasm& Symbol);
+ uint32_t registerFunctionType(const MCSymbolWasm& Symbol);
};
} // end anonymous namespace
@@ -350,7 +352,10 @@ void WasmObjectWriter::recordRelocation(MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
- bool &IsPCRel, uint64_t &FixedValue) {
+ uint64_t &FixedValue) {
+ MCAsmBackend &Backend = Asm.getBackend();
+ bool IsPCRel = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
+ MCFixupKindInfo::FKF_IsPCRel;
const auto &FixupSection = cast<MCSectionWasm>(*Fragment->getParent());
uint64_t C = Target.getConstant();
uint64_t FixupOffset = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
@@ -401,15 +406,11 @@ void WasmObjectWriter::recordRelocation(MCAssembler &Asm,
const MCSymbolRefExpr *RefA = Target.getSymA();
const auto *SymA = RefA ? cast<MCSymbolWasm>(&RefA->getSymbol()) : nullptr;
- bool ViaWeakRef = false;
if (SymA && SymA->isVariable()) {
const MCExpr *Expr = SymA->getVariableValue();
- if (const auto *Inner = dyn_cast<MCSymbolRefExpr>(Expr)) {
- if (Inner->getKind() == MCSymbolRefExpr::VK_WEAKREF) {
- SymA = cast<MCSymbolWasm>(&Inner->getSymbol());
- ViaWeakRef = true;
- }
- }
+ const auto *Inner = cast<MCSymbolRefExpr>(Expr);
+ if (Inner->getKind() == MCSymbolRefExpr::VK_WEAKREF)
+ llvm_unreachable("weakref used in reloc not yet implemented");
}
// Put any constant offset in an addend. Offsets can be negative, and
@@ -417,12 +418,8 @@ void WasmObjectWriter::recordRelocation(MCAssembler &Asm,
// be negative and don't wrap.
FixedValue = 0;
- if (SymA) {
- if (ViaWeakRef)
- llvm_unreachable("weakref used in reloc not yet implemented");
- else
- SymA->setUsedInReloc();
- }
+ if (SymA)
+ SymA->setUsedInReloc();
assert(!IsPCRel);
assert(SymA);
@@ -493,7 +490,7 @@ uint32_t WasmObjectWriter::getRelocationIndexValue(
case wasm::R_WEBASSEMBLY_TABLE_INDEX_SLEB:
case wasm::R_WEBASSEMBLY_TABLE_INDEX_I32:
if (!IndirectSymbolIndices.count(RelEntry.Symbol))
- report_fatal_error("symbol not found table index space:" +
+ report_fatal_error("symbol not found table index space: " +
RelEntry.Symbol->getName());
return IndirectSymbolIndices[RelEntry.Symbol];
case wasm::R_WEBASSEMBLY_FUNCTION_INDEX_LEB:
@@ -502,12 +499,12 @@ uint32_t WasmObjectWriter::getRelocationIndexValue(
case wasm::R_WEBASSEMBLY_GLOBAL_ADDR_SLEB:
case wasm::R_WEBASSEMBLY_GLOBAL_ADDR_I32:
if (!SymbolIndices.count(RelEntry.Symbol))
- report_fatal_error("symbol not found function/global index space:" +
+ report_fatal_error("symbol not found function/global index space: " +
RelEntry.Symbol->getName());
return SymbolIndices[RelEntry.Symbol];
case wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB:
if (!TypeIndices.count(RelEntry.Symbol))
- report_fatal_error("symbol not found in type index space:" +
+ report_fatal_error("symbol not found in type index space: " +
RelEntry.Symbol->getName());
return TypeIndices[RelEntry.Symbol];
default:
@@ -913,6 +910,38 @@ void WasmObjectWriter::writeLinkingMetaDataSection(
endSection(Section);
}
+uint32_t WasmObjectWriter::getFunctionType(const MCSymbolWasm& Symbol) {
+ assert(Symbol.isFunction());
+ assert(TypeIndices.count(&Symbol));
+ return TypeIndices[&Symbol];
+}
+
+uint32_t WasmObjectWriter::registerFunctionType(const MCSymbolWasm& Symbol) {
+ assert(Symbol.isFunction());
+
+ WasmFunctionType F;
+ if (Symbol.isVariable()) {
+ const MCExpr *Expr = Symbol.getVariableValue();
+ auto *Inner = cast<MCSymbolRefExpr>(Expr);
+ const auto *ResolvedSym = cast<MCSymbolWasm>(&Inner->getSymbol());
+ F.Returns = ResolvedSym->getReturns();
+ F.Params = ResolvedSym->getParams();
+ } else {
+ F.Returns = Symbol.getReturns();
+ F.Params = Symbol.getParams();
+ }
+
+ auto Pair =
+ FunctionTypeIndices.insert(std::make_pair(F, FunctionTypes.size()));
+ if (Pair.second)
+ FunctionTypes.push_back(F);
+ TypeIndices[&Symbol] = Pair.first->second;
+
+ DEBUG(dbgs() << "registerFunctionType: " << Symbol << " new:" << Pair.second << "\n");
+ DEBUG(dbgs() << " -> type index: " << Pair.first->second << "\n");
+ return Pair.first->second;
+}
+
void WasmObjectWriter::writeObject(MCAssembler &Asm,
const MCAsmLayout &Layout) {
DEBUG(dbgs() << "WasmObjectWriter::writeObject\n");
@@ -920,7 +949,6 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
wasm::ValType PtrType = is64Bit() ? wasm::ValType::I64 : wasm::ValType::I32;
// Collect information from the available symbols.
- SmallVector<WasmFunctionType, 4> FunctionTypes;
SmallVector<WasmFunction, 4> Functions;
SmallVector<uint32_t, 4> TableElems;
SmallVector<WasmGlobal, 4> Globals;
@@ -960,37 +988,27 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
// Populate the Imports set.
for (const MCSymbol &S : Asm.symbols()) {
const auto &WS = static_cast<const MCSymbolWasm &>(S);
- int32_t Type;
- if (WS.isFunction()) {
- // Prepare the function's type, if we haven't seen it yet.
- WasmFunctionType F;
- F.Returns = WS.getReturns();
- F.Params = WS.getParams();
- auto Pair =
- FunctionTypeIndices.insert(std::make_pair(F, FunctionTypes.size()));
- if (Pair.second)
- FunctionTypes.push_back(F);
-
- Type = Pair.first->second;
- } else {
- Type = int32_t(PtrType);
- }
+ if (WS.isTemporary())
+ continue;
+
+ if (WS.isFunction())
+ registerFunctionType(WS);
// If the symbol is not defined in this translation unit, import it.
- if (!WS.isTemporary() && !WS.isDefined(/*SetUsed=*/false)) {
+ if (!WS.isDefined(/*SetUsed=*/false) || WS.isVariable()) {
WasmImport Import;
Import.ModuleName = WS.getModuleName();
Import.FieldName = WS.getName();
if (WS.isFunction()) {
Import.Kind = wasm::WASM_EXTERNAL_FUNCTION;
- Import.Type = Type;
+ Import.Type = getFunctionType(WS);
SymbolIndices[&WS] = NumFuncImports;
++NumFuncImports;
} else {
Import.Kind = wasm::WASM_EXTERNAL_GLOBAL;
- Import.Type = Type;
+ Import.Type = int32_t(PtrType);
SymbolIndices[&WS] = NumGlobalImports;
++NumGlobalImports;
}
@@ -1082,10 +1100,6 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
if (S.isTemporary() && S.getName().empty())
continue;
- // Variable references (weak references) are handled in a second pass
- if (S.isVariable())
- continue;
-
const auto &WS = static_cast<const MCSymbolWasm &>(S);
DEBUG(dbgs() << "MCSymbol: '" << S << "'"
<< " isDefined=" << S.isDefined() << " isExternal="
@@ -1097,20 +1111,12 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
if (WS.isWeak())
WeakSymbols.push_back(WS.getName());
+ if (WS.isVariable())
+ continue;
+
unsigned Index;
if (WS.isFunction()) {
- // Prepare the function's type, if we haven't seen it yet.
- WasmFunctionType F;
- F.Returns = WS.getReturns();
- F.Params = WS.getParams();
- auto Pair =
- FunctionTypeIndices.insert(std::make_pair(F, FunctionTypes.size()));
- if (Pair.second)
- FunctionTypes.push_back(F);
-
- int32_t Type = Pair.first->second;
-
if (WS.isDefined(/*SetUsed=*/false)) {
if (WS.getOffset() != 0)
report_fatal_error(
@@ -1125,21 +1131,21 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
// Prepare the function.
WasmFunction Func;
- Func.Type = Type;
+ Func.Type = getFunctionType(WS);
Func.Sym = &WS;
SymbolIndices[&WS] = Index;
Functions.push_back(Func);
} else {
- // Should be no such thing as weak undefined symbol
- assert(!WS.isVariable());
-
// An import; the index was assigned above.
Index = SymbolIndices.find(&WS)->second;
}
+ DEBUG(dbgs() << " -> function index: " << Index << "\n");
+
// If needed, prepare the function to be called indirectly.
- if (IsAddressTaken.count(&WS)) {
+ if (IsAddressTaken.count(&WS) != 0) {
IndirectSymbolIndices[&WS] = TableElems.size();
+ DEBUG(dbgs() << " -> adding to table: " << TableElems.size() << "\n");
TableElems.push_back(Index);
}
} else {
@@ -1185,7 +1191,7 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
Align->getMaxBytesToEmit());
DataBytes.resize(Size, Value);
} else if (auto *Fill = dyn_cast<MCFillFragment>(&Frag)) {
- DataBytes.insert(DataBytes.end(), Size, Fill->getValue());
+ DataBytes.insert(DataBytes.end(), Fill->getSize(), Fill->getValue());
} else {
const auto &DataFrag = cast<MCDataFragment>(Frag);
const SmallVectorImpl<char> &Contents = DataFrag.getContents();
@@ -1205,11 +1211,12 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
Global.InitialValue = DataSection.getSectionOffset();
Global.ImportIndex = 0;
SymbolIndices[&WS] = Index;
+ DEBUG(dbgs() << " -> global index: " << Index << "\n");
Globals.push_back(Global);
}
// If the symbol is visible outside this translation unit, export it.
- if (WS.isExternal() && WS.isDefined(/*SetUsed=*/false)) {
+ if ((WS.isExternal() && WS.isDefined(/*SetUsed=*/false))) {
WasmExport Export;
Export.FieldName = WS.getName();
Export.Index = Index;
@@ -1217,26 +1224,28 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
Export.Kind = wasm::WASM_EXTERNAL_FUNCTION;
else
Export.Kind = wasm::WASM_EXTERNAL_GLOBAL;
+ DEBUG(dbgs() << " -> export " << Exports.size() << "\n");
Exports.push_back(Export);
}
}
- // Handle weak aliases
+ // Handle weak aliases. These need a separate pass because the target of an
+ // alias must be processed before the alias itself, and the symbols are not
+ // necessarily ordered that way.
for (const MCSymbol &S : Asm.symbols()) {
if (!S.isVariable())
continue;
- assert(S.isExternal());
assert(S.isDefined(/*SetUsed=*/false));
const auto &WS = static_cast<const MCSymbolWasm &>(S);
-
- // Find the target symbol of this weak alias
+ // Find the target symbol of this weak alias and export that index
const MCExpr *Expr = WS.getVariableValue();
- auto *Inner = dyn_cast<MCSymbolRefExpr>(Expr);
+ auto *Inner = cast<MCSymbolRefExpr>(Expr);
const auto *ResolvedSym = cast<MCSymbolWasm>(&Inner->getSymbol());
+ DEBUG(dbgs() << WS.getName() << ": weak alias of '" << *ResolvedSym << "'\n");
+ assert(SymbolIndices.count(ResolvedSym) > 0);
uint32_t Index = SymbolIndices.find(ResolvedSym)->second;
- DEBUG(dbgs() << "Weak alias: '" << WS << "' -> '" << ResolvedSym << "' = " << Index << "\n");
- SymbolIndices[&WS] = Index;
+ DEBUG(dbgs() << " -> index:" << Index << "\n");
WasmExport Export;
Export.FieldName = WS.getName();
@@ -1245,7 +1254,7 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
Export.Kind = wasm::WASM_EXTERNAL_FUNCTION;
else
Export.Kind = wasm::WASM_EXTERNAL_GLOBAL;
- WeakSymbols.push_back(Export.FieldName);
+ DEBUG(dbgs() << " -> export " << Exports.size() << "\n");
Exports.push_back(Export);
}
@@ -1254,15 +1263,7 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
if (Fixup.Type != wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB)
continue;
- WasmFunctionType F;
- F.Returns = Fixup.Symbol->getReturns();
- F.Params = Fixup.Symbol->getParams();
- auto Pair =
- FunctionTypeIndices.insert(std::make_pair(F, FunctionTypes.size()));
- if (Pair.second)
- FunctionTypes.push_back(F);
-
- TypeIndices[Fixup.Symbol] = Pair.first->second;
+ registerFunctionType(*Fixup.Symbol);
}
// Write out the Wasm header.
diff --git a/lib/MC/WinCOFFObjectWriter.cpp b/lib/MC/WinCOFFObjectWriter.cpp
index fc5234950391..956ae70b38d1 100644
--- a/lib/MC/WinCOFFObjectWriter.cpp
+++ b/lib/MC/WinCOFFObjectWriter.cpp
@@ -197,8 +197,7 @@ public:
void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, bool &IsPCRel,
- uint64_t &FixedValue) override;
+ MCValue Target, uint64_t &FixedValue) override;
void createFileSymbols(MCAssembler &Asm);
void assignSectionNumbers();
@@ -708,9 +707,11 @@ bool WinCOFFObjectWriter::isSymbolRefDifferenceFullyResolvedImpl(
InSet, IsPCRel);
}
-void WinCOFFObjectWriter::recordRelocation(
- MCAssembler &Asm, const MCAsmLayout &Layout, const MCFragment *Fragment,
- const MCFixup &Fixup, MCValue Target, bool &IsPCRel, uint64_t &FixedValue) {
+void WinCOFFObjectWriter::recordRelocation(MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
assert(Target.getSymA() && "Relocation must reference a symbol!");
const MCSymbol &A = Target.getSymA()->getSymbol();
diff --git a/lib/Object/WasmObjectFile.cpp b/lib/Object/WasmObjectFile.cpp
index fff497ba5564..7f80bf0b83a0 100644
--- a/lib/Object/WasmObjectFile.cpp
+++ b/lib/Object/WasmObjectFile.cpp
@@ -567,20 +567,16 @@ Error WasmObjectFile::parseExportSection(const uint8_t *Ptr, const uint8_t *End)
Ex.Name = readString(Ptr);
Ex.Kind = readUint8(Ptr);
Ex.Index = readVaruint32(Ptr);
+ WasmSymbol::SymbolType ExportType;
+ bool MakeSymbol = false;
switch (Ex.Kind) {
case wasm::WASM_EXTERNAL_FUNCTION:
- SymbolMap.try_emplace(Ex.Name, Symbols.size());
- Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::FUNCTION_EXPORT,
- Sections.size(), i);
- DEBUG(dbgs() << "Adding export: " << Symbols.back()
- << " sym index:" << Symbols.size() << "\n");
+ ExportType = WasmSymbol::SymbolType::FUNCTION_EXPORT;
+ MakeSymbol = true;
break;
case wasm::WASM_EXTERNAL_GLOBAL:
- SymbolMap.try_emplace(Ex.Name, Symbols.size());
- Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::GLOBAL_EXPORT,
- Sections.size(), i);
- DEBUG(dbgs() << "Adding export: " << Symbols.back()
- << " sym index:" << Symbols.size() << "\n");
+ ExportType = WasmSymbol::SymbolType::GLOBAL_EXPORT;
+ MakeSymbol = true;
break;
case wasm::WASM_EXTERNAL_MEMORY:
case wasm::WASM_EXTERNAL_TABLE:
@@ -589,6 +585,20 @@ Error WasmObjectFile::parseExportSection(const uint8_t *Ptr, const uint8_t *End)
return make_error<GenericBinaryError>(
"Unexpected export kind", object_error::parse_failed);
}
+ if (MakeSymbol) {
+ auto Pair = SymbolMap.try_emplace(Ex.Name, Symbols.size());
+ if (Pair.second) {
+ Symbols.emplace_back(Ex.Name, ExportType,
+ Sections.size(), i);
+ DEBUG(dbgs() << "Adding export: " << Symbols.back()
+ << " sym index:" << Symbols.size() << "\n");
+ } else {
+ uint32_t SymIndex = Pair.first->second;
+ Symbols[SymIndex] = WasmSymbol(Ex.Name, ExportType, Sections.size(), i);
+ DEBUG(dbgs() << "Replacing existing symbol: " << Symbols[SymIndex]
+ << " sym index:" << SymIndex << "\n");
+ }
+ }
Exports.push_back(Ex);
}
if (Ptr != End)
@@ -665,15 +675,17 @@ Error WasmObjectFile::parseElemSection(const uint8_t *Ptr, const uint8_t *End) {
}
Error WasmObjectFile::parseDataSection(const uint8_t *Ptr, const uint8_t *End) {
+ const uint8_t *Start = Ptr;
uint32_t Count = readVaruint32(Ptr);
DataSegments.reserve(Count);
while (Count--) {
- wasm::WasmDataSegment Segment;
- Segment.Index = readVaruint32(Ptr);
- if (Error Err = readInitExpr(Segment.Offset, Ptr))
+ WasmSegment Segment;
+ Segment.Data.MemoryIndex = readVaruint32(Ptr);
+ if (Error Err = readInitExpr(Segment.Data.Offset, Ptr))
return Err;
uint32_t Size = readVaruint32(Ptr);
- Segment.Content = ArrayRef<uint8_t>(Ptr, Size);
+ Segment.Data.Content = ArrayRef<uint8_t>(Ptr, Size);
+ Segment.SectionOffset = Ptr - Start;
Ptr += Size;
DataSegments.push_back(Segment);
}
diff --git a/lib/Object/WindowsResource.cpp b/lib/Object/WindowsResource.cpp
index 1371eacdf8f2..246eee5ddb31 100644
--- a/lib/Object/WindowsResource.cpp
+++ b/lib/Object/WindowsResource.cpp
@@ -609,8 +609,8 @@ void WindowsResourceCOFFWriter::writeDirectoryTree() {
for (auto const &Child : StringChildren) {
auto *Entry = reinterpret_cast<coff_resource_dir_entry *>(BufferStart +
CurrentOffset);
- Entry->Identifier.NameOffset =
- StringTableOffsets[Child.second->getStringIndex()];
+ Entry->Identifier.setNameOffset(
+ StringTableOffsets[Child.second->getStringIndex()]);
if (Child.second->checkIsDataNode()) {
Entry->Offset.DataEntryOffset = NextLevelOffset;
NextLevelOffset += sizeof(coff_resource_data_entry);
diff --git a/lib/ObjectYAML/WasmYAML.cpp b/lib/ObjectYAML/WasmYAML.cpp
index 2040efdc9d11..6a68cd265ad8 100644
--- a/lib/ObjectYAML/WasmYAML.cpp
+++ b/lib/ObjectYAML/WasmYAML.cpp
@@ -345,7 +345,8 @@ void MappingTraits<wasm::WasmInitExpr>::mapping(IO &IO,
void MappingTraits<WasmYAML::DataSegment>::mapping(
IO &IO, WasmYAML::DataSegment &Segment) {
- IO.mapRequired("Index", Segment.Index);
+ IO.mapOptional("SectionOffset", Segment.SectionOffset);
+ IO.mapRequired("MemoryIndex", Segment.MemoryIndex);
IO.mapRequired("Offset", Segment.Offset);
IO.mapRequired("Content", Segment.Content);
}
diff --git a/lib/Option/OptTable.cpp b/lib/Option/OptTable.cpp
index acb9e8d015bc..bcd365236e46 100644
--- a/lib/Option/OptTable.cpp
+++ b/lib/Option/OptTable.cpp
@@ -225,11 +225,15 @@ OptTable::suggestValueCompletions(StringRef Option, StringRef Arg) const {
return {};
}
-std::vector<std::string> OptTable::findByPrefix(StringRef Cur) const {
+std::vector<std::string>
+OptTable::findByPrefix(StringRef Cur, unsigned short DisableFlags) const {
std::vector<std::string> Ret;
for (const Info &In : OptionInfos.slice(FirstSearchableIndex)) {
- if (!In.Prefixes)
+ if (!In.Prefixes || (!In.HelpText && !In.GroupID))
+ continue;
+ if (In.Flags & DisableFlags)
continue;
+
for (int I = 0; In.Prefixes[I]; I++) {
std::string S = std::string(In.Prefixes[I]) + std::string(In.Name);
if (StringRef(S).startswith(Cur))
diff --git a/lib/Passes/PassBuilder.cpp b/lib/Passes/PassBuilder.cpp
index 0380bd991d71..9e0cf27aa17b 100644
--- a/lib/Passes/PassBuilder.cpp
+++ b/lib/Passes/PassBuilder.cpp
@@ -281,33 +281,52 @@ AnalysisKey NoOpLoopAnalysis::Key;
} // End anonymous namespace.
+void PassBuilder::invokePeepholeEPCallbacks(
+ FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ for (auto &C : PeepholeEPCallbacks)
+ C(FPM, Level);
+}
+
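// A minimal sketch of hooking the new peephole extension point; the
// registration method is assumed to be the one declared alongside
// PeepholeEPCallbacks in PassBuilder.h, and the extra pass is only an example:
PassBuilder PB;
PB.registerPeepholeEPCallback(
    [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
      if (Level != PassBuilder::O0)
        FPM.addPass(InstCombinePass());
    });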
void PassBuilder::registerModuleAnalyses(ModuleAnalysisManager &MAM) {
#define MODULE_ANALYSIS(NAME, CREATE_PASS) \
MAM.registerPass([&] { return CREATE_PASS; });
#include "PassRegistry.def"
+
+ for (auto &C : ModuleAnalysisRegistrationCallbacks)
+ C(MAM);
}
void PassBuilder::registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM) {
#define CGSCC_ANALYSIS(NAME, CREATE_PASS) \
CGAM.registerPass([&] { return CREATE_PASS; });
#include "PassRegistry.def"
+
+ for (auto &C : CGSCCAnalysisRegistrationCallbacks)
+ C(CGAM);
}
void PassBuilder::registerFunctionAnalyses(FunctionAnalysisManager &FAM) {
#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
FAM.registerPass([&] { return CREATE_PASS; });
#include "PassRegistry.def"
+
+ for (auto &C : FunctionAnalysisRegistrationCallbacks)
+ C(FAM);
}
void PassBuilder::registerLoopAnalyses(LoopAnalysisManager &LAM) {
#define LOOP_ANALYSIS(NAME, CREATE_PASS) \
LAM.registerPass([&] { return CREATE_PASS; });
#include "PassRegistry.def"
+
+ for (auto &C : LoopAnalysisRegistrationCallbacks)
+ C(LAM);
}
FunctionPassManager
PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
- bool DebugLogging) {
+ bool DebugLogging,
+ bool PrepareForThinLTO) {
assert(Level != O0 && "Must request optimizations!");
FunctionPassManager FPM(DebugLogging);
@@ -340,6 +359,8 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
if (!isOptimizingForSize(Level))
FPM.addPass(LibCallsShrinkWrapPass());
+ invokePeepholeEPCallbacks(FPM, Level);
+
FPM.addPass(TailCallElimPass());
FPM.addPass(SimplifyCFGPass());
@@ -363,11 +384,19 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
LPM1.addPass(SimpleLoopUnswitchPass());
LPM2.addPass(IndVarSimplifyPass());
LPM2.addPass(LoopIdiomRecognizePass());
+
+ for (auto &C : LateLoopOptimizationsEPCallbacks)
+ C(LPM2, Level);
+
LPM2.addPass(LoopDeletionPass());
- // FIXME: The old pass manager has a hack to disable loop unrolling during
- // ThinLTO when using sample PGO. Need to either fix it or port some
- // workaround.
- LPM2.addPass(LoopUnrollPass::createFull(Level));
+ // Do not enable unrolling in the PrepareForThinLTO phase during sample PGO,
+ // because it changes the IR in ways that make profile annotation in the
+ // backend compile inaccurate.
+ if (!PrepareForThinLTO || !PGOOpt || PGOOpt->SampleProfileFile.empty())
+ LPM2.addPass(LoopUnrollPass::createFull(Level));
+
+ for (auto &C : LoopOptimizerEndEPCallbacks)
+ C(LPM2, Level);
// We provide the opt remark emitter pass for LICM to use. We only need to do
// this once as it is immutable.
@@ -403,6 +432,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
// Run instcombine after redundancy and dead bit elimination to exploit
// opportunities opened up by them.
FPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(FPM, Level);
// Re-consider control flow based optimizations after redundancy elimination,
// redo DCE, etc.
@@ -411,19 +441,24 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
FPM.addPass(DSEPass());
FPM.addPass(createFunctionToLoopPassAdaptor(LICMPass()));
+ for (auto &C : ScalarOptimizerLateEPCallbacks)
+ C(FPM, Level);
+
// Finally, do an expensive DCE pass to catch all the dead code exposed by
// the simplifications and basic cleanup after all the simplifications.
FPM.addPass(ADCEPass());
FPM.addPass(SimplifyCFGPass());
FPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(FPM, Level);
return FPM;
}
-static void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
- PassBuilder::OptimizationLevel Level,
- bool RunProfileGen, std::string ProfileGenFile,
- std::string ProfileUseFile) {
+void PassBuilder::addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
+ PassBuilder::OptimizationLevel Level,
+ bool RunProfileGen,
+ std::string ProfileGenFile,
+ std::string ProfileUseFile) {
// Generally running simplification passes and the inliner with a high
// threshold results in smaller executables, but there may be cases where
// the size grows, so let's be conservative here and skip this simplification
@@ -448,9 +483,8 @@ static void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
FPM.addPass(EarlyCSEPass()); // Catch trivial redundancies.
FPM.addPass(SimplifyCFGPass()); // Merge & remove basic blocks.
FPM.addPass(InstCombinePass()); // Combine silly sequences.
+ invokePeepholeEPCallbacks(FPM, Level);
- // FIXME: Here the old pass manager inserts peephole extensions.
- // Add them when they're supported.
CGPipeline.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPipeline)));
@@ -490,7 +524,8 @@ getInlineParamsFromOptLevel(PassBuilder::OptimizationLevel Level) {
ModulePassManager
PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level,
- bool DebugLogging) {
+ bool DebugLogging,
+ bool PrepareForThinLTO) {
ModulePassManager MPM(DebugLogging);
// Do basic inference of function attributes from known properties of system
@@ -530,6 +565,8 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level,
// optimizations.
FunctionPassManager GlobalCleanupPM(DebugLogging);
GlobalCleanupPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(GlobalCleanupPM, Level);
+
GlobalCleanupPM.addPass(SimplifyCFGPass());
MPM.addPass(createModuleToFunctionPassAdaptor(std::move(GlobalCleanupPM)));
@@ -544,8 +581,11 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level,
MPM.addPass(SampleProfileLoaderPass(PGOOpt->SampleProfileFile));
// Indirect call promotion that promotes intra-module targets only.
- MPM.addPass(PGOIndirectCallPromotion(
- false, PGOOpt && !PGOOpt->SampleProfileFile.empty()));
+ // Do not enable it in the PrepareForThinLTO phase during sample PGO, because
+ // it changes the IR in ways that make profile annotation in the backend
+ // compile inaccurate.
+ if (!PrepareForThinLTO || PGOOpt->SampleProfileFile.empty())
+ MPM.addPass(PGOIndirectCallPromotion(
+ false, PGOOpt && !PGOOpt->SampleProfileFile.empty()));
}
// Require the GlobalsAA analysis for the module so we can query it within
@@ -570,7 +610,12 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level,
// Run the inliner first. The theory is that we are walking bottom-up and so
// the callees have already been fully optimized, and we want to inline them
// into the callers so that our optimizations can reflect that.
- MainCGPipeline.addPass(InlinerPass(getInlineParamsFromOptLevel(Level)));
+ // For PrepareForThinLTO pass, we disable hot-caller heuristic for sample PGO
+ // because it makes profile annotation in the backend inaccurate.
+ InlineParams IP = getInlineParamsFromOptLevel(Level);
+ if (PrepareForThinLTO && PGOOpt && !PGOOpt->SampleProfileFile.empty())
+ IP.HotCallSiteThreshold = 0;
+ MainCGPipeline.addPass(InlinerPass(IP));
// Now deduce any function attributes based on the current code.
MainCGPipeline.addPass(PostOrderFunctionAttrsPass());
@@ -583,7 +628,11 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level,
// Lastly, add the core function simplification pipeline nested inside the
// CGSCC walk.
MainCGPipeline.addPass(createCGSCCToFunctionPassAdaptor(
- buildFunctionSimplificationPipeline(Level, DebugLogging)));
+ buildFunctionSimplificationPipeline(Level, DebugLogging,
+ PrepareForThinLTO)));
+
+ for (auto &C : CGSCCOptimizerLateEPCallbacks)
+ C(MainCGPipeline, Level);
// We wrap the CGSCC pipeline in a devirtualization repeater. This will try
// to detect when we devirtualize indirect calls and iterate the SCC passes
@@ -643,6 +692,9 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
// rather than on each loop in an inside-out manner, and so they are actually
// function passes.
+ for (auto &C : VectorizerStartEPCallbacks)
+ C(OptimizePM, Level);
+
// First rotate loops that may have been un-rotated by prior passes.
OptimizePM.addPass(createFunctionToLoopPassAdaptor(LoopRotatePass()));
@@ -726,7 +778,8 @@ PassBuilder::buildPerModuleDefaultPipeline(OptimizationLevel Level,
MPM.addPass(ForceFunctionAttrsPass());
// Add the core simplification pipeline.
- MPM.addPass(buildModuleSimplificationPipeline(Level, DebugLogging));
+ MPM.addPass(buildModuleSimplificationPipeline(Level, DebugLogging,
+ /*PrepareForThinLTO=*/false));
// Now add the optimization pipeline.
MPM.addPass(buildModuleOptimizationPipeline(Level, DebugLogging));
@@ -747,7 +800,8 @@ PassBuilder::buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
// If we are planning to perform ThinLTO later, we don't bloat the code with
// unrolling/vectorization/... now. Just simplify the module as much as we
// can.
- MPM.addPass(buildModuleSimplificationPipeline(Level, DebugLogging));
+ MPM.addPass(buildModuleSimplificationPipeline(Level, DebugLogging,
+ /*PrepareForThinLTO=*/true));
// Run partial inlining pass to partially inline functions that have
// large bodies.
@@ -785,7 +839,8 @@ PassBuilder::buildThinLTODefaultPipeline(OptimizationLevel Level,
!PGOOpt->ProfileUseFile.empty()));
// Add the core simplification pipeline.
- MPM.addPass(buildModuleSimplificationPipeline(Level, DebugLogging));
+ MPM.addPass(buildModuleSimplificationPipeline(Level, DebugLogging,
+ /*PrepareForThinLTO=*/false));
// Now add the optimization pipeline.
MPM.addPass(buildModuleOptimizationPipeline(Level, DebugLogging));
@@ -868,8 +923,11 @@ ModulePassManager PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
// simplification opportunities, and both can propagate functions through
// function pointers. When this happens, we often have to resolve varargs
// calls, etc, so let instcombine do this.
- // FIXME: add peephole extensions here as the legacy PM does.
- MPM.addPass(createModuleToFunctionPassAdaptor(InstCombinePass()));
+ FunctionPassManager PeepholeFPM(DebugLogging);
+ PeepholeFPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(PeepholeFPM, Level);
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(PeepholeFPM)));
// Note: historically, the PruneEH pass was run first to deduce nounwind and
// generally clean up exception handling overhead. It isn't clear this is
@@ -887,10 +945,10 @@ ModulePassManager PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
MPM.addPass(GlobalDCEPass());
FunctionPassManager FPM(DebugLogging);
-
// The IPO Passes may leave cruft around. Clean up after them.
- // FIXME: add peephole extensions here as the legacy PM does.
FPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(FPM, Level);
+
FPM.addPass(JumpThreadingPass());
// Break up allocas
@@ -937,8 +995,11 @@ ModulePassManager PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
MainFPM.add(AlignmentFromAssumptionsPass());
#endif
- // FIXME: add peephole extensions to the PM here.
+ // FIXME: Conditionally run LoadCombine here, after it's ported
+ // (in case we still have this pass, given its questionable usefulness).
+
MainFPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(MainFPM, Level);
MainFPM.addPass(JumpThreadingPass());
MPM.addPass(createModuleToFunctionPassAdaptor(std::move(MainFPM)));
@@ -1021,7 +1082,27 @@ static bool startsWithDefaultPipelineAliasPrefix(StringRef Name) {
Name.startswith("lto");
}
-static bool isModulePassName(StringRef Name) {
+/// Tests whether registered callbacks will accept a given pass name.
+///
+/// When parsing a pipeline text, the type of the outermost pipeline may be
+/// omitted, in which case the type is automatically determined from the first
+/// pass name in the text. This may be a name that is handled through one of the
+/// callbacks. We check this through the ordinary parsing callbacks by setting
+/// up a dummy PassManager in order to not force the client to also handle this
+/// type of query.
+template <typename PassManagerT, typename CallbacksT>
+static bool callbacksAcceptPassName(StringRef Name, CallbacksT &Callbacks) {
+ if (!Callbacks.empty()) {
+ PassManagerT DummyPM;
+ for (auto &CB : Callbacks)
+ if (CB(Name, DummyPM, {}))
+ return true;
+ }
+ return false;
+}
+
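// A minimal sketch of the parsing callbacks these helpers consult, assuming a
// PassBuilder instance PB and the registration method declared in
// PassBuilder.h; MyFunctionPass is a hypothetical plugin pass:
PB.registerPipelineParsingCallback(
    [](StringRef Name, FunctionPassManager &FPM,
       ArrayRef<PassBuilder::PipelineElement>) {
      if (Name == "my-function-pass") {
        FPM.addPass(MyFunctionPass());
        return true;
      }
      return false;
    });
// With this in place, a pipeline text starting with "my-function-pass" is
// recognised as a function pipeline by isFunctionPassName() below.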
+template <typename CallbacksT>
+static bool isModulePassName(StringRef Name, CallbacksT &Callbacks) {
// Manually handle aliases for pre-configured pipeline fragments.
if (startsWithDefaultPipelineAliasPrefix(Name))
return DefaultAliasRegex.match(Name);
@@ -1046,10 +1127,11 @@ static bool isModulePassName(StringRef Name) {
return true;
#include "PassRegistry.def"
- return false;
+ return callbacksAcceptPassName<ModulePassManager>(Name, Callbacks);
}
-static bool isCGSCCPassName(StringRef Name) {
+template <typename CallbacksT>
+static bool isCGSCCPassName(StringRef Name, CallbacksT &Callbacks) {
// Explicitly handle pass manager names.
if (Name == "cgscc")
return true;
@@ -1070,10 +1152,11 @@ static bool isCGSCCPassName(StringRef Name) {
return true;
#include "PassRegistry.def"
- return false;
+ return callbacksAcceptPassName<CGSCCPassManager>(Name, Callbacks);
}
-static bool isFunctionPassName(StringRef Name) {
+template <typename CallbacksT>
+static bool isFunctionPassName(StringRef Name, CallbacksT &Callbacks) {
// Explicitly handle pass manager names.
if (Name == "function")
return true;
@@ -1092,10 +1175,11 @@ static bool isFunctionPassName(StringRef Name) {
return true;
#include "PassRegistry.def"
- return false;
+ return callbacksAcceptPassName<FunctionPassManager>(Name, Callbacks);
}
-static bool isLoopPassName(StringRef Name) {
+template <typename CallbacksT>
+static bool isLoopPassName(StringRef Name, CallbacksT &Callbacks) {
// Explicitly handle pass manager names.
if (Name == "loop")
return true;
@@ -1112,7 +1196,7 @@ static bool isLoopPassName(StringRef Name) {
return true;
#include "PassRegistry.def"
- return false;
+ return callbacksAcceptPassName<LoopPassManager>(Name, Callbacks);
}
Optional<std::vector<PassBuilder::PipelineElement>>
@@ -1213,6 +1297,11 @@ bool PassBuilder::parseModulePass(ModulePassManager &MPM,
MPM.addPass(createRepeatedPass(*Count, std::move(NestedMPM)));
return true;
}
+
+ for (auto &C : ModulePipelineParsingCallbacks)
+ if (C(Name, MPM, InnerPipeline))
+ return true;
+
// Normal passes can't have pipelines.
return false;
}
@@ -1225,12 +1314,12 @@ bool PassBuilder::parseModulePass(ModulePassManager &MPM,
assert(Matches.size() == 3 && "Must capture two matched strings!");
OptimizationLevel L = StringSwitch<OptimizationLevel>(Matches[2])
- .Case("O0", O0)
- .Case("O1", O1)
- .Case("O2", O2)
- .Case("O3", O3)
- .Case("Os", Os)
- .Case("Oz", Oz);
+ .Case("O0", O0)
+ .Case("O1", O1)
+ .Case("O2", O2)
+ .Case("O3", O3)
+ .Case("Os", Os)
+ .Case("Oz", Oz);
if (L == O0)
// At O0 we do nothing at all!
return true;
@@ -1270,6 +1359,9 @@ bool PassBuilder::parseModulePass(ModulePassManager &MPM,
}
#include "PassRegistry.def"
+ for (auto &C : ModulePipelineParsingCallbacks)
+ if (C(Name, MPM, InnerPipeline))
+ return true;
return false;
}
@@ -1317,11 +1409,16 @@ bool PassBuilder::parseCGSCCPass(CGSCCPassManager &CGPM,
*MaxRepetitions, DebugLogging));
return true;
}
+
+ for (auto &C : CGSCCPipelineParsingCallbacks)
+ if (C(Name, CGPM, InnerPipeline))
+ return true;
+
// Normal passes can't have pipelines.
return false;
}
- // Now expand the basic registered passes from the .inc file.
+// Now expand the basic registered passes from the .inc file.
#define CGSCC_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
CGPM.addPass(CREATE_PASS); \
@@ -1342,6 +1439,9 @@ bool PassBuilder::parseCGSCCPass(CGSCCPassManager &CGPM,
}
#include "PassRegistry.def"
+ for (auto &C : CGSCCPipelineParsingCallbacks)
+ if (C(Name, CGPM, InnerPipeline))
+ return true;
return false;
}
@@ -1379,11 +1479,16 @@ bool PassBuilder::parseFunctionPass(FunctionPassManager &FPM,
FPM.addPass(createRepeatedPass(*Count, std::move(NestedFPM)));
return true;
}
+
+ for (auto &C : FunctionPipelineParsingCallbacks)
+ if (C(Name, FPM, InnerPipeline))
+ return true;
+
// Normal passes can't have pipelines.
return false;
}
- // Now expand the basic registered passes from the .inc file.
+// Now expand the basic registered passes from the .inc file.
#define FUNCTION_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
FPM.addPass(CREATE_PASS); \
@@ -1403,6 +1508,9 @@ bool PassBuilder::parseFunctionPass(FunctionPassManager &FPM,
}
#include "PassRegistry.def"
+ for (auto &C : FunctionPipelineParsingCallbacks)
+ if (C(Name, FPM, InnerPipeline))
+ return true;
return false;
}
@@ -1430,11 +1538,16 @@ bool PassBuilder::parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
LPM.addPass(createRepeatedPass(*Count, std::move(NestedLPM)));
return true;
}
+
+ for (auto &C : LoopPipelineParsingCallbacks)
+ if (C(Name, LPM, InnerPipeline))
+ return true;
+
// Normal passes can't have pipelines.
return false;
}
- // Now expand the basic registered passes from the .inc file.
+// Now expand the basic registered passes from the .inc file.
#define LOOP_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
LPM.addPass(CREATE_PASS); \
@@ -1455,6 +1568,9 @@ bool PassBuilder::parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
}
#include "PassRegistry.def"
+ for (auto &C : LoopPipelineParsingCallbacks)
+ if (C(Name, LPM, InnerPipeline))
+ return true;
return false;
}
@@ -1473,6 +1589,9 @@ bool PassBuilder::parseAAPassName(AAManager &AA, StringRef Name) {
}
#include "PassRegistry.def"
+ for (auto &C : AAParsingCallbacks)
+ if (C(Name, AA))
+ return true;
return false;
}
@@ -1539,7 +1658,7 @@ bool PassBuilder::parseModulePassPipeline(ModulePassManager &MPM,
return true;
}
-// Primary pass pipeline description parsing routine.
+// Primary pass pipeline description parsing routine for a \c ModulePassManager
// FIXME: Should this routine accept a TargetMachine or require the caller to
// pre-populate the analysis managers with target-specific stuff?
bool PassBuilder::parsePassPipeline(ModulePassManager &MPM,
@@ -1553,21 +1672,70 @@ bool PassBuilder::parsePassPipeline(ModulePassManager &MPM,
// automatically.
StringRef FirstName = Pipeline->front().Name;
- if (!isModulePassName(FirstName)) {
- if (isCGSCCPassName(FirstName))
+ if (!isModulePassName(FirstName, ModulePipelineParsingCallbacks)) {
+ if (isCGSCCPassName(FirstName, CGSCCPipelineParsingCallbacks)) {
Pipeline = {{"cgscc", std::move(*Pipeline)}};
- else if (isFunctionPassName(FirstName))
+ } else if (isFunctionPassName(FirstName,
+ FunctionPipelineParsingCallbacks)) {
Pipeline = {{"function", std::move(*Pipeline)}};
- else if (isLoopPassName(FirstName))
+ } else if (isLoopPassName(FirstName, LoopPipelineParsingCallbacks)) {
Pipeline = {{"function", {{"loop", std::move(*Pipeline)}}}};
- else
+ } else {
+ for (auto &C : TopLevelPipelineParsingCallbacks)
+ if (C(MPM, *Pipeline, VerifyEachPass, DebugLogging))
+ return true;
+
// Unknown pass name!
return false;
+ }
}
return parseModulePassPipeline(MPM, *Pipeline, VerifyEachPass, DebugLogging);
}
+// Primary pass pipeline description parsing routine for a \c CGSCCPassManager
+bool PassBuilder::parsePassPipeline(CGSCCPassManager &CGPM,
+ StringRef PipelineText, bool VerifyEachPass,
+ bool DebugLogging) {
+ auto Pipeline = parsePipelineText(PipelineText);
+ if (!Pipeline || Pipeline->empty())
+ return false;
+
+ StringRef FirstName = Pipeline->front().Name;
+ if (!isCGSCCPassName(FirstName, CGSCCPipelineParsingCallbacks))
+ return false;
+
+ return parseCGSCCPassPipeline(CGPM, *Pipeline, VerifyEachPass, DebugLogging);
+}
+
+// Primary pass pipeline description parsing routine for a \c
+// FunctionPassManager
+bool PassBuilder::parsePassPipeline(FunctionPassManager &FPM,
+ StringRef PipelineText, bool VerifyEachPass,
+ bool DebugLogging) {
+ auto Pipeline = parsePipelineText(PipelineText);
+ if (!Pipeline || Pipeline->empty())
+ return false;
+
+ StringRef FirstName = Pipeline->front().Name;
+ if (!isFunctionPassName(FirstName, FunctionPipelineParsingCallbacks))
+ return false;
+
+ return parseFunctionPassPipeline(FPM, *Pipeline, VerifyEachPass,
+ DebugLogging);
+}
+
+// Primary pass pipeline description parsing routine for a \c LoopPassManager
+bool PassBuilder::parsePassPipeline(LoopPassManager &CGPM,
+ StringRef PipelineText, bool VerifyEachPass,
+ bool DebugLogging) {
+ auto Pipeline = parsePipelineText(PipelineText);
+ if (!Pipeline || Pipeline->empty())
+ return false;
+
+ return parseLoopPassPipeline(CGPM, *Pipeline, VerifyEachPass, DebugLogging);
+}
+
bool PassBuilder::parseAAPipeline(AAManager &AA, StringRef PipelineText) {
// If the pipeline just consists of the word 'default' just replace the AA
// manager with our default one.
diff --git a/lib/ProfileData/InstrProf.cpp b/lib/ProfileData/InstrProf.cpp
index a1d18724fcd5..48c1643cb13c 100644
--- a/lib/ProfileData/InstrProf.cpp
+++ b/lib/ProfileData/InstrProf.cpp
@@ -460,9 +460,9 @@ Error readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab) {
return Error::success();
}
-void InstrProfValueSiteRecord::merge(SoftInstrProfErrors &SIPE,
- InstrProfValueSiteRecord &Input,
- uint64_t Weight) {
+void InstrProfValueSiteRecord::merge(InstrProfValueSiteRecord &Input,
+ uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn) {
this->sortByTargetValues();
Input.sortByTargetValues();
auto I = ValueData.begin();
@@ -475,7 +475,7 @@ void InstrProfValueSiteRecord::merge(SoftInstrProfErrors &SIPE,
bool Overflowed;
I->Count = SaturatingMultiplyAdd(J->Count, Weight, I->Count, &Overflowed);
if (Overflowed)
- SIPE.addError(instrprof_error::counter_overflow);
+ Warn(instrprof_error::counter_overflow);
++I;
continue;
}
@@ -483,25 +483,25 @@ void InstrProfValueSiteRecord::merge(SoftInstrProfErrors &SIPE,
}
}
-void InstrProfValueSiteRecord::scale(SoftInstrProfErrors &SIPE,
- uint64_t Weight) {
+void InstrProfValueSiteRecord::scale(uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn) {
for (auto I = ValueData.begin(), IE = ValueData.end(); I != IE; ++I) {
bool Overflowed;
I->Count = SaturatingMultiply(I->Count, Weight, &Overflowed);
if (Overflowed)
- SIPE.addError(instrprof_error::counter_overflow);
+ Warn(instrprof_error::counter_overflow);
}
}
// Merge Value Profile data from Src record to this record for ValueKind.
// Scale merged value counts by \p Weight.
-void InstrProfRecord::mergeValueProfData(uint32_t ValueKind,
- InstrProfRecord &Src,
- uint64_t Weight) {
+void InstrProfRecord::mergeValueProfData(
+ uint32_t ValueKind, InstrProfRecord &Src, uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn) {
uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
uint32_t OtherNumValueSites = Src.getNumValueSites(ValueKind);
if (ThisNumValueSites != OtherNumValueSites) {
- SIPE.addError(instrprof_error::value_site_count_mismatch);
+ Warn(instrprof_error::value_site_count_mismatch);
return;
}
if (!ThisNumValueSites)
@@ -511,14 +511,15 @@ void InstrProfRecord::mergeValueProfData(uint32_t ValueKind,
MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
Src.getValueSitesForKind(ValueKind);
for (uint32_t I = 0; I < ThisNumValueSites; I++)
- ThisSiteRecords[I].merge(SIPE, OtherSiteRecords[I], Weight);
+ ThisSiteRecords[I].merge(OtherSiteRecords[I], Weight, Warn);
}
-void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight) {
+void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn) {
// If the number of counters doesn't match we either have bad data
// or a hash collision.
if (Counts.size() != Other.Counts.size()) {
- SIPE.addError(instrprof_error::count_mismatch);
+ Warn(instrprof_error::count_mismatch);
return;
}
@@ -527,27 +528,30 @@ void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight) {
Counts[I] =
SaturatingMultiplyAdd(Other.Counts[I], Weight, Counts[I], &Overflowed);
if (Overflowed)
- SIPE.addError(instrprof_error::counter_overflow);
+ Warn(instrprof_error::counter_overflow);
}
for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
- mergeValueProfData(Kind, Other, Weight);
+ mergeValueProfData(Kind, Other, Weight, Warn);
}
-void InstrProfRecord::scaleValueProfData(uint32_t ValueKind, uint64_t Weight) {
+void InstrProfRecord::scaleValueProfData(
+ uint32_t ValueKind, uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn) {
for (auto &R : getValueSitesForKind(ValueKind))
- R.scale(SIPE, Weight);
+ R.scale(Weight, Warn);
}
-void InstrProfRecord::scale(uint64_t Weight) {
+void InstrProfRecord::scale(uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn) {
for (auto &Count : this->Counts) {
bool Overflowed;
Count = SaturatingMultiply(Count, Weight, &Overflowed);
if (Overflowed)
- SIPE.addError(instrprof_error::counter_overflow);
+ Warn(instrprof_error::counter_overflow);
}
for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
- scaleValueProfData(Kind, Weight);
+ scaleValueProfData(Kind, Weight, Warn);
}
// Map indirect call target name hash to name string.
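// A minimal sketch of the new warning-callback interface; Dst and Src are
// assumed to be InstrProfRecords for the same function name and hash:
unsigned SoftErrors = 0;
Dst.merge(Src, /*Weight=*/1,
          [&](instrprof_error) { ++SoftErrors; });  // e.g. counter_overflow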
diff --git a/lib/ProfileData/InstrProfReader.cpp b/lib/ProfileData/InstrProfReader.cpp
index 1ed1fb8b6f0b..1b39a0695aac 100644
--- a/lib/ProfileData/InstrProfReader.cpp
+++ b/lib/ProfileData/InstrProfReader.cpp
@@ -221,7 +221,7 @@ TextInstrProfReader::readValueProfileData(InstrProfRecord &Record) {
#undef VP_READ_ADVANCE
}
-Error TextInstrProfReader::readNextRecord(InstrProfRecord &Record) {
+Error TextInstrProfReader::readNextRecord(NamedInstrProfRecord &Record) {
// Skip empty lines and comments.
while (!Line.is_at_end() && (Line->empty() || Line->startswith("#")))
++Line;
@@ -377,13 +377,13 @@ Error RawInstrProfReader<IntPtrT>::readHeader(
}
template <class IntPtrT>
-Error RawInstrProfReader<IntPtrT>::readName(InstrProfRecord &Record) {
+Error RawInstrProfReader<IntPtrT>::readName(NamedInstrProfRecord &Record) {
Record.Name = getName(Data->NameRef);
return success();
}
template <class IntPtrT>
-Error RawInstrProfReader<IntPtrT>::readFuncHash(InstrProfRecord &Record) {
+Error RawInstrProfReader<IntPtrT>::readFuncHash(NamedInstrProfRecord &Record) {
Record.Hash = swap(Data->FuncHash);
return success();
}
@@ -445,7 +445,7 @@ Error RawInstrProfReader<IntPtrT>::readValueProfilingData(
}
template <class IntPtrT>
-Error RawInstrProfReader<IntPtrT>::readNextRecord(InstrProfRecord &Record) {
+Error RawInstrProfReader<IntPtrT>::readNextRecord(NamedInstrProfRecord &Record) {
if (atEnd())
// At this point, ValueDataStart field points to the next header.
if (Error E = readNextHeader(getNextHeaderPos()))
@@ -550,7 +550,7 @@ data_type InstrProfLookupTrait::ReadData(StringRef K, const unsigned char *D,
template <typename HashTableImpl>
Error InstrProfReaderIndex<HashTableImpl>::getRecords(
- StringRef FuncName, ArrayRef<InstrProfRecord> &Data) {
+ StringRef FuncName, ArrayRef<NamedInstrProfRecord> &Data) {
auto Iter = HashTable->find(FuncName);
if (Iter == HashTable->end())
return make_error<InstrProfError>(instrprof_error::unknown_function);
@@ -564,7 +564,7 @@ Error InstrProfReaderIndex<HashTableImpl>::getRecords(
template <typename HashTableImpl>
Error InstrProfReaderIndex<HashTableImpl>::getRecords(
- ArrayRef<InstrProfRecord> &Data) {
+ ArrayRef<NamedInstrProfRecord> &Data) {
if (atEnd())
return make_error<InstrProfError>(instrprof_error::eof);
@@ -644,7 +644,7 @@ IndexedInstrProfReader::readSummary(IndexedInstrProf::ProfVersion Version,
InstrProfSummaryBuilder Builder(ProfileSummaryBuilder::DefaultCutoffs);
// FIXME: This only computes an empty summary. Need to call addRecord for
- // all InstrProfRecords to get the correct summary.
+ // all NamedInstrProfRecords to get the correct summary.
this->Summary = Builder.getSummary();
return Cur;
}
@@ -707,7 +707,7 @@ InstrProfSymtab &IndexedInstrProfReader::getSymtab() {
Expected<InstrProfRecord>
IndexedInstrProfReader::getInstrProfRecord(StringRef FuncName,
uint64_t FuncHash) {
- ArrayRef<InstrProfRecord> Data;
+ ArrayRef<NamedInstrProfRecord> Data;
Error Err = Index->getRecords(FuncName, Data);
if (Err)
return std::move(Err);
@@ -732,10 +732,10 @@ Error IndexedInstrProfReader::getFunctionCounts(StringRef FuncName,
return success();
}
-Error IndexedInstrProfReader::readNextRecord(InstrProfRecord &Record) {
+Error IndexedInstrProfReader::readNextRecord(NamedInstrProfRecord &Record) {
static unsigned RecordIndex = 0;
- ArrayRef<InstrProfRecord> Data;
+ ArrayRef<NamedInstrProfRecord> Data;
Error E = Index->getRecords(Data);
if (E)
diff --git a/lib/ProfileData/InstrProfWriter.cpp b/lib/ProfileData/InstrProfWriter.cpp
index 9efea78ed2a8..ce3f8806e12e 100644
--- a/lib/ProfileData/InstrProfWriter.cpp
+++ b/lib/ProfileData/InstrProfWriter.cpp
@@ -176,38 +176,46 @@ void InstrProfWriter::setOutputSparse(bool Sparse) {
this->Sparse = Sparse;
}
-Error InstrProfWriter::addRecord(InstrProfRecord &&I, uint64_t Weight) {
- auto &ProfileDataMap = FunctionData[I.Name];
+void InstrProfWriter::addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
+ function_ref<void(Error)> Warn) {
+ auto Name = I.Name;
+ auto Hash = I.Hash;
+ addRecord(Name, Hash, std::move(I), Weight, Warn);
+}
+
+void InstrProfWriter::addRecord(StringRef Name, uint64_t Hash,
+ InstrProfRecord &&I, uint64_t Weight,
+ function_ref<void(Error)> Warn) {
+ auto &ProfileDataMap = FunctionData[Name];
bool NewFunc;
ProfilingData::iterator Where;
std::tie(Where, NewFunc) =
- ProfileDataMap.insert(std::make_pair(I.Hash, InstrProfRecord()));
+ ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
InstrProfRecord &Dest = Where->second;
+ auto MapWarn = [&](instrprof_error E) {
+ Warn(make_error<InstrProfError>(E));
+ };
+
if (NewFunc) {
// We've never seen a function with this name and hash, add it.
Dest = std::move(I);
- // Fix up the name to avoid dangling reference.
- Dest.Name = FunctionData.find(Dest.Name)->getKey();
if (Weight > 1)
- Dest.scale(Weight);
+ Dest.scale(Weight, MapWarn);
} else {
// We're updating a function we've seen before.
- Dest.merge(I, Weight);
+ Dest.merge(I, Weight, MapWarn);
}
Dest.sortValueData();
-
- return Dest.takeError();
}
-Error InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW) {
+void InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW,
+ function_ref<void(Error)> Warn) {
for (auto &I : IPW.FunctionData)
for (auto &Func : I.getValue())
- if (Error E = addRecord(std::move(Func.second), 1))
- return E;
- return Error::success();
+ addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn);
}
bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
@@ -323,11 +331,12 @@ static const char *ValueProfKindStr[] = {
#include "llvm/ProfileData/InstrProfData.inc"
};
-void InstrProfWriter::writeRecordInText(const InstrProfRecord &Func,
+void InstrProfWriter::writeRecordInText(StringRef Name, uint64_t Hash,
+ const InstrProfRecord &Func,
InstrProfSymtab &Symtab,
raw_fd_ostream &OS) {
- OS << Func.Name << "\n";
- OS << "# Func Hash:\n" << Func.Hash << "\n";
+ OS << Name << "\n";
+ OS << "# Func Hash:\n" << Hash << "\n";
OS << "# Num Counters:\n" << Func.Counts.size() << "\n";
OS << "# Counter Values:\n";
for (uint64_t Count : Func.Counts)
@@ -375,6 +384,6 @@ Error InstrProfWriter::writeText(raw_fd_ostream &OS) {
for (const auto &I : FunctionData)
if (shouldEncodeData(I.getValue()))
for (const auto &Func : I.getValue())
- writeRecordInText(Func.second, Symtab, OS);
+ writeRecordInText(I.getKey(), Func.first, Func.second, Symtab, OS);
return Error::success();
}
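// A minimal sketch of the callback form of addRecord(); Writer and Record are
// assumed to exist, and the lambda chooses how to report soft merge errors:
Writer.addRecord(std::move(Record), /*Weight=*/1,
                 [](Error E) { consumeError(std::move(E)); });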
diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp
index 0345a5e3d2a1..50173f5256bf 100644
--- a/lib/Support/CommandLine.cpp
+++ b/lib/Support/CommandLine.cpp
@@ -1236,7 +1236,7 @@ bool CommandLineParser::ParseCommandLineOptions(int argc,
<< ": Not enough positional command line arguments specified!\n"
<< "Must specify at least " << NumPositionalRequired
<< " positional argument" << (NumPositionalRequired > 1 ? "s" : "")
- << ": See: " << argv[0] << " - help\n";
+ << ": See: " << argv[0] << " -help\n";
ErrorParsing = true;
} else if (!HasUnlimitedPositionals &&
diff --git a/lib/Support/DynamicLibrary.cpp b/lib/Support/DynamicLibrary.cpp
index 9398789cea87..d8422115eae8 100644
--- a/lib/Support/DynamicLibrary.cpp
+++ b/lib/Support/DynamicLibrary.cpp
@@ -14,6 +14,7 @@
#include "llvm/Support/DynamicLibrary.h"
#include "llvm-c/Support.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Config/config.h"
#include "llvm/Support/ManagedStatic.h"
@@ -73,19 +74,37 @@ public:
return true;
}
- void *Lookup(const char *Symbol) {
- // Process handle gets first try.
+ void *LibLookup(const char *Symbol, DynamicLibrary::SearchOrdering Order) {
+ if (Order & SO_LoadOrder) {
+ for (void *Handle : Handles) {
+ if (void *Ptr = DLSym(Handle, Symbol))
+ return Ptr;
+ }
+ } else {
+ for (void *Handle : llvm::reverse(Handles)) {
+ if (void *Ptr = DLSym(Handle, Symbol))
+ return Ptr;
+ }
+ }
+ return nullptr;
+ }
+
+ void *Lookup(const char *Symbol, DynamicLibrary::SearchOrdering Order) {
+ assert(!((Order & SO_LoadedFirst) && (Order & SO_LoadedLast)) &&
+ "Invalid Ordering");
+
+ if (!Process || (Order & SO_LoadedFirst)) {
+ if (void *Ptr = LibLookup(Symbol, Order))
+ return Ptr;
+ }
if (Process) {
+ // Use OS facilities to search the current binary and all loaded libs.
if (void *Ptr = DLSym(Process, Symbol))
return Ptr;
-#ifndef NDEBUG
- for (void *Handle : Handles)
- assert(!DLSym(Handle, Symbol) && "Symbol exists in non process handle");
-#endif
- } else {
- // Iterate in reverse, so newer libraries/symbols override older.
- for (auto &&I = Handles.rbegin(), E = Handles.rend(); I != E; ++I) {
- if (void *Ptr = DLSym(*I, Symbol))
+
+ // Search any libs that might have been skipped because of RTLD_LOCAL.
+ if (Order & SO_LoadedLast) {
+ if (void *Ptr = LibLookup(Symbol, Order))
return Ptr;
}
}
@@ -113,6 +132,8 @@ static llvm::ManagedStatic<llvm::sys::SmartMutex<true>> SymbolsMutex;
#endif
char DynamicLibrary::Invalid;
+DynamicLibrary::SearchOrdering DynamicLibrary::SearchOrder =
+ DynamicLibrary::SO_Linker;
namespace llvm {
void *SearchForAddressOfSpecialSymbol(const char *SymbolName) {
@@ -170,7 +191,7 @@ void *DynamicLibrary::SearchForAddressOfSymbol(const char *SymbolName) {
// Now search the libraries.
if (OpenedHandles.isConstructed()) {
- if (void *Ptr = OpenedHandles->Lookup(SymbolName))
+ if (void *Ptr = OpenedHandles->Lookup(SymbolName, SearchOrder))
return Ptr;
}
}
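
A client-side sketch of the new SearchOrder knob (illustrative; the function name is made up, the SO_* enumerators are the ones referenced in the hunk above):

    #include "llvm/Support/DynamicLibrary.h"
    using namespace llvm::sys;

    void *lookupPreferringLoadedLibs(const char *Name) {
      // Search explicitly loaded libraries before the process image. SO_Linker,
      // the default restored at shutdown, keeps the old process-first behaviour.
      DynamicLibrary::SearchOrder = DynamicLibrary::SO_LoadedFirst;
      return DynamicLibrary::SearchForAddressOfSymbol(Name);
    }
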
diff --git a/lib/Support/ErrorHandling.cpp b/lib/Support/ErrorHandling.cpp
index a7d3a18003ee..fe69151665c6 100644
--- a/lib/Support/ErrorHandling.cpp
+++ b/lib/Support/ErrorHandling.cpp
@@ -20,15 +20,14 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/Mutex.h"
-#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/WindowsError.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
+#include <mutex>
+#include <new>
#if defined(HAVE_UNISTD_H)
# include <unistd.h>
@@ -43,18 +42,25 @@ using namespace llvm;
static fatal_error_handler_t ErrorHandler = nullptr;
static void *ErrorHandlerUserData = nullptr;
-static ManagedStatic<sys::Mutex> ErrorHandlerMutex;
+static fatal_error_handler_t BadAllocErrorHandler = nullptr;
+static void *BadAllocErrorHandlerUserData = nullptr;
+
+// Mutexes to synchronize installing error handlers and calling error handlers.
+// Do not use ManagedStatic, or that may allocate memory while attempting to
+// report an OOM.
+static std::mutex ErrorHandlerMutex;
+static std::mutex BadAllocErrorHandlerMutex;
void llvm::install_fatal_error_handler(fatal_error_handler_t handler,
void *user_data) {
- llvm::MutexGuard Lock(*ErrorHandlerMutex);
+ std::lock_guard<std::mutex> Lock(ErrorHandlerMutex);
assert(!ErrorHandler && "Error handler already registered!\n");
ErrorHandler = handler;
ErrorHandlerUserData = user_data;
}
void llvm::remove_fatal_error_handler() {
- llvm::MutexGuard Lock(*ErrorHandlerMutex);
+ std::lock_guard<std::mutex> Lock(ErrorHandlerMutex);
ErrorHandler = nullptr;
ErrorHandlerUserData = nullptr;
}
@@ -77,7 +83,7 @@ void llvm::report_fatal_error(const Twine &Reason, bool GenCrashDiag) {
{
// Only acquire the mutex while reading the handler, so as not to invoke a
// user-supplied callback under a lock.
- llvm::MutexGuard Lock(*ErrorHandlerMutex);
+ std::lock_guard<std::mutex> Lock(ErrorHandlerMutex);
handler = ErrorHandler;
handlerData = ErrorHandlerUserData;
}
@@ -104,6 +110,48 @@ void llvm::report_fatal_error(const Twine &Reason, bool GenCrashDiag) {
exit(1);
}
+void llvm::install_bad_alloc_error_handler(fatal_error_handler_t handler,
+ void *user_data) {
+ std::lock_guard<std::mutex> Lock(BadAllocErrorHandlerMutex);
+ assert(!BadAllocErrorHandler && "Bad alloc error handler already registered!\n");
+ BadAllocErrorHandler = handler;
+ BadAllocErrorHandlerUserData = user_data;
+}
+
+void llvm::remove_bad_alloc_error_handler() {
+ std::lock_guard<std::mutex> Lock(BadAllocErrorHandlerMutex);
+ BadAllocErrorHandler = nullptr;
+ BadAllocErrorHandlerUserData = nullptr;
+}
+
+void llvm::report_bad_alloc_error(const char *Reason, bool GenCrashDiag) {
+ fatal_error_handler_t Handler = nullptr;
+ void *HandlerData = nullptr;
+ {
+ // Only acquire the mutex while reading the handler, so as not to invoke a
+ // user-supplied callback under a lock.
+ std::lock_guard<std::mutex> Lock(BadAllocErrorHandlerMutex);
+ Handler = BadAllocErrorHandler;
+ HandlerData = BadAllocErrorHandlerUserData;
+ }
+
+ if (Handler) {
+ Handler(HandlerData, Reason, GenCrashDiag);
+ llvm_unreachable("bad alloc handler should not return");
+ }
+
+#ifdef LLVM_ENABLE_EXCEPTIONS
+ // If exceptions are enabled, make OOM in malloc look like OOM in new.
+ throw std::bad_alloc();
+#else
+ // Don't call the normal error handler. It may allocate memory. Directly write
+ // an OOM to stderr and abort.
+ char OOMMessage[] = "LLVM ERROR: out of memory\n";
+ (void)::write(2, OOMMessage, strlen(OOMMessage));
+ abort();
+#endif
+}
+
void llvm::llvm_unreachable_internal(const char *msg, const char *file,
unsigned line) {
// This code intentionally doesn't call the ErrorHandler callback, because
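
A minimal registration sketch for the new bad-alloc hook, assuming the same handler signature install_fatal_error_handler uses (user data, reason string, crash-diagnostic flag); names are illustrative:

    #include "llvm/Support/ErrorHandling.h"
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Must not allocate and must not return.
    static void onBadAlloc(void *UserData, const std::string &Reason,
                           bool GenCrashDiag) {
      (void)UserData;
      (void)GenCrashDiag;
      std::fputs("out of memory: ", stderr);
      std::fputs(Reason.c_str(), stderr);
      std::abort();
    }

    int main() {
      llvm::install_bad_alloc_error_handler(onBadAlloc);
      // ... compiler work; an OOM reported via report_bad_alloc_error reaches
      // onBadAlloc instead of the regular fatal-error path ...
      llvm::remove_bad_alloc_error_handler();
      return 0;
    }
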
diff --git a/lib/Support/Host.cpp b/lib/Support/Host.cpp
index 232efe648b03..9f22f89b3c9e 100644
--- a/lib/Support/Host.cpp
+++ b/lib/Support/Host.cpp
@@ -281,11 +281,17 @@ enum ProcessorVendors {
};
enum ProcessorTypes {
- INTEL_ATOM = 1,
+ INTEL_BONNELL = 1,
INTEL_CORE2,
INTEL_COREI7,
AMDFAM10H,
AMDFAM15H,
+ INTEL_SILVERMONT,
+ INTEL_KNL,
+ AMD_BTVER1,
+ AMD_BTVER2,
+ AMDFAM17H,
+ // Entries below this are not in libgcc/compiler-rt.
INTEL_i386,
INTEL_i486,
INTEL_PENTIUM,
@@ -295,16 +301,13 @@ enum ProcessorTypes {
INTEL_PENTIUM_IV,
INTEL_PENTIUM_M,
INTEL_CORE_DUO,
- INTEL_XEONPHI,
INTEL_X86_64,
INTEL_NOCONA,
INTEL_PRESCOTT,
AMD_i486,
AMDPENTIUM,
AMDATHLON,
- AMDFAM14H,
- AMDFAM16H,
- AMDFAM17H,
+ INTEL_GOLDMONT,
CPU_TYPE_MAX
};
@@ -317,34 +320,26 @@ enum ProcessorSubtypes {
AMDFAM10H_ISTANBUL,
AMDFAM15H_BDVER1,
AMDFAM15H_BDVER2,
- INTEL_PENTIUM_MMX,
- INTEL_CORE2_65,
- INTEL_CORE2_45,
+ AMDFAM15H_BDVER3,
+ AMDFAM15H_BDVER4,
+ AMDFAM17H_ZNVER1,
INTEL_COREI7_IVYBRIDGE,
INTEL_COREI7_HASWELL,
INTEL_COREI7_BROADWELL,
INTEL_COREI7_SKYLAKE,
INTEL_COREI7_SKYLAKE_AVX512,
- INTEL_ATOM_BONNELL,
- INTEL_ATOM_SILVERMONT,
- INTEL_ATOM_GOLDMONT,
- INTEL_KNIGHTS_LANDING,
+ // Entries below this are not in libgcc/compiler-rt.
+ INTEL_PENTIUM_MMX,
+ INTEL_CORE2_65,
+ INTEL_CORE2_45,
AMDPENTIUM_K6,
AMDPENTIUM_K62,
AMDPENTIUM_K63,
AMDPENTIUM_GEODE,
- AMDATHLON_TBIRD,
- AMDATHLON_MP,
+ AMDATHLON_CLASSIC,
AMDATHLON_XP,
+ AMDATHLON_K8,
AMDATHLON_K8SSE3,
- AMDATHLON_OPTERON,
- AMDATHLON_FX,
- AMDATHLON_64,
- AMD_BTVER1,
- AMD_BTVER2,
- AMDFAM15H_BDVER3,
- AMDFAM15H_BDVER4,
- AMDFAM17H_ZNVER1,
CPU_SUBTYPE_MAX
};
@@ -360,9 +355,28 @@ enum ProcessorFeatures {
FEATURE_SSE4_2,
FEATURE_AVX,
FEATURE_AVX2,
- FEATURE_AVX512,
- FEATURE_AVX512SAVE,
- FEATURE_MOVBE,
+ FEATURE_SSE4_A,
+ FEATURE_FMA4,
+ FEATURE_XOP,
+ FEATURE_FMA,
+ FEATURE_AVX512F,
+ FEATURE_BMI,
+ FEATURE_BMI2,
+ FEATURE_AES,
+ FEATURE_PCLMUL,
+ FEATURE_AVX512VL,
+ FEATURE_AVX512BW,
+ FEATURE_AVX512DQ,
+ FEATURE_AVX512CD,
+ FEATURE_AVX512ER,
+ FEATURE_AVX512PF,
+ FEATURE_AVX512VBMI,
+ FEATURE_AVX512IFMA,
+ FEATURE_AVX5124VNNIW,
+ FEATURE_AVX5124FMAPS,
+ FEATURE_AVX512VPOPCNTDQ,
+ // Only one bit free left in the first 32 features.
+ FEATURE_MOVBE = 32,
FEATURE_ADX,
FEATURE_EM64T
};
@@ -406,7 +420,6 @@ static bool isCpuIdSupported() {
/// the specified arguments. If we can't run cpuid on the host, return true.
static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
unsigned *rECX, unsigned *rEDX) {
-#if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER)
#if defined(__GNUC__) || defined(__clang__)
#if defined(__x86_64__)
// gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
@@ -416,14 +429,16 @@ static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
"xchgq\t%%rbx, %%rsi\n\t"
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
: "a"(value));
+ return false;
#elif defined(__i386__)
__asm__("movl\t%%ebx, %%esi\n\t"
"cpuid\n\t"
"xchgl\t%%ebx, %%esi\n\t"
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
: "a"(value));
+ return false;
#else
- assert(0 && "This method is defined only for x86.");
+ return true;
#endif
#elif defined(_MSC_VER)
// The MSVC intrinsic is portable across x86 and x64.
@@ -433,7 +448,6 @@ static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
*rEBX = registers[1];
*rECX = registers[2];
*rEDX = registers[3];
-#endif
return false;
#else
return true;
@@ -446,16 +460,16 @@ static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
unsigned *rEAX, unsigned *rEBX, unsigned *rECX,
unsigned *rEDX) {
-#if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER)
#if defined(__x86_64__) || defined(_M_X64)
#if defined(__GNUC__) || defined(__clang__)
- // gcc doesn't know cpuid would clobber ebx/rbx. Preseve it manually.
+ // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
// FIXME: should we save this for Clang?
__asm__("movq\t%%rbx, %%rsi\n\t"
"cpuid\n\t"
"xchgq\t%%rbx, %%rsi\n\t"
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
: "a"(value), "c"(subleaf));
+ return false;
#elif defined(_MSC_VER)
int registers[4];
__cpuidex(registers, value, subleaf);
@@ -463,6 +477,9 @@ static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
*rEBX = registers[1];
*rECX = registers[2];
*rEDX = registers[3];
+ return false;
+#else
+ return true;
#endif
#elif defined(__i386__) || defined(_M_IX86)
#if defined(__GNUC__) || defined(__clang__)
@@ -471,6 +488,7 @@ static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
"xchgl\t%%ebx, %%esi\n\t"
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
: "a"(value), "c"(subleaf));
+ return false;
#elif defined(_MSC_VER)
__asm {
mov eax,value
@@ -485,16 +503,16 @@ static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
mov esi,rEDX
mov dword ptr [esi],edx
}
-#endif
+ return false;
#else
- assert(0 && "This method is defined only for x86.");
+ return true;
#endif
- return false;
#else
return true;
#endif
}
+// Read extended control register 0 (XCR0). Used to detect features such as AVX.
static bool getX86XCR0(unsigned *rEAX, unsigned *rEDX) {
#if defined(__GNUC__) || defined(__clang__)
// Check xgetbv; this uses a .byte sequence instead of the instruction
@@ -526,9 +544,10 @@ static void detectX86FamilyModel(unsigned EAX, unsigned *Family,
}
static void
-getIntelProcessorTypeAndSubtype(unsigned int Family, unsigned int Model,
- unsigned int Brand_id, unsigned int Features,
- unsigned *Type, unsigned *Subtype) {
+getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
+ unsigned Brand_id, unsigned Features,
+ unsigned Features2, unsigned *Type,
+ unsigned *Subtype) {
if (Brand_id != 0)
return;
switch (Family) {
@@ -681,12 +700,7 @@ getIntelProcessorTypeAndSubtype(unsigned int Family, unsigned int Model,
// Skylake Xeon:
case 0x55:
*Type = INTEL_COREI7;
- // Check that we really have AVX512
- if (Features & (1 << FEATURE_AVX512)) {
- *Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512"
- } else {
- *Subtype = INTEL_COREI7_SKYLAKE; // "skylake"
- }
+ *Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512"
break;
case 0x1c: // Most 45 nm Intel Atom processors
@@ -694,8 +708,7 @@ getIntelProcessorTypeAndSubtype(unsigned int Family, unsigned int Model,
case 0x27: // 32 nm Atom Medfield
case 0x35: // 32 nm Atom Midview
case 0x36: // 32 nm Atom Midview
- *Type = INTEL_ATOM;
- *Subtype = INTEL_ATOM_BONNELL;
+ *Type = INTEL_BONNELL;
break; // "bonnell"
// Atom Silvermont codes from the Intel software optimization guide.
@@ -705,27 +718,23 @@ getIntelProcessorTypeAndSubtype(unsigned int Family, unsigned int Model,
case 0x5a:
case 0x5d:
case 0x4c: // really airmont
- *Type = INTEL_ATOM;
- *Subtype = INTEL_ATOM_SILVERMONT;
+ *Type = INTEL_SILVERMONT;
break; // "silvermont"
// Goldmont:
case 0x5c:
case 0x5f:
- *Type = INTEL_ATOM;
- *Subtype = INTEL_ATOM_GOLDMONT;
+ *Type = INTEL_GOLDMONT;
break; // "goldmont"
case 0x57:
- *Type = INTEL_XEONPHI; // knl
- *Subtype = INTEL_KNIGHTS_LANDING;
+ *Type = INTEL_KNL; // knl
break;
default: // Unknown family 6 CPU, try to guess.
- if (Features & (1 << FEATURE_AVX512)) {
- *Type = INTEL_XEONPHI; // knl
- *Subtype = INTEL_KNIGHTS_LANDING;
+ if (Features & (1 << FEATURE_AVX512F)) {
+ *Type = INTEL_KNL; // knl
break;
}
- if (Features & (1 << FEATURE_ADX)) {
+ if (Features2 & (1 << (FEATURE_ADX - 32))) {
*Type = INTEL_COREI7;
*Subtype = INTEL_COREI7_BROADWELL;
break;
@@ -741,9 +750,8 @@ getIntelProcessorTypeAndSubtype(unsigned int Family, unsigned int Model,
break;
}
if (Features & (1 << FEATURE_SSE4_2)) {
- if (Features & (1 << FEATURE_MOVBE)) {
- *Type = INTEL_ATOM;
- *Subtype = INTEL_ATOM_SILVERMONT;
+ if (Features2 & (1 << (FEATURE_MOVBE - 32))) {
+ *Type = INTEL_SILVERMONT;
} else {
*Type = INTEL_COREI7;
*Subtype = INTEL_COREI7_NEHALEM;
@@ -756,16 +764,15 @@ getIntelProcessorTypeAndSubtype(unsigned int Family, unsigned int Model,
break;
}
if (Features & (1 << FEATURE_SSSE3)) {
- if (Features & (1 << FEATURE_MOVBE)) {
- *Type = INTEL_ATOM;
- *Subtype = INTEL_ATOM_BONNELL; // "bonnell"
+ if (Features2 & (1 << (FEATURE_MOVBE - 32))) {
+ *Type = INTEL_BONNELL; // "bonnell"
} else {
*Type = INTEL_CORE2; // "core2"
*Subtype = INTEL_CORE2_65;
}
break;
}
- if (Features & (1 << FEATURE_EM64T)) {
+ if (Features2 & (1 << (FEATURE_EM64T - 32))) {
*Type = INTEL_X86_64;
break; // x86-64
}
@@ -796,8 +803,8 @@ getIntelProcessorTypeAndSubtype(unsigned int Family, unsigned int Model,
// Intel Xeon processor, Intel Xeon processor MP, Intel Celeron
// processor, and Mobile Intel Celeron processor. All processors
// are model 02h and manufactured using the 0.13 micron process.
- *Type =
- ((Features & (1 << FEATURE_EM64T)) ? INTEL_X86_64 : INTEL_PENTIUM_IV);
+ *Type = ((Features2 & (1 << (FEATURE_EM64T - 32))) ? INTEL_X86_64
+ : INTEL_PENTIUM_IV);
break;
case 3: // Pentium 4 processor, Intel Xeon processor, Intel Celeron D
@@ -811,13 +818,13 @@ getIntelProcessorTypeAndSubtype(unsigned int Family, unsigned int Model,
// Extreme Edition, Intel Xeon processor, Intel Xeon processor
// MP, Intel Celeron D processor. All processors are model 06h
// and manufactured using the 65 nm process.
- *Type =
- ((Features & (1 << FEATURE_EM64T)) ? INTEL_NOCONA : INTEL_PRESCOTT);
+ *Type = ((Features2 & (1 << (FEATURE_EM64T - 32))) ? INTEL_NOCONA
+ : INTEL_PRESCOTT);
break;
default:
- *Type =
- ((Features & (1 << FEATURE_EM64T)) ? INTEL_X86_64 : INTEL_PENTIUM_IV);
+ *Type = ((Features2 & (1 << (FEATURE_EM64T - 32))) ? INTEL_X86_64
+ : INTEL_PENTIUM_IV);
break;
}
break;
@@ -827,10 +834,8 @@ getIntelProcessorTypeAndSubtype(unsigned int Family, unsigned int Model,
}
}
-static void getAMDProcessorTypeAndSubtype(unsigned int Family,
- unsigned int Model,
- unsigned int Features,
- unsigned *Type,
+static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
+ unsigned Features, unsigned *Type,
unsigned *Subtype) {
// FIXME: this poorly matches the generated SubtargetFeatureKV table. There
// appears to be no way to generate the wide variety of AMD-specific targets
@@ -860,38 +865,20 @@ static void getAMDProcessorTypeAndSubtype(unsigned int Family,
break;
case 6:
*Type = AMDATHLON;
- switch (Model) {
- case 4:
- *Subtype = AMDATHLON_TBIRD;
- break; // "athlon-tbird"
- case 6:
- case 7:
- case 8:
- *Subtype = AMDATHLON_MP;
- break; // "athlon-mp"
- case 10:
+ if (Features & (1 << FEATURE_SSE)) {
*Subtype = AMDATHLON_XP;
break; // "athlon-xp"
}
- break;
+ *Subtype = AMDATHLON_CLASSIC;
+ break; // "athlon"
case 15:
*Type = AMDATHLON;
if (Features & (1 << FEATURE_SSE3)) {
*Subtype = AMDATHLON_K8SSE3;
break; // "k8-sse3"
}
- switch (Model) {
- case 1:
- *Subtype = AMDATHLON_OPTERON;
- break; // "opteron"
- case 5:
- *Subtype = AMDATHLON_FX;
- break; // "athlon-fx"; also opteron
- default:
- *Subtype = AMDATHLON_64;
- break; // "athlon64"
- }
- break;
+ *Subtype = AMDATHLON_K8;
+ break; // "k8"
case 16:
*Type = AMDFAM10H; // "amdfam10"
switch (Model) {
@@ -907,19 +894,13 @@ static void getAMDProcessorTypeAndSubtype(unsigned int Family,
}
break;
case 20:
- *Type = AMDFAM14H;
- *Subtype = AMD_BTVER1;
+ *Type = AMD_BTVER1;
break; // "btver1";
case 21:
*Type = AMDFAM15H;
- if (!(Features &
- (1 << FEATURE_AVX))) { // If no AVX support, provide a sane fallback.
- *Subtype = AMD_BTVER1;
- break; // "btver1"
- }
- if (Model >= 0x50 && Model <= 0x6f) {
+ if (Model >= 0x60 && Model <= 0x7f) {
*Subtype = AMDFAM15H_BDVER4;
- break; // "bdver4"; 50h-6Fh: Excavator
+ break; // "bdver4"; 60h-7Fh: Excavator
}
if (Model >= 0x30 && Model <= 0x3f) {
*Subtype = AMDFAM15H_BDVER3;
@@ -935,39 +916,52 @@ static void getAMDProcessorTypeAndSubtype(unsigned int Family,
}
break;
case 22:
- *Type = AMDFAM16H;
- if (!(Features &
- (1 << FEATURE_AVX))) { // If no AVX support provide a sane fallback.
- *Subtype = AMD_BTVER1;
- break; // "btver1";
- }
- *Subtype = AMD_BTVER2;
+ *Type = AMD_BTVER2;
break; // "btver2"
case 23:
*Type = AMDFAM17H;
- if (Features & (1 << FEATURE_ADX)) {
- *Subtype = AMDFAM17H_ZNVER1;
- break; // "znver1"
- }
- *Subtype = AMD_BTVER1;
+ *Subtype = AMDFAM17H_ZNVER1;
break;
default:
break; // "generic"
}
}
-static unsigned getAvailableFeatures(unsigned int ECX, unsigned int EDX,
- unsigned MaxLeaf) {
+static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
+ unsigned *FeaturesOut,
+ unsigned *Features2Out) {
unsigned Features = 0;
- unsigned int EAX, EBX;
- Features |= (((EDX >> 23) & 1) << FEATURE_MMX);
- Features |= (((EDX >> 25) & 1) << FEATURE_SSE);
- Features |= (((EDX >> 26) & 1) << FEATURE_SSE2);
- Features |= (((ECX >> 0) & 1) << FEATURE_SSE3);
- Features |= (((ECX >> 9) & 1) << FEATURE_SSSE3);
- Features |= (((ECX >> 19) & 1) << FEATURE_SSE4_1);
- Features |= (((ECX >> 20) & 1) << FEATURE_SSE4_2);
- Features |= (((ECX >> 22) & 1) << FEATURE_MOVBE);
+ unsigned Features2 = 0;
+ unsigned EAX, EBX;
+
+ if ((EDX >> 15) & 1)
+ Features |= 1 << FEATURE_CMOV;
+ if ((EDX >> 23) & 1)
+ Features |= 1 << FEATURE_MMX;
+ if ((EDX >> 25) & 1)
+ Features |= 1 << FEATURE_SSE;
+ if ((EDX >> 26) & 1)
+ Features |= 1 << FEATURE_SSE2;
+
+ if ((ECX >> 0) & 1)
+ Features |= 1 << FEATURE_SSE3;
+ if ((ECX >> 1) & 1)
+ Features |= 1 << FEATURE_PCLMUL;
+ if ((ECX >> 9) & 1)
+ Features |= 1 << FEATURE_SSSE3;
+ if ((ECX >> 12) & 1)
+ Features |= 1 << FEATURE_FMA;
+ if ((ECX >> 19) & 1)
+ Features |= 1 << FEATURE_SSE4_1;
+ if ((ECX >> 20) & 1)
+ Features |= 1 << FEATURE_SSE4_2;
+ if ((ECX >> 23) & 1)
+ Features |= 1 << FEATURE_POPCNT;
+ if ((ECX >> 25) & 1)
+ Features |= 1 << FEATURE_AES;
+
+ if ((ECX >> 22) & 1)
+ Features2 |= 1 << (FEATURE_MOVBE - 32);
// If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV
// indicates that the AVX registers will be saved and restored on context
@@ -976,20 +970,65 @@ static unsigned getAvailableFeatures(unsigned int ECX, unsigned int EDX,
bool HasAVX = ((ECX & AVXBits) == AVXBits) && !getX86XCR0(&EAX, &EDX) &&
((EAX & 0x6) == 0x6);
bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0);
+
+ if (HasAVX)
+ Features |= 1 << FEATURE_AVX;
+
bool HasLeaf7 =
MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX);
- bool HasADX = HasLeaf7 && ((EBX >> 19) & 1);
- bool HasAVX2 = HasAVX && HasLeaf7 && (EBX & 0x20);
- bool HasAVX512 = HasLeaf7 && HasAVX512Save && ((EBX >> 16) & 1);
- Features |= (HasAVX << FEATURE_AVX);
- Features |= (HasAVX2 << FEATURE_AVX2);
- Features |= (HasAVX512 << FEATURE_AVX512);
- Features |= (HasAVX512Save << FEATURE_AVX512SAVE);
- Features |= (HasADX << FEATURE_ADX);
-
- getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
- Features |= (((EDX >> 29) & 0x1) << FEATURE_EM64T);
- return Features;
+
+ if (HasLeaf7 && ((EBX >> 3) & 1))
+ Features |= 1 << FEATURE_BMI;
+ if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX)
+ Features |= 1 << FEATURE_AVX2;
+ if (HasLeaf7 && ((EBX >> 9) & 1))
+ Features |= 1 << FEATURE_BMI2;
+ if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512F;
+ if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512DQ;
+ if (HasLeaf7 && ((EBX >> 19) & 1))
+ Features2 |= 1 << (FEATURE_ADX - 32);
+ if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512IFMA;
+ if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512PF;
+ if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512ER;
+ if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512CD;
+ if (HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512BW;
+ if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512VL;
+
+ if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512VBMI;
+ if (HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX512VPOPCNTDQ;
+
+ if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX5124VNNIW;
+ if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save)
+ Features |= 1 << FEATURE_AVX5124FMAPS;
+
+ unsigned MaxExtLevel;
+ getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);
+
+ bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 &&
+ !getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
+ if (HasExtLeaf1 && ((ECX >> 6) & 1))
+ Features |= 1 << FEATURE_SSE4_A;
+ if (HasExtLeaf1 && ((ECX >> 11) & 1))
+ Features |= 1 << FEATURE_XOP;
+ if (HasExtLeaf1 && ((ECX >> 16) & 1))
+ Features |= 1 << FEATURE_FMA4;
+
+ if (HasExtLeaf1 && ((EDX >> 29) & 1))
+ Features2 |= 1 << (FEATURE_EM64T - 32);
+
+ *FeaturesOut = Features;
+ *Features2Out = Features2;
}
StringRef sys::getHostCPUName() {
@@ -1004,23 +1043,22 @@ StringRef sys::getHostCPUName() {
if(!isCpuIdSupported())
return "generic";
#endif
- if (getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX))
- return "generic";
- if (getX86CpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX))
+ if (getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1)
return "generic";
+ getX86CpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX);
unsigned Brand_id = EBX & 0xff;
unsigned Family = 0, Model = 0;
- unsigned Features = 0;
+ unsigned Features = 0, Features2 = 0;
detectX86FamilyModel(EAX, &Family, &Model);
- Features = getAvailableFeatures(ECX, EDX, MaxLeaf);
+ getAvailableFeatures(ECX, EDX, MaxLeaf, &Features, &Features2);
unsigned Type;
unsigned Subtype;
if (Vendor == SIG_INTEL) {
- getIntelProcessorTypeAndSubtype(Family, Model, Brand_id, Features, &Type,
- &Subtype);
+ getIntelProcessorTypeAndSubtype(Family, Model, Brand_id, Features,
+ Features2, &Type, &Subtype);
switch (Type) {
case INTEL_i386:
return "i386";
@@ -1049,7 +1087,7 @@ StringRef sys::getHostCPUName() {
case INTEL_CORE2_45:
return "penryn";
default:
- return "core2";
+ llvm_unreachable("Unexpected subtype!");
}
case INTEL_COREI7:
switch (Subtype) {
@@ -1070,21 +1108,16 @@ StringRef sys::getHostCPUName() {
case INTEL_COREI7_SKYLAKE_AVX512:
return "skylake-avx512";
default:
- return "corei7";
+ llvm_unreachable("Unexpected subtype!");
}
- case INTEL_ATOM:
- switch (Subtype) {
- case INTEL_ATOM_BONNELL:
- return "bonnell";
- case INTEL_ATOM_GOLDMONT:
- return "goldmont";
- case INTEL_ATOM_SILVERMONT:
- return "silvermont";
- default:
- return "atom";
- }
- case INTEL_XEONPHI:
- return "knl"; /*update for more variants added*/
+ case INTEL_BONNELL:
+ return "bonnell";
+ case INTEL_SILVERMONT:
+ return "silvermont";
+ case INTEL_GOLDMONT:
+ return "goldmont";
+ case INTEL_KNL:
+ return "knl";
case INTEL_X86_64:
return "x86-64";
case INTEL_NOCONA:
@@ -1092,7 +1125,7 @@ StringRef sys::getHostCPUName() {
case INTEL_PRESCOTT:
return "prescott";
default:
- return "generic";
+ break;
}
} else if (Vendor == SIG_AMD) {
getAMDProcessorTypeAndSubtype(Family, Model, Features, &Type, &Subtype);
@@ -1114,31 +1147,24 @@ StringRef sys::getHostCPUName() {
}
case AMDATHLON:
switch (Subtype) {
- case AMDATHLON_TBIRD:
- return "athlon-tbird";
- case AMDATHLON_MP:
- return "athlon-mp";
+ case AMDATHLON_CLASSIC:
+ return "athlon";
case AMDATHLON_XP:
return "athlon-xp";
+ case AMDATHLON_K8:
+ return "k8";
case AMDATHLON_K8SSE3:
return "k8-sse3";
- case AMDATHLON_OPTERON:
- return "opteron";
- case AMDATHLON_FX:
- return "athlon-fx";
- case AMDATHLON_64:
- return "athlon64";
default:
- return "athlon";
+ llvm_unreachable("Unexpected subtype!");
}
case AMDFAM10H:
- if(Subtype == AMDFAM10H_BARCELONA)
- return "barcelona";
return "amdfam10";
- case AMDFAM14H:
+ case AMD_BTVER1:
return "btver1";
case AMDFAM15H:
switch (Subtype) {
+ default: // There are gaps in the subtype detection.
case AMDFAM15H_BDVER1:
return "bdver1";
case AMDFAM15H_BDVER2:
@@ -1147,31 +1173,13 @@ StringRef sys::getHostCPUName() {
return "bdver3";
case AMDFAM15H_BDVER4:
return "bdver4";
- case AMD_BTVER1:
- return "btver1";
- default:
- return "amdfam15";
- }
- case AMDFAM16H:
- switch (Subtype) {
- case AMD_BTVER1:
- return "btver1";
- case AMD_BTVER2:
- return "btver2";
- default:
- return "amdfam16";
}
+ case AMD_BTVER2:
+ return "btver2";
case AMDFAM17H:
- switch (Subtype) {
- case AMD_BTVER1:
- return "btver1";
- case AMDFAM17H_ZNVER1:
- return "znver1";
- default:
- return "amdfam17";
- }
+ return "znver1";
default:
- return "generic";
+ break;
}
}
return "generic";
@@ -1494,7 +1502,8 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) { return false; }
#endif
std::string sys::getProcessTriple() {
- Triple PT(Triple::normalize(LLVM_HOST_TRIPLE));
+ std::string TargetTripleString = updateTripleOSVersion(LLVM_HOST_TRIPLE);
+ Triple PT(Triple::normalize(TargetTripleString));
if (sizeof(void *) == 8 && PT.isArch32Bit())
PT = PT.get64BitArchVariant();
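
The CPU feature mask is now split into two 32-bit words, with indices of 32 and above (FEATURE_MOVBE, FEATURE_ADX, FEATURE_EM64T) kept in Features2. A small helper showing the indexing convention used throughout the hunks above (illustrative only):

    // Test a FEATURE_* index against the split (Features, Features2) pair.
    static bool hasFeature(unsigned Features, unsigned Features2, unsigned Bit) {
      return Bit < 32 ? (Features & (1U << Bit)) != 0
                      : (Features2 & (1U << (Bit - 32))) != 0;
    }
    // FEATURE_MOVBE is defined as 32, so it lands in Features2, which is why
    // the code above writes (1 << (FEATURE_MOVBE - 32)).
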
diff --git a/lib/Support/Mutex.cpp b/lib/Support/Mutex.cpp
index bdd02105f6f0..b1d5e7c0d991 100644
--- a/lib/Support/Mutex.cpp
+++ b/lib/Support/Mutex.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/Mutex.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Config/config.h"
//===----------------------------------------------------------------------===//
@@ -47,6 +48,10 @@ MutexImpl::MutexImpl( bool recursive)
// Declare the pthread_mutex data structures
pthread_mutex_t* mutex =
static_cast<pthread_mutex_t*>(malloc(sizeof(pthread_mutex_t)));
+
+ if (mutex == nullptr)
+ report_bad_alloc_error("Mutex allocation failed");
+
pthread_mutexattr_t attr;
// Initialize the mutex attributes
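
The Mutex change shows the intended pattern for raw allocations after this patch: check the result and route out-of-memory through the new hook rather than crashing on a null pointer. A generic sketch (illustrative, not part of the patch):

    #include "llvm/Support/ErrorHandling.h"
    #include <cstdlib>

    void *allocateOrReport(std::size_t Size) {
      void *Buf = std::malloc(Size);
      if (!Buf)
        // Invokes the bad-alloc handler if one is installed; otherwise aborts
        // (or throws std::bad_alloc when LLVM is built with exceptions).
        llvm::report_bad_alloc_error("allocation failed");
      return Buf;
    }
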
diff --git a/lib/Support/Unix/DynamicLibrary.inc b/lib/Support/Unix/DynamicLibrary.inc
index aad77f19c35a..f05103ccd1eb 100644
--- a/lib/Support/Unix/DynamicLibrary.inc
+++ b/lib/Support/Unix/DynamicLibrary.inc
@@ -20,6 +20,9 @@ DynamicLibrary::HandleSet::~HandleSet() {
::dlclose(Handle);
if (Process)
::dlclose(Process);
+
+ // llvm_shutdown was called; return to the default search order.
+ DynamicLibrary::SearchOrder = DynamicLibrary::SO_Linker;
}
void *DynamicLibrary::HandleSet::DLOpen(const char *File, std::string *Err) {
diff --git a/lib/Support/Unix/Host.inc b/lib/Support/Unix/Host.inc
index 0ba6a25aa198..5580e63893c6 100644
--- a/lib/Support/Unix/Host.inc
+++ b/lib/Support/Unix/Host.inc
@@ -34,18 +34,31 @@ static std::string getOSVersion() {
return info.release;
}
-std::string sys::getDefaultTargetTriple() {
- std::string TargetTripleString(LLVM_DEFAULT_TARGET_TRIPLE);
-
- // On darwin, we want to update the version to match that of the
- // target.
+static std::string updateTripleOSVersion(std::string TargetTripleString) {
+ // On darwin, we want to update the version to match that of the target.
std::string::size_type DarwinDashIdx = TargetTripleString.find("-darwin");
if (DarwinDashIdx != std::string::npos) {
TargetTripleString.resize(DarwinDashIdx + strlen("-darwin"));
TargetTripleString += getOSVersion();
+ return TargetTripleString;
+ }
+ std::string::size_type MacOSDashIdx = TargetTripleString.find("-macos");
+ if (MacOSDashIdx != std::string::npos) {
+ TargetTripleString.resize(MacOSDashIdx);
+ // Reset the OS to darwin as the OS version from `uname` doesn't use the
+ // macOS version scheme.
+ TargetTripleString += "-darwin";
+ TargetTripleString += getOSVersion();
}
+ return TargetTripleString;
+}
+
+std::string sys::getDefaultTargetTriple() {
+ std::string TargetTripleString =
+ updateTripleOSVersion(LLVM_DEFAULT_TARGET_TRIPLE);
- // Override the default target with an environment variable named by LLVM_TARGET_TRIPLE_ENV.
+ // Override the default target with an environment variable named by
+ // LLVM_TARGET_TRIPLE_ENV.
#if defined(LLVM_TARGET_TRIPLE_ENV)
if (const char *EnvTriple = std::getenv(LLVM_TARGET_TRIPLE_ENV))
TargetTripleString = EnvTriple;
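
For example, with getOSVersion() returning "16.7.0", updateTripleOSVersion maps "x86_64-apple-darwin" to "x86_64-apple-darwin16.7.0" and "x86_64-apple-macos" to "x86_64-apple-darwin16.7.0" (the -macos suffix is swapped for -darwin because uname reports the Darwin kernel version, not the macOS marketing version); triples without either suffix pass through unchanged.
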
diff --git a/lib/Support/Unix/Program.inc b/lib/Support/Unix/Program.inc
index 1704fa479942..c866d5b5a84e 100644
--- a/lib/Support/Unix/Program.inc
+++ b/lib/Support/Unix/Program.inc
@@ -40,9 +40,6 @@
#include <unistd.h>
#endif
#ifdef HAVE_POSIX_SPAWN
-#ifdef __sun__
-#define _RESTRICT_KYWD
-#endif
#include <spawn.h>
#if defined(__APPLE__)
diff --git a/lib/Support/Windows/DynamicLibrary.inc b/lib/Support/Windows/DynamicLibrary.inc
index caf1a0a658de..083ea902eeb2 100644
--- a/lib/Support/Windows/DynamicLibrary.inc
+++ b/lib/Support/Windows/DynamicLibrary.inc
@@ -28,6 +28,8 @@ DynamicLibrary::HandleSet::~HandleSet() {
// 'Process' should not be released on Windows.
assert((!Process || Process==this) && "Bad Handle");
+ // llvm_shutdown was called; return to the default search order.
+ DynamicLibrary::SearchOrder = DynamicLibrary::SO_Linker;
}
void *DynamicLibrary::HandleSet::DLOpen(const char *File, std::string *Err) {
diff --git a/lib/Support/Windows/Host.inc b/lib/Support/Windows/Host.inc
index 7e196cf0ce18..90a6fb316703 100644
--- a/lib/Support/Windows/Host.inc
+++ b/lib/Support/Windows/Host.inc
@@ -17,6 +17,10 @@
using namespace llvm;
+static std::string updateTripleOSVersion(std::string Triple) {
+ return Triple;
+}
+
std::string sys::getDefaultTargetTriple() {
const char *Triple = LLVM_DEFAULT_TARGET_TRIPLE;
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index 4a7e0b2b803e..db1fbe069f4d 100644
--- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -509,7 +509,7 @@ int AArch64A57FPLoadBalancing::scavengeRegister(Chain *G, Color C,
assert(ChainBegin != ChainEnd && "Chain should contain instructions");
do {
--I;
- Units.accumulateBackward(*I);
+ Units.accumulate(*I);
} while (I != ChainBegin);
// Make sure we allocate in-order, to get the cheapest registers first.
diff --git a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
index 6f8dd3e3ac0c..b3b738584b40 100644
--- a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
+++ b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
@@ -113,7 +113,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
return Copy;
}
- // Create a virtal register in *TLSBaseAddrReg, and populate it by
+ // Create a virtual register in *TLSBaseAddrReg, and populate it by
// inserting a copy instruction after I. Returns the new instruction.
MachineInstr *setRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
MachineFunction *MF = I.getParent()->getParent();
diff --git a/lib/Target/AArch64/AArch64CondBrTuning.cpp b/lib/Target/AArch64/AArch64CondBrTuning.cpp
index 0a948812ff33..51700f905979 100644
--- a/lib/Target/AArch64/AArch64CondBrTuning.cpp
+++ b/lib/Target/AArch64/AArch64CondBrTuning.cpp
@@ -167,6 +167,7 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
case AArch64::SUBWrs:
case AArch64::SUBWrx:
IsFlagSetting = false;
+ LLVM_FALLTHROUGH;
case AArch64::ADDSWri:
case AArch64::ADDSWrr:
case AArch64::ADDSWrs:
@@ -226,6 +227,7 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
case AArch64::SUBXrs:
case AArch64::SUBXrx:
IsFlagSetting = false;
+ LLVM_FALLTHROUGH;
case AArch64::ADDSXri:
case AArch64::ADDSXrr:
case AArch64::ADDSXrs:
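
Several hunks in this import replace "// Deliberate fallthrough" comments with LLVM_FALLTHROUGH from llvm/Support/Compiler.h, which expands to a fallthrough attribute where the compiler supports one and keeps -Wimplicit-fallthrough quiet. A minimal sketch of the idiom (the switch contents are illustrative):

    #include "llvm/Support/Compiler.h"

    int classify(int Opc) {
      int Kind = 0;
      switch (Opc) {
      case 1:
        Kind = 1;
        LLVM_FALLTHROUGH; // intentional: case 1 also takes case 2's handling
      case 2:
        Kind += 10;
        break;
      default:
        break;
      }
      return Kind;
    }
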
diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index 7bf2097c17ce..3682b62d2b84 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -2114,7 +2114,7 @@ bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type.");
- case MVT::i1: VTIsi1 = true;
+ case MVT::i1: VTIsi1 = true; LLVM_FALLTHROUGH;
case MVT::i8: Opc = OpcTable[Idx][0]; break;
case MVT::i16: Opc = OpcTable[Idx][1]; break;
case MVT::i32: Opc = OpcTable[Idx][2]; break;
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index aaf32a499bc3..60fde5caa339 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8364,9 +8364,9 @@ static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
/// EXTR instruction extracts a contiguous chunk of bits from two existing
/// registers viewed as a high/low pair. This function looks for the pattern:
-/// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
-/// EXTR. Can't quite be done in TableGen because the two immediates aren't
-/// independent.
+/// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it
+/// with an EXTR. Can't quite be done in TableGen because the two immediates
+/// aren't independent.
static SDValue tryCombineToEXTR(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
SelectionDAG &DAG = DCI.DAG;
@@ -9531,7 +9531,7 @@ static SDValue performPostLD1Combine(SDNode *N,
return SDValue();
}
-/// Simplify \Addr given that the top byte of it is ignored by HW during
+/// Simplify ``Addr`` given that the top byte of it is ignored by HW during
/// address translation.
static bool performTBISimplification(SDValue Addr,
TargetLowering::DAGCombinerInfo &DCI,
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 314e89bbca86..dba3e4bdf82f 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1282,6 +1282,7 @@ static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
case AArch64CC::HI: // Z clear and C set
case AArch64CC::LS: // Z set or C clear
UsedFlags.Z = true;
+ LLVM_FALLTHROUGH;
case AArch64CC::HS: // C set
case AArch64CC::LO: // C clear
UsedFlags.C = true;
@@ -1300,6 +1301,7 @@ static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
case AArch64CC::GT: // Z clear, N and V the same
case AArch64CC::LE: // Z set, N and V differ
UsedFlags.Z = true;
+ LLVM_FALLTHROUGH;
case AArch64CC::GE: // N and V the same
case AArch64CC::LT: // N and V differ
UsedFlags.N = true;
@@ -3669,12 +3671,17 @@ enum class FMAInstKind { Default, Indexed, Accumulator };
/// F|MUL I=A,B,0
/// F|ADD R,I,C
/// ==> F|MADD R,A,B,C
+/// \param MF Containing MachineFunction
+/// \param MRI Register information
+/// \param TII Target information
/// \param Root is the F|ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the F|MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode fo the f|madd instruction
+/// \param RC Register class of operands
+/// \param kind The kind of FMA instruction (addressing mode) to be generated.
static MachineInstr *
genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
const TargetInstrInfo *TII, MachineInstr &Root,
@@ -3733,6 +3740,9 @@ genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
/// ADD R,I,Imm
/// ==> ORR V, ZR, Imm
/// ==> MADD R,A,B,V
+/// \param MF Containing MachineFunction
+/// \param MRI Register information
+/// \param TII Target information
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
@@ -3741,6 +3751,7 @@ genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
/// \param MaddOpc the opcode fo the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
+/// \param RC Register class of operands
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
const TargetInstrInfo *TII, MachineInstr &Root,
SmallVectorImpl<MachineInstr *> &InsInstrs,
@@ -4216,26 +4227,36 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
/// \brief Replace csincr-branch sequence by simple conditional branch
///
/// Examples:
-/// 1.
+/// 1. \code
/// csinc w9, wzr, wzr, <condition code>
/// tbnz w9, #0, 0x44
+/// \endcode
/// to
+/// \code
/// b.<inverted condition code>
+/// \endcode
///
-/// 2.
+/// 2. \code
/// csinc w9, wzr, wzr, <condition code>
/// tbz w9, #0, 0x44
+/// \endcode
/// to
+/// \code
/// b.<condition code>
+/// \endcode
///
/// Replace compare and branch sequence by TBZ/TBNZ instruction when the
/// compare's constant operand is power of 2.
///
/// Examples:
+/// \code
/// and w8, w8, #0x400
/// cbnz w8, L1
+/// \endcode
/// to
+/// \code
/// tbnz w8, #10, L1
+/// \endcode
///
/// \param MI Conditional Branch
/// \return True when the simple conditional branch is generated
@@ -4409,6 +4430,13 @@ AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
return makeArrayRef(TargetFlags);
}
+ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
+AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const {
+ static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
+ {{MOSuppressPair, "aarch64-suppress-pair"}};
+ return makeArrayRef(TargetFlags);
+}
+
unsigned AArch64InstrInfo::getOutliningBenefit(size_t SequenceSize,
size_t Occurrences,
bool CanBeTailCall) const {
diff --git a/lib/Target/AArch64/AArch64InstrInfo.h b/lib/Target/AArch64/AArch64InstrInfo.h
index 58e9ce583d44..0809ede4df2a 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/lib/Target/AArch64/AArch64InstrInfo.h
@@ -263,8 +263,8 @@ public:
/// \param Pattern - combiner pattern
bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
/// Return true when there is potentially a faster code sequence
- /// for an instruction chain ending in <Root>. All potential patterns are
- /// listed in the <Patterns> array.
+ /// for an instruction chain ending in ``Root``. All potential patterns are
+ /// listed in the ``Patterns`` array.
bool getMachineCombinerPatterns(MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern> &Patterns)
const override;
@@ -289,6 +289,8 @@ public:
getSerializableDirectMachineOperandTargetFlags() const override;
ArrayRef<std::pair<unsigned, const char *>>
getSerializableBitmaskMachineOperandTargetFlags() const override;
+ ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
+ getSerializableMachineMemOperandTargetFlags() const override;
bool isFunctionSafeToOutlineFrom(MachineFunction &MF) const override;
unsigned getOutliningBenefit(size_t SequenceSize, size_t Occurrences,
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index 6cb723d187af..0be14673eb20 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -313,9 +313,6 @@ def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
-def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
-def IsNotDarwin: Predicate<"!Subtarget->isTargetDarwin()">;
-
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
@@ -714,10 +711,10 @@ def : InstAlias<"negs $dst, $src$shift",
defm UDIV : Div<0, "udiv", udiv>;
defm SDIV : Div<1, "sdiv", sdiv>;
-def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr $Rn, $Rm)>;
-def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr $Rn, $Rm)>;
-def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr $Rn, $Rm)>;
-def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr $Rn, $Rm)>;
+def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
+def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
+def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
+def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;
// Variable shift
defm ASRV : Shift<0b10, "asr", sra>;
diff --git a/lib/Target/AArch64/AArch64InstructionSelector.cpp b/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 07ce0e863c5e..7e275e4d2f46 100644
--- a/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -33,6 +33,8 @@
#define DEBUG_TYPE "aarch64-isel"
+#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
+
using namespace llvm;
#ifndef LLVM_BUILD_GLOBAL_ISEL
@@ -212,6 +214,7 @@ static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
return GenericOpc;
}
}
+ break;
case AArch64::FPRRegBankID:
switch (OpSize) {
case 32:
@@ -243,7 +246,8 @@ static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
return GenericOpc;
}
}
- };
+ break;
+ }
return GenericOpc;
}
@@ -267,6 +271,7 @@ static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
case 64:
return isStore ? AArch64::STRXui : AArch64::LDRXui;
}
+ break;
case AArch64::FPRRegBankID:
switch (OpSize) {
case 8:
@@ -278,7 +283,8 @@ static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
case 64:
return isStore ? AArch64::STRDui : AArch64::LDRDui;
}
- };
+ break;
+ }
return GenericOpc;
}
@@ -1319,6 +1325,9 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
case TargetOpcode::G_VASTART:
return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
: selectVaStartAAPCS(I, MF, MRI);
+ case TargetOpcode::G_IMPLICIT_DEF:
+ I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
+ return true;
}
return false;
diff --git a/lib/Target/AArch64/AArch64LegalizerInfo.cpp b/lib/Target/AArch64/AArch64LegalizerInfo.cpp
index 4b568f3fba2b..4a0a7c36baf8 100644
--- a/lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ b/lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -291,11 +291,10 @@ bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
unsigned DstPtr;
if (Align > PtrSize) {
// Realign the list to the actual required alignment.
- unsigned AlignMinus1 = MRI.createGenericVirtualRegister(IntPtrTy);
- MIRBuilder.buildConstant(AlignMinus1, Align - 1);
+ auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);
unsigned ListTmp = MRI.createGenericVirtualRegister(PtrTy);
- MIRBuilder.buildGEP(ListTmp, List, AlignMinus1);
+ MIRBuilder.buildGEP(ListTmp, List, AlignMinus1->getOperand(0).getReg());
DstPtr = MRI.createGenericVirtualRegister(PtrTy);
MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
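
The hunk above moves to the MachineIRBuilder::buildConstant overload that creates the destination register itself and returns the new instruction; the result vreg is then read back from operand 0. The pattern in isolation, reusing the hunk's own variables (illustrative):

    // Before: create the vreg by hand, then materialize the constant into it.
    //   unsigned R = MRI.createGenericVirtualRegister(IntPtrTy);
    //   MIRBuilder.buildConstant(R, Align - 1);
    // After: let the builder create the vreg and return the G_CONSTANT.
    auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);
    unsigned AlignReg = AlignMinus1->getOperand(0).getReg();
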
diff --git a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
index f3c8e7e9bdc2..4e65c0ab6011 100644
--- a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
+++ b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -163,6 +163,7 @@ AArch64RedundantCopyElimination::knownRegValInBlock(
case AArch64::ADDSWri:
case AArch64::ADDSXri:
IsCMN = true;
+ LLVM_FALLTHROUGH;
// CMP is an alias for SUBS with a dead destination register.
case AArch64::SUBSWri:
case AArch64::SUBSXri: {
diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp
index a9a9d5ce8429..a3238cf3b60f 100644
--- a/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -81,6 +81,7 @@ void AArch64Subtarget::initializeProperties() {
break;
case CortexA57:
MaxInterleaveFactor = 4;
+ PrefFunctionAlignment = 4;
break;
case ExynosM1:
MaxInterleaveFactor = 4;
@@ -130,7 +131,9 @@ void AArch64Subtarget::initializeProperties() {
break;
case CortexA35: break;
case CortexA53: break;
- case CortexA72: break;
+ case CortexA72:
+ PrefFunctionAlignment = 4;
+ break;
case CortexA73: break;
case Others: break;
}
diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h
index 7933e58c49ee..db53946cbc77 100644
--- a/lib/Target/AArch64/AArch64Subtarget.h
+++ b/lib/Target/AArch64/AArch64Subtarget.h
@@ -218,6 +218,13 @@ public:
bool hasArithmeticCbzFusion() const { return HasArithmeticCbzFusion; }
bool hasFuseAES() const { return HasFuseAES; }
bool hasFuseLiterals() const { return HasFuseLiterals; }
+
+ /// \brief Return true if the CPU supports any kind of instruction fusion.
+ bool hasFusion() const {
+ return hasArithmeticBccFusion() || hasArithmeticCbzFusion() ||
+ hasFuseAES() || hasFuseLiterals();
+ }
+
bool useRSqrt() const { return UseRSqrt; }
unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
unsigned getVectorInsertExtractBaseCost() const {
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index 1252f9403812..6237b8f3e7b9 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -277,17 +277,19 @@ public:
ScheduleDAGInstrs *
createMachineScheduler(MachineSchedContext *C) const override {
+ const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
ScheduleDAGMILive *DAG = createGenericSchedLive(C);
DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
- DAG->addMutation(createAArch64MacroFusionDAGMutation());
+ if (ST.hasFusion())
+ DAG->addMutation(createAArch64MacroFusionDAGMutation());
return DAG;
}
ScheduleDAGInstrs *
createPostMachineScheduler(MachineSchedContext *C) const override {
const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
- if (ST.hasFuseAES() || ST.hasFuseLiterals()) {
+ if (ST.hasFusion()) {
// Run the Macro Fusion after RA again since literals are expanded from
// pseudos then (v. addPreSched2()).
ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
diff --git a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index 0d860a7eef79..7870dce5c9c0 100644
--- a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -756,7 +756,7 @@ static DecodeStatus DecodeThreeAddrSRegInstruction(llvm::MCInst &Inst,
// if shift == '11' then ReservedValue()
if (shiftHi == 0x3)
return Fail;
- // Deliberate fallthrough
+ LLVM_FALLTHROUGH;
case AArch64::ANDWrs:
case AArch64::ANDSWrs:
case AArch64::BICWrs:
@@ -780,7 +780,7 @@ static DecodeStatus DecodeThreeAddrSRegInstruction(llvm::MCInst &Inst,
// if shift == '11' then ReservedValue()
if (shiftHi == 0x3)
return Fail;
- // Deliberate fallthrough
+ LLVM_FALLTHROUGH;
case AArch64::ANDXrs:
case AArch64::ANDSXrs:
case AArch64::BICXrs:
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 475f91016840..a7a7daf4b4a5 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -73,7 +73,7 @@ public:
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override;
+ uint64_t Value, bool IsResolved) const override;
bool mayNeedRelaxation(const MCInst &Inst) const override;
bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
@@ -264,7 +264,7 @@ unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) con
void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
- bool IsPCRel) const {
+ bool IsResolved) const {
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
if (!Value)
return; // Doesn't change encoding.
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
index fc808ee0cdd6..c25bd8c8f6cc 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
@@ -103,4 +103,6 @@ AArch64MCAsmInfoELF::AArch64MCAsmInfoELF(const Triple &T) {
AArch64MCAsmInfoCOFF::AArch64MCAsmInfoCOFF() {
CommentString = ";";
+ PrivateGlobalPrefix = ".L";
+ PrivateLabelPrefix = ".L";
}
diff --git a/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp b/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
index 6f002860044c..ed5370826647 100644
--- a/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
@@ -108,10 +108,11 @@ bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst * Load) {
DFS(Start, Checklist);
for (auto &BB : Checklist) {
BasicBlock::iterator StartIt = (!L && (BB == Load->getParent())) ?
- BasicBlock::iterator(Load) : BB->end();
- if (MDR->getPointerDependencyFrom(MemoryLocation(Ptr),
- true, StartIt, BB, Load).isClobber())
- return true;
+ BasicBlock::iterator(Load) : BB->end();
+ auto Q = MDR->getPointerDependencyFrom(MemoryLocation(Ptr), true,
+ StartIt, BB, Load);
+ if (Q.isClobber() || Q.isUnknown())
+ return true;
}
return false;
}
diff --git a/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index b312dbc8d14d..31ee9206ae27 100644
--- a/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -380,7 +380,9 @@ bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
FastMathFlags FMF = FPOp->getFastMathFlags();
bool UnsafeDiv = HasUnsafeFPMath || FMF.unsafeAlgebra() ||
FMF.allowReciprocal();
- if (ST->hasFP32Denormals() && !UnsafeDiv)
+
+ // With UnsafeDiv, the node will be optimized to just rcp and mul.
+ if (ST->hasFP32Denormals() || UnsafeDiv)
return false;
IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()), FPMath);
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 96f819fd0e68..2553cf4da0fe 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2651,8 +2651,11 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
return DAG.getZExtOrTrunc(Shl, SL, VT);
}
- case ISD::OR: if (!isOrEquivalentToAdd(DAG, LHS)) break;
- case ISD::ADD: { // Fall through from above
+ case ISD::OR:
+ if (!isOrEquivalentToAdd(DAG, LHS))
+ break;
+ LLVM_FALLTHROUGH;
+ case ISD::ADD: {
// shl (or|add x, c2), c1 => or|add (shl x, c1), (c2 << c1)
if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
SDValue Shl = DAG.getNode(ISD::SHL, SL, VT, LHS->getOperand(0),
diff --git a/lib/Target/AMDGPU/AMDGPULowerIntrinsics.cpp b/lib/Target/AMDGPU/AMDGPULowerIntrinsics.cpp
index 846e7dff5f8c..7e0e9802c0e6 100644
--- a/lib/Target/AMDGPU/AMDGPULowerIntrinsics.cpp
+++ b/lib/Target/AMDGPU/AMDGPULowerIntrinsics.cpp
@@ -10,6 +10,7 @@
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
@@ -34,9 +35,14 @@ public:
AMDGPULowerIntrinsics() : ModulePass(ID) {}
bool runOnModule(Module &M) override;
+ bool expandMemIntrinsicUses(Function &F);
StringRef getPassName() const override {
return "AMDGPU Lower Intrinsics";
}
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ }
};
}
@@ -55,7 +61,7 @@ static bool shouldExpandOperationWithSize(Value *Size) {
return !CI || (CI->getZExtValue() > MaxStaticSize);
}
-static bool expandMemIntrinsicUses(Function &F) {
+bool AMDGPULowerIntrinsics::expandMemIntrinsicUses(Function &F) {
Intrinsic::ID ID = F.getIntrinsicID();
bool Changed = false;
@@ -67,7 +73,10 @@ static bool expandMemIntrinsicUses(Function &F) {
case Intrinsic::memcpy: {
auto *Memcpy = cast<MemCpyInst>(Inst);
if (shouldExpandOperationWithSize(Memcpy->getLength())) {
- expandMemCpyAsLoop(Memcpy);
+ Function *ParentFunc = Memcpy->getParent()->getParent();
+ const TargetTransformInfo &TTI =
+ getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*ParentFunc);
+ expandMemCpyAsLoop(Memcpy, TTI);
Changed = true;
Memcpy->eraseFromParent();
}
diff --git a/lib/Target/AMDGPU/AMDGPUMacroFusion.cpp b/lib/Target/AMDGPU/AMDGPUMacroFusion.cpp
new file mode 100644
index 000000000000..7263ba73d155
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUMacroFusion.cpp
@@ -0,0 +1,64 @@
+//===--- AMDGPUMacroFusion.cpp - AMDGPU Macro Fusion ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file contains the AMDGPU implementation of the DAG scheduling
+/// mutation to pair instructions back to back.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUMacroFusion.h"
+#include "AMDGPUSubtarget.h"
+#include "SIInstrInfo.h"
+
+#include "llvm/CodeGen/MacroFusion.h"
+
+using namespace llvm;
+
+namespace {
+
+/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused
+/// together. When FirstMI is unspecified, check whether SecondMI may be part
+/// of a fused pair at all.
+static bool shouldScheduleAdjacent(const TargetInstrInfo &TII_,
+ const TargetSubtargetInfo &TSI,
+ const MachineInstr *FirstMI,
+ const MachineInstr &SecondMI) {
+ const SIInstrInfo &TII = static_cast<const SIInstrInfo&>(TII_);
+
+ switch (SecondMI.getOpcode()) {
+ case AMDGPU::V_ADDC_U32_e64:
+ case AMDGPU::V_SUBB_U32_e64:
+ case AMDGPU::V_CNDMASK_B32_e64: {
+ // Try to cluster defs of condition registers to their uses. This improves
+ // the chance VCC will be available which will allow shrinking to VOP2
+ // encodings.
+ if (!FirstMI)
+ return true;
+
+ const MachineOperand *Src2 = TII.getNamedOperand(SecondMI,
+ AMDGPU::OpName::src2);
+ return FirstMI->definesRegister(Src2->getReg());
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+} // end namespace
+
+
+namespace llvm {
+
+std::unique_ptr<ScheduleDAGMutation> createAMDGPUMacroFusionDAGMutation() {
+ return createMacroFusionDAGMutation(shouldScheduleAdjacent);
+}
+
+} // end namespace llvm
diff --git a/lib/Target/AMDGPU/AMDGPUMacroFusion.h b/lib/Target/AMDGPU/AMDGPUMacroFusion.h
new file mode 100644
index 000000000000..844958580a65
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUMacroFusion.h
@@ -0,0 +1,19 @@
+//===- AMDGPUMacroFusion.h - AMDGPU Macro Fusion ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineScheduler.h"
+
+namespace llvm {
+
+/// Note that you have to add:
+/// DAG.addMutation(createAMDGPUMacroFusionDAGMutation());
+/// to AMDGPUPassConfig::createMachineScheduler() to have an effect.
+std::unique_ptr<ScheduleDAGMutation> createAMDGPUMacroFusionDAGMutation();
+
+} // llvm
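// Usage sketch for the note above (mirrors the AMDGPUTargetMachine.cpp hunk
// further below; names outside that hunk are assumptions):
//
//   static ScheduleDAGInstrs *
//   createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
//     auto *DAG = new GCNScheduleDAGMILive(
//         C, make_unique<GCNMaxOccupancySchedStrategy>(C));
//     DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
//     return DAG;
//   }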
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index be47b900c6f0..1bc5a52053ec 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -13,6 +13,14 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUSubtarget.h"
+#include "AMDGPU.h"
+#include "AMDGPUTargetMachine.h"
+#ifdef LLVM_BUILD_GLOBAL_ISEL
+#include "AMDGPUCallLowering.h"
+#include "AMDGPUInstructionSelector.h"
+#include "AMDGPULegalizerInfo.h"
+#include "AMDGPURegisterBankInfo.h"
+#endif
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/MachineScheduler.h"
@@ -72,6 +80,31 @@ AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
return *this;
}
+#ifdef LLVM_BUILD_GLOBAL_ISEL
+namespace {
+
+struct SIGISelActualAccessor : public GISelAccessor {
+ std::unique_ptr<AMDGPUCallLowering> CallLoweringInfo;
+ std::unique_ptr<InstructionSelector> InstSelector;
+ std::unique_ptr<LegalizerInfo> Legalizer;
+ std::unique_ptr<RegisterBankInfo> RegBankInfo;
+ const AMDGPUCallLowering *getCallLowering() const override {
+ return CallLoweringInfo.get();
+ }
+ const InstructionSelector *getInstructionSelector() const override {
+ return InstSelector.get();
+ }
+ const LegalizerInfo *getLegalizerInfo() const override {
+ return Legalizer.get();
+ }
+ const RegisterBankInfo *getRegBankInfo() const override {
+ return RegBankInfo.get();
+ }
+};
+
+} // end anonymous namespace
+#endif
+
AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
const TargetMachine &TM)
: AMDGPUGenSubtargetInfo(TT, GPU, FS),
@@ -265,18 +298,21 @@ bool AMDGPUSubtarget::makeLIDRangeMetadata(Instruction *I) const {
case Intrinsic::amdgcn_workitem_id_x:
case Intrinsic::r600_read_tidig_x:
IdQuery = true;
+ LLVM_FALLTHROUGH;
case Intrinsic::r600_read_local_size_x:
Dim = 0;
break;
case Intrinsic::amdgcn_workitem_id_y:
case Intrinsic::r600_read_tidig_y:
IdQuery = true;
+ LLVM_FALLTHROUGH;
case Intrinsic::r600_read_local_size_y:
Dim = 1;
break;
case Intrinsic::amdgcn_workitem_id_z:
case Intrinsic::r600_read_tidig_z:
IdQuery = true;
+ LLVM_FALLTHROUGH;
case Intrinsic::r600_read_local_size_z:
Dim = 2;
break;
@@ -317,11 +353,23 @@ R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS,
TLInfo(TM, *this) {}
SISubtarget::SISubtarget(const Triple &TT, StringRef GPU, StringRef FS,
- const TargetMachine &TM) :
- AMDGPUSubtarget(TT, GPU, FS, TM),
- InstrInfo(*this),
- FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
- TLInfo(TM, *this) {}
+ const TargetMachine &TM)
+ : AMDGPUSubtarget(TT, GPU, FS, TM), InstrInfo(*this),
+ FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
+ TLInfo(TM, *this) {
+#ifndef LLVM_BUILD_GLOBAL_ISEL
+ GISelAccessor *GISel = new GISelAccessor();
+#else
+ SIGISelActualAccessor *GISel = new SIGISelActualAccessor();
+ GISel->CallLoweringInfo.reset(new AMDGPUCallLowering(*getTargetLowering()));
+ GISel->Legalizer.reset(new AMDGPULegalizerInfo());
+
+ GISel->RegBankInfo.reset(new AMDGPURegisterBankInfo(*getRegisterInfo()));
+ GISel->InstSelector.reset(new AMDGPUInstructionSelector(
+ *this, *static_cast<AMDGPURegisterBankInfo *>(GISel->RegBankInfo.get())));
+#endif
+ setGISelAccessor(*GISel);
+}
void SISubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
unsigned NumRegionInstrs) const {
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 425fd35d47de..dc868f010d85 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -19,9 +19,7 @@
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
-#ifdef LLVM_BUILD_GLOBAL_ISEL
-#include "AMDGPURegisterBankInfo.h"
-#endif
+#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
@@ -85,7 +83,7 @@ static cl::opt<bool> EnableLoadStoreVectorizer(
static cl::opt<bool> ScalarizeGlobal(
"amdgpu-scalarize-global-loads",
cl::desc("Enable global load scalarization"),
- cl::init(false),
+ cl::init(true),
cl::Hidden);
// Option to run internalize pass.
@@ -176,6 +174,7 @@ createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
+ DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
return DAG;
}
@@ -389,31 +388,6 @@ const R600Subtarget *R600TargetMachine::getSubtargetImpl(
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//
-#ifdef LLVM_BUILD_GLOBAL_ISEL
-namespace {
-
-struct SIGISelActualAccessor : public GISelAccessor {
- std::unique_ptr<AMDGPUCallLowering> CallLoweringInfo;
- std::unique_ptr<InstructionSelector> InstSelector;
- std::unique_ptr<LegalizerInfo> Legalizer;
- std::unique_ptr<RegisterBankInfo> RegBankInfo;
- const AMDGPUCallLowering *getCallLowering() const override {
- return CallLoweringInfo.get();
- }
- const InstructionSelector *getInstructionSelector() const override {
- return InstSelector.get();
- }
- const LegalizerInfo *getLegalizerInfo() const override {
- return Legalizer.get();
- }
- const RegisterBankInfo *getRegBankInfo() const override {
- return RegBankInfo.get();
- }
-};
-
-} // end anonymous namespace
-#endif
-
GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
TargetOptions Options,
@@ -435,21 +409,6 @@ const SISubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
// function that reside in TargetOptions.
resetTargetOptions(F);
I = llvm::make_unique<SISubtarget>(TargetTriple, GPU, FS, *this);
-
-#ifndef LLVM_BUILD_GLOBAL_ISEL
- GISelAccessor *GISel = new GISelAccessor();
-#else
- SIGISelActualAccessor *GISel = new SIGISelActualAccessor();
- GISel->CallLoweringInfo.reset(
- new AMDGPUCallLowering(*I->getTargetLowering()));
- GISel->Legalizer.reset(new AMDGPULegalizerInfo());
-
- GISel->RegBankInfo.reset(new AMDGPURegisterBankInfo(*I->getRegisterInfo()));
- GISel->InstSelector.reset(new AMDGPUInstructionSelector(*I,
- *static_cast<AMDGPURegisterBankInfo*>(GISel->RegBankInfo.get())));
-#endif
-
- I->setGISelAccessor(*GISel);
}
I->setScalarizeGlobalBehavior(ScalarizeGlobal);
diff --git a/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 7b8756050b75..e3c90f250600 100644
--- a/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1058,17 +1058,13 @@ public:
OperandMatchResultTy parseOModOperand(OperandVector &Operands);
- void cvtId(MCInst &Inst, const OperandVector &Operands);
- void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
-
- void cvtVOP3Impl(MCInst &Inst,
- const OperandVector &Operands,
- OptionalImmIndexMap &OptionalIdx);
+ void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
+ OptionalImmIndexMap &OptionalIdx);
void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
- void cvtVOP3OMod(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
- void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
+ void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
+ bool IsAtomic = false);
void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
@@ -3870,13 +3866,19 @@ void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
// mimg
//===----------------------------------------------------------------------===//
-void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
+void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
+ bool IsAtomic) {
unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
}
+ if (IsAtomic) {
+ // Add src, same as dst
+ ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
+ }
+
OptionalImmIndexMap OptionalIdx;
for (unsigned E = Operands.size(); I != E; ++I) {
@@ -3904,39 +3906,7 @@ void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
}
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
- unsigned I = 1;
- const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
- for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
- ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
- }
-
- // Add src, same as dst
- ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
-
- OptionalImmIndexMap OptionalIdx;
-
- for (unsigned E = Operands.size(); I != E; ++I) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
-
- // Add the register arguments
- if (Op.isRegOrImm()) {
- Op.addRegOrImmOperands(Inst, 1);
- continue;
- } else if (Op.isImmModifier()) {
- OptionalIdx[Op.getImmTy()] = I;
- } else {
- llvm_unreachable("unexpected operand type");
- }
- }
-
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+ cvtMIMG(Inst, Operands, true);
}
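// Illustrative note on the IsAtomic path above (not from the patch): image
// atomics return the previous memory value in the same VGPR that supplies the
// data, so the register already added as the def is appended a second time as
// the tied data use before the optional immediate operands are filled in,
// e.g. for something like
//   image_atomic_add v4, v[0:3], s[0:7] dmask:0x1 unorm glc
// v4 ends up in the MCInst both as the result and as the data source.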
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
@@ -4118,25 +4088,6 @@ OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
return MatchOperand_NoMatch;
}
-void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
- unsigned I = 1;
- const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
- for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
- ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
- }
- for (unsigned E = Operands.size(); I != E; ++I)
- ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
-}
-
-void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
- uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
- if (TSFlags & SIInstrFlags::VOP3) {
- cvtVOP3(Inst, Operands);
- } else {
- cvtId(Inst, Operands);
- }
-}
-
static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
// 1. This operand is input modifiers
return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
@@ -4148,91 +4099,78 @@ static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
&& Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
}
-void AMDGPUAsmParser::cvtVOP3Impl(MCInst &Inst, const OperandVector &Operands,
- OptionalImmIndexMap &OptionalIdx) {
+void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
+ OptionalImmIndexMap &OptionalIdx) {
+ unsigned Opc = Inst.getOpcode();
+
unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
}
- for (unsigned E = Operands.size(); I != E; ++I) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
- if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
- Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
- } else if (Op.isImmModifier()) {
- OptionalIdx[Op.getImmTy()] = I;
- } else if (Op.isRegOrImm()) {
- Op.addRegOrImmOperands(Inst, 1);
- } else {
- llvm_unreachable("unhandled operand type");
+ if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
+ // This instruction has src modifiers
+ for (unsigned E = Operands.size(); I != E; ++I) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+ if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
+ Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
+ } else if (Op.isImmModifier()) {
+ OptionalIdx[Op.getImmTy()] = I;
+ } else if (Op.isRegOrImm()) {
+ Op.addRegOrImmOperands(Inst, 1);
+ } else {
+ llvm_unreachable("unhandled operand type");
+ }
+ }
+ } else {
+ // No src modifiers
+ for (unsigned E = Operands.size(); I != E; ++I) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+ if (Op.isMod()) {
+ OptionalIdx[Op.getImmTy()] = I;
+ } else {
+ Op.addRegOrImmOperands(Inst, 1);
+ }
}
}
-}
-void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
- OptionalImmIndexMap OptionalIdx;
-
- cvtVOP3Impl(Inst, Operands, OptionalIdx);
+ if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
+ }
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
+ if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
+ }
// special case v_mac_{f16, f32}:
// it has src2 register operand that is tied to dst operand
// we don't allow modifiers for this operand in assembler so src2_modifiers
// should be 0
- if (Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
- Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
- Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi) {
+ if (Opc == AMDGPU::V_MAC_F32_e64_si || Opc == AMDGPU::V_MAC_F32_e64_vi ||
+ Opc == AMDGPU::V_MAC_F16_e64_vi) {
auto it = Inst.begin();
- std::advance(
- it,
- AMDGPU::getNamedOperandIdx(Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ?
- AMDGPU::V_MAC_F16_e64 :
- AMDGPU::V_MAC_F32_e64,
- AMDGPU::OpName::src2_modifiers));
+ std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
++it;
Inst.insert(it, Inst.getOperand(0)); // src2 = dst
}
}
-void AMDGPUAsmParser::cvtVOP3OMod(MCInst &Inst, const OperandVector &Operands) {
+void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
OptionalImmIndexMap OptionalIdx;
-
- unsigned I = 1;
- const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
- for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
- ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
- }
-
- for (unsigned E = Operands.size(); I != E; ++I) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
- if (Op.isMod()) {
- OptionalIdx[Op.getImmTy()] = I;
- } else {
- Op.addRegOrImmOperands(Inst, 1);
- }
- }
-
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
+ cvtVOP3(Inst, Operands, OptionalIdx);
}
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands) {
OptionalImmIndexMap OptIdx;
- cvtVOP3Impl(Inst, Operands, OptIdx);
+ cvtVOP3(Inst, Operands, OptIdx);
// FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
// instruction, and then figure out where to actually put the modifiers
int Opc = Inst.getOpcode();
- if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
- addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyClampSI);
- }
-
addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);
addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi, -1);
@@ -4284,7 +4222,7 @@ void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands) {
int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
- Inst.getOperand(ModIdx).setImm(ModVal);
+ Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
}
}
diff --git a/lib/Target/AMDGPU/CMakeLists.txt b/lib/Target/AMDGPU/CMakeLists.txt
index 917d9cfa6905..971208c5db84 100644
--- a/lib/Target/AMDGPU/CMakeLists.txt
+++ b/lib/Target/AMDGPU/CMakeLists.txt
@@ -47,6 +47,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUIntrinsicInfo.cpp
AMDGPUISelDAGToDAG.cpp
AMDGPULowerIntrinsics.cpp
+ AMDGPUMacroFusion.cpp
AMDGPUMCInstLower.cpp
AMDGPUMachineCFGStructurizer.cpp
AMDGPUMachineFunction.cpp
diff --git a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
index 8ead48067336..2e7641cda375 100644
--- a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -17,7 +17,7 @@
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
namespace llvm {
std::vector<const SUnit*> makeMinRegSchedule(ArrayRef<const SUnit*> TopRoots,
diff --git a/lib/Target/AMDGPU/GCNMinRegStrategy.cpp b/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
index d378df674be9..0657f67b217d 100644
--- a/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
+++ b/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
@@ -15,7 +15,7 @@
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
namespace {
class GCNMinRegScheduler {
diff --git a/lib/Target/AMDGPU/GCNRegPressure.cpp b/lib/Target/AMDGPU/GCNRegPressure.cpp
index 390a8286c76a..1d02c7fdffbf 100644
--- a/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -16,7 +16,7 @@
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
diff --git a/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 8ec46665daf5..155b400ba022 100644
--- a/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -20,7 +20,7 @@
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Support/MathExtras.h"
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
using namespace llvm;
diff --git a/lib/Target/AMDGPU/GCNSchedStrategy.h b/lib/Target/AMDGPU/GCNSchedStrategy.h
index 3ed3cd5b3b1c..060d2ca72d93 100644
--- a/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -66,7 +66,7 @@ class GCNScheduleDAGMILive : public ScheduleDAGMILive {
const SIMachineFunctionInfo &MFI;
- // Occupancy target at the begining of function scheduling cycle.
+ // Occupancy target at the beginning of function scheduling cycle.
unsigned StartingOccupancy;
// Minimal real occupancy recorder for the function.
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
index 2b408ff10caa..a50e3eb8d9ce 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -32,7 +32,7 @@ public:
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override;
+ uint64_t Value, bool IsResolved) const override;
bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
const MCRelaxableFragment *DF,
const MCAsmLayout &Layout) const override {
@@ -100,7 +100,7 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
- bool IsPCRel) const {
+ bool IsResolved) const {
Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
if (!Value)
return; // Doesn't change encoding.
diff --git a/lib/Target/AMDGPU/MIMGInstructions.td b/lib/Target/AMDGPU/MIMGInstructions.td
index a515eecc222a..06e2c11b0193 100644
--- a/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/lib/Target/AMDGPU/MIMGInstructions.td
@@ -26,6 +26,7 @@ class MIMG_Helper <dag outs, dag ins, string asm,
let isAsmParserOnly = !if(!eq(dns,""), 1, 0);
let AsmMatchConverter = "cvtMIMG";
let usesCustomInserter = 1;
+ let SchedRW = [WriteVMEM];
}
class MIMG_NoSampler_Helper <bits<7> op, string asm,
diff --git a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
index 6993e8a62a9c..00cbd24b84fb 100644
--- a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
+++ b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
@@ -555,7 +555,7 @@ public:
CFStack.pushBranch(AMDGPU::CF_PUSH_EG);
} else
CFStack.pushBranch(AMDGPU::CF_ALU_PUSH_BEFORE);
-
+ LLVM_FALLTHROUGH;
case AMDGPU::CF_ALU:
I = MI;
AluClauses.push_back(MakeALUClause(MBB, I));
diff --git a/lib/Target/AMDGPU/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp
index 215791f4f92d..69a63b6941ef 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1618,7 +1618,8 @@ EVT R600TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
return VT.changeVectorElementTypeToInteger();
}
-bool R600TargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT) const {
+bool R600TargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
+ const SelectionDAG &DAG) const {
// Local and Private addresses do not handle vectors. Limit to i32
if ((AS == AMDGPUASI.LOCAL_ADDRESS || AS == AMDGPUASI.PRIVATE_ADDRESS)) {
return (MemVT.getSizeInBits() <= 32);
diff --git a/lib/Target/AMDGPU/R600ISelLowering.h b/lib/Target/AMDGPU/R600ISelLowering.h
index d6a0876a6ee7..2a774693f02b 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.h
+++ b/lib/Target/AMDGPU/R600ISelLowering.h
@@ -44,7 +44,8 @@ public:
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
EVT VT) const override;
- bool canMergeStoresTo(unsigned AS, EVT MemVT) const override;
+ bool canMergeStoresTo(unsigned AS, EVT MemVT,
+ const SelectionDAG &DAG) const override;
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
unsigned Align,
diff --git a/lib/Target/AMDGPU/R600MachineScheduler.cpp b/lib/Target/AMDGPU/R600MachineScheduler.cpp
index 47fda1c8fa82..a7e540f9d14d 100644
--- a/lib/Target/AMDGPU/R600MachineScheduler.cpp
+++ b/lib/Target/AMDGPU/R600MachineScheduler.cpp
@@ -22,7 +22,7 @@
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
diff --git a/lib/Target/AMDGPU/SIFoldOperands.cpp b/lib/Target/AMDGPU/SIFoldOperands.cpp
index f391f67a241f..3af242d9ea66 100644
--- a/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -137,6 +137,7 @@ static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
= TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
}
+ return false;
}
default:
return false;
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index d39b345bdf03..2ba570b9ebbb 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -547,7 +547,7 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = 0;
const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
- Info.vol = !Vol || !Vol->isNullValue();
+ Info.vol = !Vol || !Vol->isZero();
Info.readMem = true;
Info.writeMem = true;
return true;
@@ -713,7 +713,8 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
}
}
-bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT) const {
+bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
+ const SelectionDAG &DAG) const {
if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
return (MemVT.getSizeInBits() <= 4 * 32);
} else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
@@ -2374,20 +2375,16 @@ void SITargetLowering::ReplaceNodeResults(SDNode *N,
}
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
- switch (IID) {
- case Intrinsic::amdgcn_cvt_pkrtz: {
+ if (IID == Intrinsic::amdgcn_cvt_pkrtz) {
SDValue Src0 = N->getOperand(1);
SDValue Src1 = N->getOperand(2);
SDLoc SL(N);
SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
Src0, Src1);
-
Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
return;
}
- default:
- break;
- }
+ break;
}
case ISD::SELECT: {
SDLoc SL(N);
@@ -3736,7 +3733,9 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
EVT VT = Op.getValueType();
- bool Unsafe = DAG.getTarget().Options.UnsafeFPMath;
+ const SDNodeFlags Flags = Op->getFlags();
+ bool Unsafe = DAG.getTarget().Options.UnsafeFPMath ||
+ Flags.hasUnsafeAlgebra() || Flags.hasAllowReciprocal();
if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
return SDValue();
@@ -3771,15 +3770,11 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
}
}
- const SDNodeFlags Flags = Op->getFlags();
-
- if (Unsafe || Flags.hasAllowReciprocal()) {
+ if (Unsafe) {
// Turn into multiply by the reciprocal.
// x / y -> x * (1.0 / y)
- SDNodeFlags NewFlags;
- NewFlags.setUnsafeAlgebra(true);
SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
- return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, NewFlags);
+ return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
}
return SDValue();
@@ -4622,15 +4617,99 @@ SDValue SITargetLowering::performClassCombine(SDNode *N,
return SDValue();
}
+static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
+ if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
+ return true;
+
+ return DAG.isKnownNeverNaN(Op);
+}
+
+static bool isCanonicalized(SDValue Op, const SISubtarget *ST,
+ unsigned MaxDepth=5) {
+ // If source is a result of another standard FP operation it is already in
+ // canonical form.
+
+ switch (Op.getOpcode()) {
+ default:
+ break;
+
+ // These will flush denorms if required.
+ case ISD::FADD:
+ case ISD::FSUB:
+ case ISD::FMUL:
+ case ISD::FSQRT:
+ case ISD::FCEIL:
+ case ISD::FFLOOR:
+ case ISD::FMA:
+ case ISD::FMAD:
+
+ case ISD::FCANONICALIZE:
+ return true;
+
+ case ISD::FP_ROUND:
+ return Op.getValueType().getScalarType() != MVT::f16 ||
+ ST->hasFP16Denormals();
+
+ case ISD::FP_EXTEND:
+ return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 ||
+ ST->hasFP16Denormals();
+
+ case ISD::FP16_TO_FP:
+ case ISD::FP_TO_FP16:
+ return ST->hasFP16Denormals();
+
+ // It can/will be lowered or combined as a bit operation.
+ // Need to check their input recursively to handle.
+ case ISD::FNEG:
+ case ISD::FABS:
+ return (MaxDepth > 0) &&
+ isCanonicalized(Op.getOperand(0), ST, MaxDepth - 1);
+
+ case ISD::FSIN:
+ case ISD::FCOS:
+ case ISD::FSINCOS:
+ return Op.getValueType().getScalarType() != MVT::f16;
+
+ // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
+ // For such targets we need to check their input recursively.
+ // TODO: on GFX9+ we could return true without checking provided no-nan
+ // mode, since canonicalization is also used to quiet sNaNs.
+ case ISD::FMINNUM:
+ case ISD::FMAXNUM:
+ case ISD::FMINNAN:
+ case ISD::FMAXNAN:
+
+ return (MaxDepth > 0) &&
+ isCanonicalized(Op.getOperand(0), ST, MaxDepth - 1) &&
+ isCanonicalized(Op.getOperand(1), ST, MaxDepth - 1);
+
+ case ISD::ConstantFP: {
+ auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
+ return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
+ }
+ }
+ return false;
+}
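// Worked example of the combine this enables (illustrative, assuming IEEE
// mode or a source known not to be a signaling NaN):
//   %a = fadd float %x, %y
//   %c = call float @llvm.canonicalize.f32(float %a)
// FADD is in the "already canonical" list above, so the canonicalize is
// folded away and uses of %c are replaced with %a.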
+
// Constant fold canonicalize.
SDValue SITargetLowering::performFCanonicalizeCombine(
SDNode *N,
DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));
- if (!CFP)
+
+ if (!CFP) {
+ SDValue N0 = N->getOperand(0);
+
+ bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
+
+ if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
+ isCanonicalized(N0, getSubtarget()))
+ return N0;
+
return SDValue();
+ }
- SelectionDAG &DAG = DCI.DAG;
const APFloat &C = CFP->getValueAPF();
// Flush denormals to 0 if not enabled.
@@ -4723,13 +4802,6 @@ SDValue SITargetLowering::performIntMed3ImmCombine(
return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
}
-static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
- if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
- return true;
-
- return DAG.isKnownNeverNaN(Op);
-}
-
SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
const SDLoc &SL,
SDValue Op0,
diff --git a/lib/Target/AMDGPU/SIISelLowering.h b/lib/Target/AMDGPU/SIISelLowering.h
index 24f88e632d38..83392a7ab1b2 100644
--- a/lib/Target/AMDGPU/SIISelLowering.h
+++ b/lib/Target/AMDGPU/SIISelLowering.h
@@ -153,7 +153,8 @@ public:
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
unsigned AS) const override;
- bool canMergeStoresTo(unsigned AS, EVT MemVT) const override;
+ bool canMergeStoresTo(unsigned AS, EVT MemVT,
+ const SelectionDAG &DAG) const override;
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
unsigned Align,
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index b6784ec14e9f..160f8837d49c 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2022,10 +2022,12 @@ MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
return nullptr;
case AMDGPU::V_MAC_F16_e64:
IsF16 = true;
+ LLVM_FALLTHROUGH;
case AMDGPU::V_MAC_F32_e64:
break;
case AMDGPU::V_MAC_F16_e32:
IsF16 = true;
+ LLVM_FALLTHROUGH;
case AMDGPU::V_MAC_F32_e32: {
int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
AMDGPU::OpName::src0);
@@ -4320,6 +4322,24 @@ SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const
return new GCNHazardRecognizer(MF);
}
+std::pair<unsigned, unsigned>
+SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
+ return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
+}
+
+ArrayRef<std::pair<unsigned, const char *>>
+SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
+ static const std::pair<unsigned, const char *> TargetFlags[] = {
+ { MO_GOTPCREL, "amdgpu-gotprel" },
+ { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
+ { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
+ { MO_REL32_LO, "amdgpu-rel32-lo" },
+ { MO_REL32_HI, "amdgpu-rel32-hi" }
+ };
+
+ return makeArrayRef(TargetFlags);
+}
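// Sketch of the intent (illustrative, not from the patch): MO_MASK covers the
// direct target-flag values, so decomposing a flag word yields the direct
// part plus (currently unused) bit flags, e.g.
//   decomposeMachineOperandsTargetFlags(SIInstrInfo::MO_REL32_LO)
//       == std::make_pair(unsigned(SIInstrInfo::MO_REL32_LO), 0u)
// which, together with the name table above, lets MIR round-trip operands
// such as "target-flags(amdgpu-rel32-lo) @sym".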
+
bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
MI.modifiesRegister(AMDGPU::EXEC, &RI);
diff --git a/lib/Target/AMDGPU/SIInstrInfo.h b/lib/Target/AMDGPU/SIInstrInfo.h
index 74b48c761808..d00c0d4a7f4e 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/lib/Target/AMDGPU/SIInstrInfo.h
@@ -100,6 +100,8 @@ protected:
public:
enum TargetOperandFlags {
+ MO_MASK = 0x7,
+
MO_NONE = 0,
// MO_GOTPCREL -> symbol@GOTPCREL -> R_AMDGPU_GOTPCREL.
MO_GOTPCREL = 1,
@@ -781,9 +783,15 @@ public:
void convertNonUniformLoopRegion(MachineBasicBlock *LoopEntry,
MachineBasicBlock *LoopEnd) const;
+ std::pair<unsigned, unsigned>
+ decomposeMachineOperandsTargetFlags(unsigned TF) const override;
+
ArrayRef<std::pair<int, const char *>>
getSerializableTargetIndices() const override;
+ ArrayRef<std::pair<unsigned, const char *>>
+ getSerializableDirectMachineOperandTargetFlags() const override;
+
ScheduleHazardRecognizer *
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *DAG) const override;
diff --git a/lib/Target/AMDGPU/SIInstrInfo.td b/lib/Target/AMDGPU/SIInstrInfo.td
index 4a81fb3b463a..ffb01363e131 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1502,6 +1502,8 @@ def VOP_B32_F16_F16 : VOPProfile <[i32, f16, f16, untyped]>;
def VOP_V2F16_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, v2f16]>;
def VOP_V2I16_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, v2i16]>;
+def VOP_F32_V2F16_V2F16_V2F16 : VOPProfile <[f32, v2f16, v2f16, v2f16]>;
+
def VOP_NONE : VOPProfile <[untyped, untyped, untyped, untyped]>;
def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>;
diff --git a/lib/Target/AMDGPU/SIMachineScheduler.cpp b/lib/Target/AMDGPU/SIMachineScheduler.cpp
index bb17dbbdfbd6..34886c48f461 100644
--- a/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -38,7 +38,7 @@
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
// This scheduler implements a different scheduling algorithm than
// GenericScheduler.
diff --git a/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 96a18544f02a..874fbadca7f3 100644
--- a/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -110,10 +110,8 @@ static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
}
const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
- const MachineOperand *Src1Mod =
- TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
-
- if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
+ if (Src1 && (!isVGPR(Src1, TRI, MRI) ||
+ TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
return false;
// We don't need to check src0, all input types are legal, so just make sure
@@ -122,58 +120,64 @@ static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
return false;
// Check output modifiers
- if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
- return false;
-
- return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
+ return !TII->hasModifiersSet(MI, AMDGPU::OpName::omod) &&
+ !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
}
/// \brief This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
-/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
-/// and will only fold literal constants if we are still in SSA.
-static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
+/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction.
+static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
MachineRegisterInfo &MRI, bool TryToCommute = true) {
-
- if (!MRI.isSSA())
- return;
-
assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));
int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
- // Only one literal constant is allowed per instruction, so if src0 is a
- // literal constant then we can't do any folding.
- if (TII->isLiteralConstant(MI, Src0Idx))
- return;
-
// Try to fold Src0
MachineOperand &Src0 = MI.getOperand(Src0Idx);
- if (Src0.isReg() && MRI.hasOneUse(Src0.getReg())) {
+ if (Src0.isReg()) {
unsigned Reg = Src0.getReg();
- MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
- if (Def && Def->isMoveImmediate()) {
- MachineOperand &MovSrc = Def->getOperand(1);
- bool ConstantFolded = false;
-
- if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
- isUInt<32>(MovSrc.getImm()))) {
- Src0.ChangeToImmediate(MovSrc.getImm());
- ConstantFolded = true;
- }
- if (ConstantFolded) {
- if (MRI.use_empty(Reg))
+ if (TargetRegisterInfo::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
+ MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
+ if (Def && Def->isMoveImmediate()) {
+ MachineOperand &MovSrc = Def->getOperand(1);
+ bool ConstantFolded = false;
+
+ if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
+ isUInt<32>(MovSrc.getImm()))) {
+ // It's possible to have only one component of a super-reg defined by
+ // a single mov, so we need to clear any subregister flag.
+ Src0.setSubReg(0);
+ Src0.ChangeToImmediate(MovSrc.getImm());
+ ConstantFolded = true;
+ } else if (MovSrc.isFI()) {
+ Src0.setSubReg(0);
+ Src0.ChangeToFrameIndex(MovSrc.getIndex());
+ ConstantFolded = true;
+ }
+
+ if (ConstantFolded) {
+ assert(MRI.use_empty(Reg));
Def->eraseFromParent();
- ++NumLiteralConstantsFolded;
- return;
+ ++NumLiteralConstantsFolded;
+ return true;
+ }
}
}
}
// We have failed to fold src0, so commute the instruction and try again.
- if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(MI))
- foldImmediates(MI, TII, MRI, false);
+ if (TryToCommute && MI.isCommutable()) {
+ if (TII->commuteInstruction(MI)) {
+ if (foldImmediates(MI, TII, MRI, false))
+ return true;
+ // Commute back.
+ TII->commuteInstruction(MI);
+ }
+ }
+
+ return false;
}
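// Illustrative before/after for the fold above (schematic MIR, not from the
// patch), assuming the mov result has a single use and the value fits in
// 32 bits:
//   %1 = V_MOV_B32_e32 42
//   %2 = V_ADD_F32_e32 %1, %0
// becomes
//   %2 = V_ADD_F32_e32 42, %0
// with the dead V_MOV erased. If src0 cannot accept the value, the
// instruction is commuted and the fold retried; on failure it is commuted
// back, which is why the function now reports success to its caller.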
// Copy MachineOperand with all flags except setting it as implicit.
diff --git a/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp b/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp
index 9908fc003ce7..92fb762ebd73 100644
--- a/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp
+++ b/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp
@@ -16,7 +16,7 @@
using namespace llvm;
-/// \brief The target which suports all AMD GPUs. This will eventually
+/// \brief The target which supports all AMD GPUs. This will eventually
/// be deprecated and there will be a R600 target and a GCN target.
Target &llvm::getTheAMDGPUTarget() {
static Target TheAMDGPUTarget;
diff --git a/lib/Target/AMDGPU/VOP3PInstructions.td b/lib/Target/AMDGPU/VOP3PInstructions.td
index 96d343099132..f2de1f995726 100644
--- a/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -16,12 +16,21 @@ class VOP3PInst<string OpName, VOPProfile P, SDPatternOperator node = null_frag>
!if(P.HasModifiers, getVOP3PModPat<P, node>.ret, getVOP3Pat<P, node>.ret)
>;
-// Non-packed instructions that use the VOP3P encoding. i.e. where
-// omod/abs are used.
+// Non-packed instructions that use the VOP3P encoding.
+// VOP3 neg/abs and VOP3P opsel/opsel_hi modifiers are allowed.
class VOP3_VOP3PInst<string OpName, VOPProfile P, SDPatternOperator node = null_frag> :
- VOP3P_Pseudo<OpName, P,
- !if(P.HasModifiers, getVOP3ModPat<P, node>.ret, getVOP3Pat<P, node>.ret)
->;
+ VOP3P_Pseudo<OpName, P> {
+ let InOperandList =
+ (ins
+ FP32InputMods:$src0_modifiers, VCSrc_f32:$src0,
+ FP32InputMods:$src1_modifiers, VCSrc_f32:$src1,
+ FP32InputMods:$src2_modifiers, VCSrc_f32:$src2,
+ clampmod:$clamp,
+ op_sel:$op_sel,
+ op_sel_hi:$op_sel_hi);
+ let AsmOperands =
+ " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$op_sel$op_sel_hi$clamp";
+}
let isCommutable = 1 in {
def V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, fma>;
@@ -46,9 +55,12 @@ def V_PK_ASHRREV_I16 : VOP3PInst<"v_pk_ashrrev_i16", VOP3_Profile<VOP_V2I16_V2I1
def V_PK_LSHRREV_B16 : VOP3PInst<"v_pk_lshrrev_b16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, lshr_rev>;
// XXX - Commutable?
-def V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
-def V_MAD_MIXLO_F16 : VOP3_VOP3PInst<"v_mad_mixlo_f16", VOP3_Profile<VOP_F16_F16_F16_F16>>;
-def V_MAD_MIXHI_F16 : VOP3_VOP3PInst<"v_mad_mixhi_f16", VOP3_Profile<VOP_F16_F16_F16_F16>>;
+// These are VOP3a-like opcodes which accept no omod.
+// Size of src arguments (16/32) is controlled by op_sel.
+// For 16-bit src arguments their location (hi/lo) is controlled by op_sel_hi.
+def V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3_Profile<VOP_F32_V2F16_V2F16_V2F16>>;
+def V_MAD_MIXLO_F16 : VOP3_VOP3PInst<"v_mad_mixlo_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16_V2F16>>;
+def V_MAD_MIXHI_F16 : VOP3_VOP3PInst<"v_mad_mixhi_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16_V2F16>>;
multiclass VOP3P_Real_vi<bits<10> op> {
diff --git a/lib/Target/AMDGPU/VOPInstructions.td b/lib/Target/AMDGPU/VOPInstructions.td
index e386f21c2ba4..77b7952b22a8 100644
--- a/lib/Target/AMDGPU/VOPInstructions.td
+++ b/lib/Target/AMDGPU/VOPInstructions.td
@@ -51,12 +51,8 @@ class VOP3Common <dag outs, dag ins, string asm = "",
let VOP3 = 1;
- let AsmMatchConverter =
- !if(!eq(VOP3Only,1),
- "cvtVOP3",
- !if(!eq(HasMods,1), "cvtVOP3_2_mod", ""));
-
let AsmVariantName = AMDGPUAsmVariants.VOP3;
+ let AsmMatchConverter = !if(!eq(HasMods,1), "cvtVOP3", "");
let isCodeGenOnly = 0;
@@ -106,13 +102,11 @@ class VOP3_Pseudo <string opName, VOPProfile P, list<dag> pattern = [],
let AsmVariantName = AMDGPUAsmVariants.VOP3;
let AsmMatchConverter =
- !if(!eq(VOP3Only,1),
- !if(!and(P.IsPacked, isVOP3P), "cvtVOP3P", "cvtVOP3"),
- !if(!eq(P.HasModifiers, 1),
- "cvtVOP3_2_mod",
- !if(!eq(P.HasOMod, 1), "cvtVOP3OMod", "")
- )
- );
+ !if(!and(P.IsPacked, isVOP3P),
+ "cvtVOP3P",
+ !if(!or(P.HasModifiers, P.HasOMod),
+ "cvtVOP3",
+ ""));
VOPProfile Pfl = P;
}
diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp
index 90f635c81254..582153daebde 100644
--- a/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -1103,6 +1103,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
case ARM::tPUSH:
// Special case here: no src & dst reg, but two extra imp ops.
StartOp = 2; NumOffset = 2;
+ LLVM_FALLTHROUGH;
case ARM::STMDB_UPD:
case ARM::t2STMDB_UPD:
case ARM::VSTMDDB_UPD:
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 1ec6b24b2ed6..3cf5950a1918 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1880,6 +1880,9 @@ isProfitableToIfCvt(MachineBasicBlock &TBB,
// Diamond: TBB is the block that is branched to, FBB is the fallthrough
TUnpredCycles = TCycles + TakenBranchCost;
FUnpredCycles = FCycles + NotTakenBranchCost;
+ // The branch at the end of FBB will disappear when it's predicated, so
+ // discount it from PredCost.
+ PredCost -= 1 * ScalingUpFactor;
}
// The total cost is the cost of each path scaled by their probabilities
unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index b4fb292c0116..e97a7ce5067f 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -193,10 +193,11 @@ getReservedRegs(const MachineFunction &MF) const {
for (unsigned R = 0; R < 16; ++R)
markSuperRegs(Reserved, ARM::D16 + R);
}
- const TargetRegisterClass *RC = &ARM::GPRPairRegClass;
- for(TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I!=E; ++I)
- for (MCSubRegIterator SI(*I, this); SI.isValid(); ++SI)
- if (Reserved.test(*SI)) markSuperRegs(Reserved, *I);
+ const TargetRegisterClass &RC = ARM::GPRPairRegClass;
+ for (unsigned Reg : RC)
+ for (MCSubRegIterator SI(Reg, this); SI.isValid(); ++SI)
+ if (Reserved.test(*SI))
+ markSuperRegs(Reserved, Reg);
assert(checkAllSuperRegsMarked(Reserved));
return Reserved;
@@ -315,8 +316,7 @@ ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
Hints.push_back(PairedPhys);
// Then prefer even or odd registers.
- for (unsigned I = 0, E = Order.size(); I != E; ++I) {
- unsigned Reg = Order[I];
+ for (unsigned Reg : Order) {
if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
continue;
// Don't provide hints that are paired to a reserved register.
@@ -659,11 +659,8 @@ bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI, unsigned Ba
const MCInstrDesc &Desc = MI->getDesc();
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
unsigned i = 0;
-
- while (!MI->getOperand(i).isFI()) {
- ++i;
- assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
- }
+ for (; !MI->getOperand(i).isFI(); ++i)
+ assert(i+1 < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
// AddrMode4 and AddrMode6 cannot handle any offset.
if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
diff --git a/lib/Target/ARM/ARMCallLowering.cpp b/lib/Target/ARM/ARMCallLowering.cpp
index e498f70b820d..051827a6a6a2 100644
--- a/lib/Target/ARM/ARMCallLowering.cpp
+++ b/lib/Target/ARM/ARMCallLowering.cpp
@@ -321,7 +321,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");
- // The necesary extensions are handled on the other side of the ABI
+ // The necessary extensions are handled on the other side of the ABI
// boundary.
markPhysRegUsed(PhysReg);
MIRBuilder.buildCopy(ValVReg, PhysReg);
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index e42514acd76f..6ba7593543a9 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -3398,9 +3398,9 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) {
SDLoc dl(Op);
- ConstantSDNode *ScopeN = cast<ConstantSDNode>(Op.getOperand(2));
- auto Scope = static_cast<SynchronizationScope>(ScopeN->getZExtValue());
- if (Scope == SynchronizationScope::SingleThread)
+ ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
+ auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
+ if (SSID == SyncScope::SingleThread)
return Op;
if (!Subtarget->hasDataBarrier()) {
@@ -5356,15 +5356,15 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
// Integer comparisons.
switch (SetCCOpcode) {
default: llvm_unreachable("Illegal integer comparison");
- case ISD::SETNE: Invert = true;
+ case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH;
case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
- case ISD::SETLT: Swap = true;
+ case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETGT: Opc = ARMISD::VCGT; break;
- case ISD::SETLE: Swap = true;
+ case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETGE: Opc = ARMISD::VCGE; break;
- case ISD::SETULT: Swap = true;
+ case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
- case ISD::SETULE: Swap = true;
+ case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
}
@@ -13779,7 +13779,9 @@ bool ARMTargetLowering::lowerInterleavedLoad(
// Convert the integer vector to pointer vector if the element is pointer.
if (EltTy->isPointerTy())
- SubVec = Builder.CreateIntToPtr(SubVec, SV->getType());
+ SubVec = Builder.CreateIntToPtr(
+ SubVec, VectorType::get(SV->getType()->getVectorElementType(),
+ VecTy->getVectorNumElements()));
SubVecs[SV].push_back(SubVec);
}
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 5044134f5b1e..f05b14255236 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -510,7 +510,8 @@ class InstrItineraryData;
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
unsigned &Cost) const override;
- bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT) const override {
+ bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
+ const SelectionDAG &DAG) const override {
// Do not merge to larger than i32.
return (MemVT.getSizeInBits() <= 32);
}
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index 53db5acbe805..42eac12e457b 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -4799,7 +4799,7 @@ def : t2InstAlias<"add${p} $Rd, pc, $imm",
// Pseudo instruction ldr Rt, =immediate
def t2LDRConstPool
: t2AsmPseudo<"ldr${p} $Rt, $immediate",
- (ins GPRnopc:$Rt, const_pool_asm_imm:$immediate, pred:$p)>;
+ (ins GPR:$Rt, const_pool_asm_imm:$immediate, pred:$p)>;
// Version w/ the .w suffix.
def : t2InstAlias<"ldr${p}.w $Rt, $immediate",
(t2LDRConstPool GPRnopc:$Rt,
diff --git a/lib/Target/ARM/ARMInstructionSelector.cpp b/lib/Target/ARM/ARMInstructionSelector.cpp
index 374176d1d737..29ef69ad0010 100644
--- a/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -20,6 +20,8 @@
#define DEBUG_TYPE "arm-isel"
+#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
+
using namespace llvm;
#ifndef LLVM_BUILD_GLOBAL_ISEL
@@ -42,13 +44,32 @@ public:
private:
bool selectImpl(MachineInstr &I) const;
- bool selectICmp(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII,
- MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
- const RegisterBankInfo &RBI) const;
+ struct CmpConstants;
+ struct InsertInfo;
+
+ bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
+ MachineRegisterInfo &MRI) const;
+
+ // Helper for inserting a comparison sequence that sets \p ResReg to either 1
+ // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or
+ // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS).
+ bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg,
+ ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg,
+ unsigned PrevRes) const;
+
+ // Set \p DestReg to \p Constant.
+ void putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const;
+
+ bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
+
+ // Check if the types match and both operands have the expected size and
+ // register bank.
+ bool validOpRegPair(MachineRegisterInfo &MRI, unsigned LHS, unsigned RHS,
+ unsigned ExpectedSize, unsigned ExpectedRegBankID) const;
- bool selectSelect(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII,
- MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
- const RegisterBankInfo &RBI) const;
+ // Check if the register has the expected size and register bank.
+ bool validReg(MachineRegisterInfo &MRI, unsigned Reg, unsigned ExpectedSize,
+ unsigned ExpectedRegBankID) const;
const ARMBaseInstrInfo &TII;
const ARMBaseRegisterInfo &TRI;
@@ -251,120 +272,233 @@ static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank,
return Opc;
}
-static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
+// When lowering comparisons, we sometimes need to perform two compares instead
+// of just one. Get the condition codes for both comparisons. If only one is
+// needed, the second member of the pair is ARMCC::AL.
+static std::pair<ARMCC::CondCodes, ARMCC::CondCodes>
+getComparePreds(CmpInst::Predicate Pred) {
+ std::pair<ARMCC::CondCodes, ARMCC::CondCodes> Preds = {ARMCC::AL, ARMCC::AL};
switch (Pred) {
- // Needs two compares...
case CmpInst::FCMP_ONE:
+ Preds = {ARMCC::GT, ARMCC::MI};
+ break;
case CmpInst::FCMP_UEQ:
- default:
- // AL is our "false" for now. The other two need more compares.
- return ARMCC::AL;
+ Preds = {ARMCC::EQ, ARMCC::VS};
+ break;
case CmpInst::ICMP_EQ:
case CmpInst::FCMP_OEQ:
- return ARMCC::EQ;
+ Preds.first = ARMCC::EQ;
+ break;
case CmpInst::ICMP_SGT:
case CmpInst::FCMP_OGT:
- return ARMCC::GT;
+ Preds.first = ARMCC::GT;
+ break;
case CmpInst::ICMP_SGE:
case CmpInst::FCMP_OGE:
- return ARMCC::GE;
+ Preds.first = ARMCC::GE;
+ break;
case CmpInst::ICMP_UGT:
case CmpInst::FCMP_UGT:
- return ARMCC::HI;
+ Preds.first = ARMCC::HI;
+ break;
case CmpInst::FCMP_OLT:
- return ARMCC::MI;
+ Preds.first = ARMCC::MI;
+ break;
case CmpInst::ICMP_ULE:
case CmpInst::FCMP_OLE:
- return ARMCC::LS;
+ Preds.first = ARMCC::LS;
+ break;
case CmpInst::FCMP_ORD:
- return ARMCC::VC;
+ Preds.first = ARMCC::VC;
+ break;
case CmpInst::FCMP_UNO:
- return ARMCC::VS;
+ Preds.first = ARMCC::VS;
+ break;
case CmpInst::FCMP_UGE:
- return ARMCC::PL;
+ Preds.first = ARMCC::PL;
+ break;
case CmpInst::ICMP_SLT:
case CmpInst::FCMP_ULT:
- return ARMCC::LT;
+ Preds.first = ARMCC::LT;
+ break;
case CmpInst::ICMP_SLE:
case CmpInst::FCMP_ULE:
- return ARMCC::LE;
+ Preds.first = ARMCC::LE;
+ break;
case CmpInst::FCMP_UNE:
case CmpInst::ICMP_NE:
- return ARMCC::NE;
+ Preds.first = ARMCC::NE;
+ break;
case CmpInst::ICMP_UGE:
- return ARMCC::HS;
+ Preds.first = ARMCC::HS;
+ break;
case CmpInst::ICMP_ULT:
- return ARMCC::LO;
+ Preds.first = ARMCC::LO;
+ break;
+ default:
+ break;
}
+ assert(Preds.first != ARMCC::AL && "No comparisons needed?");
+ return Preds;
}
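// Worked example of the two-compare case (illustrative, not from the patch):
// FCMP_ONE ("ordered and not equal") has no single ARM condition code, so it
// is lowered as (LHS > RHS) || (LHS < RHS) using the {GT, MI} pair returned
// above. selectCmp()/insertComparison() below then emit, schematically:
//   Zero = MOVi 0
//   compare LHS, RHS              ; plus an FMSTAT-style flags read for FP
//   Tmp  = MOVCCi Zero, 1, GT     ; Tmp = (LHS > RHS) ? 1 : 0
//   compare LHS, RHS
//   Res  = MOVCCi Tmp, 1, MI      ; Res = (LHS < RHS) ? 1 : Tmp
// i.e. each comparison ORs its predicate into the running result.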
-bool ARMInstructionSelector::selectICmp(MachineInstrBuilder &MIB,
- const ARMBaseInstrInfo &TII,
- MachineRegisterInfo &MRI,
- const TargetRegisterInfo &TRI,
- const RegisterBankInfo &RBI) const {
- auto &MBB = *MIB->getParent();
- auto InsertBefore = std::next(MIB->getIterator());
- auto &DebugLoc = MIB->getDebugLoc();
-
- // Move 0 into the result register.
- auto Mov0I = BuildMI(MBB, InsertBefore, DebugLoc, TII.get(ARM::MOVi))
- .addDef(MRI.createVirtualRegister(&ARM::GPRRegClass))
- .addImm(0)
- .add(predOps(ARMCC::AL))
- .add(condCodeOp());
- if (!constrainSelectedInstRegOperands(*Mov0I, TII, TRI, RBI))
+struct ARMInstructionSelector::CmpConstants {
+ CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned OpRegBank,
+ unsigned OpSize)
+ : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode),
+ OperandRegBankID(OpRegBank), OperandSize(OpSize) {}
+
+ // The opcode used for performing the comparison.
+ const unsigned ComparisonOpcode;
+
+ // The opcode used for reading the flags set by the comparison. May be
+ // ARM::INSTRUCTION_LIST_END if we don't need to read the flags.
+ const unsigned ReadFlagsOpcode;
+
+ // The assumed register bank ID for the operands.
+ const unsigned OperandRegBankID;
+
+ // The assumed size in bits for the operands.
+ const unsigned OperandSize;
+};
+
+struct ARMInstructionSelector::InsertInfo {
+ InsertInfo(MachineInstrBuilder &MIB)
+ : MBB(*MIB->getParent()), InsertBefore(std::next(MIB->getIterator())),
+ DbgLoc(MIB->getDebugLoc()) {}
+
+ MachineBasicBlock &MBB;
+ const MachineBasicBlock::instr_iterator InsertBefore;
+ const DebugLoc &DbgLoc;
+};
+
+void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg,
+ unsigned Constant) const {
+ (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(ARM::MOVi))
+ .addDef(DestReg)
+ .addImm(Constant)
+ .add(predOps(ARMCC::AL))
+ .add(condCodeOp());
+}
+
+bool ARMInstructionSelector::validOpRegPair(MachineRegisterInfo &MRI,
+ unsigned LHSReg, unsigned RHSReg,
+ unsigned ExpectedSize,
+ unsigned ExpectedRegBankID) const {
+ return MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
+ validReg(MRI, LHSReg, ExpectedSize, ExpectedRegBankID) &&
+ validReg(MRI, RHSReg, ExpectedSize, ExpectedRegBankID);
+}
+
+bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
+ unsigned ExpectedSize,
+ unsigned ExpectedRegBankID) const {
+ if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
+ DEBUG(dbgs() << "Unexpected size for register");
return false;
+ }
- // Perform the comparison.
- auto LHSReg = MIB->getOperand(2).getReg();
- auto RHSReg = MIB->getOperand(3).getReg();
- assert(MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
- MRI.getType(LHSReg).getSizeInBits() == 32 &&
- MRI.getType(RHSReg).getSizeInBits() == 32 &&
- "Unsupported types for comparison operation");
- auto CmpI = BuildMI(MBB, InsertBefore, DebugLoc, TII.get(ARM::CMPrr))
- .addUse(LHSReg)
- .addUse(RHSReg)
- .add(predOps(ARMCC::AL));
- if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
+ if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
+ DEBUG(dbgs() << "Unexpected register bank for register");
return false;
+ }
+
+ return true;
+}
+
+bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
+ MachineInstrBuilder &MIB,
+ MachineRegisterInfo &MRI) const {
+ const InsertInfo I(MIB);
- // Move 1 into the result register if the flags say so.
auto ResReg = MIB->getOperand(0).getReg();
+ if (!validReg(MRI, ResReg, 1, ARM::GPRRegBankID))
+ return false;
+
auto Cond =
static_cast<CmpInst::Predicate>(MIB->getOperand(1).getPredicate());
- auto ARMCond = getComparePred(Cond);
- if (ARMCond == ARMCC::AL)
+ if (Cond == CmpInst::FCMP_TRUE || Cond == CmpInst::FCMP_FALSE) {
+ putConstant(I, ResReg, Cond == CmpInst::FCMP_TRUE ? 1 : 0);
+ MIB->eraseFromParent();
+ return true;
+ }
+
+ auto LHSReg = MIB->getOperand(2).getReg();
+ auto RHSReg = MIB->getOperand(3).getReg();
+ if (!validOpRegPair(MRI, LHSReg, RHSReg, Helper.OperandSize,
+ Helper.OperandRegBankID))
return false;
- auto Mov1I = BuildMI(MBB, InsertBefore, DebugLoc, TII.get(ARM::MOVCCi))
+ auto ARMConds = getComparePreds(Cond);
+ auto ZeroReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
+ putConstant(I, ZeroReg, 0);
+
+ if (ARMConds.second == ARMCC::AL) {
+ // Simple case, we only need one comparison and we're done.
+ if (!insertComparison(Helper, I, ResReg, ARMConds.first, LHSReg, RHSReg,
+ ZeroReg))
+ return false;
+ } else {
+ // Not so simple, we need two successive comparisons.
+ auto IntermediateRes = MRI.createVirtualRegister(&ARM::GPRRegClass);
+ if (!insertComparison(Helper, I, IntermediateRes, ARMConds.first, LHSReg,
+ RHSReg, ZeroReg))
+ return false;
+ if (!insertComparison(Helper, I, ResReg, ARMConds.second, LHSReg, RHSReg,
+ IntermediateRes))
+ return false;
+ }
+
+ MIB->eraseFromParent();
+ return true;
+}
+
+bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
+ unsigned ResReg,
+ ARMCC::CondCodes Cond,
+ unsigned LHSReg, unsigned RHSReg,
+ unsigned PrevRes) const {
+ // Perform the comparison.
+ auto CmpI =
+ BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Helper.ComparisonOpcode))
+ .addUse(LHSReg)
+ .addUse(RHSReg)
+ .add(predOps(ARMCC::AL));
+ if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
+ return false;
+
+ // Read the comparison flags (if necessary).
+ if (Helper.ReadFlagsOpcode != ARM::INSTRUCTION_LIST_END) {
+ auto ReadI = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
+ TII.get(Helper.ReadFlagsOpcode))
+ .add(predOps(ARMCC::AL));
+ if (!constrainSelectedInstRegOperands(*ReadI, TII, TRI, RBI))
+ return false;
+ }
+
+ // Select either 1 or the previous result based on the value of the flags.
+ auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(ARM::MOVCCi))
.addDef(ResReg)
- .addUse(Mov0I->getOperand(0).getReg())
+ .addUse(PrevRes)
.addImm(1)
- .add(predOps(ARMCond, ARM::CPSR));
+ .add(predOps(Cond, ARM::CPSR));
if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
return false;
- MIB->eraseFromParent();
return true;
}
bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
- const ARMBaseInstrInfo &TII,
- MachineRegisterInfo &MRI,
- const TargetRegisterInfo &TRI,
- const RegisterBankInfo &RBI) const {
+ MachineRegisterInfo &MRI) const {
auto &MBB = *MIB->getParent();
auto InsertBefore = std::next(MIB->getIterator());
- auto &DebugLoc = MIB->getDebugLoc();
+ auto &DbgLoc = MIB->getDebugLoc();
// Compare the condition to 0.
auto CondReg = MIB->getOperand(1).getReg();
- assert(MRI.getType(CondReg).getSizeInBits() == 1 &&
- RBI.getRegBank(CondReg, MRI, TRI)->getID() == ARM::GPRRegBankID &&
+ assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) &&
"Unsupported types for select operation");
- auto CmpI = BuildMI(MBB, InsertBefore, DebugLoc, TII.get(ARM::CMPri))
+ auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(ARM::CMPri))
.addUse(CondReg)
.addImm(0)
.add(predOps(ARMCC::AL));
@@ -376,13 +510,10 @@ bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
auto ResReg = MIB->getOperand(0).getReg();
auto TrueReg = MIB->getOperand(2).getReg();
auto FalseReg = MIB->getOperand(3).getReg();
- assert(MRI.getType(ResReg) == MRI.getType(TrueReg) &&
- MRI.getType(TrueReg) == MRI.getType(FalseReg) &&
- MRI.getType(FalseReg).getSizeInBits() == 32 &&
- RBI.getRegBank(TrueReg, MRI, TRI)->getID() == ARM::GPRRegBankID &&
- RBI.getRegBank(FalseReg, MRI, TRI)->getID() == ARM::GPRRegBankID &&
+ assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) &&
+ validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) &&
"Unsupported types for select operation");
- auto Mov1I = BuildMI(MBB, InsertBefore, DebugLoc, TII.get(ARM::MOVCCr))
+ auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(ARM::MOVCCr))
.addDef(ResReg)
.addUse(TrueReg)
.addUse(FalseReg)
@@ -494,10 +625,32 @@ bool ARMInstructionSelector::select(MachineInstr &I) const {
I.setDesc(TII.get(COPY));
return selectCopy(I, TII, MRI, TRI, RBI);
}
- case G_ICMP:
- return selectICmp(MIB, TII, MRI, TRI, RBI);
case G_SELECT:
- return selectSelect(MIB, TII, MRI, TRI, RBI);
+ return selectSelect(MIB, MRI);
+ case G_ICMP: {
+ CmpConstants Helper(ARM::CMPrr, ARM::INSTRUCTION_LIST_END,
+ ARM::GPRRegBankID, 32);
+ return selectCmp(Helper, MIB, MRI);
+ }
+ case G_FCMP: {
+ assert(TII.getSubtarget().hasVFP2() && "Can't select fcmp without VFP");
+
+ unsigned OpReg = I.getOperand(2).getReg();
+ unsigned Size = MRI.getType(OpReg).getSizeInBits();
+
+ if (Size == 64 && TII.getSubtarget().isFPOnlySP()) {
+ DEBUG(dbgs() << "Subtarget only supports single precision");
+ return false;
+ }
+ if (Size != 32 && Size != 64) {
+ DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
+ return false;
+ }
+
+ CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
+ ARM::FPRRegBankID, Size);
+ return selectCmp(Helper, MIB, MRI);
+ }
case G_GEP:
I.setDesc(TII.get(ARM::ADDrr));
MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
@@ -510,11 +663,10 @@ bool ARMInstructionSelector::select(MachineInstr &I) const {
break;
case G_CONSTANT: {
unsigned Reg = I.getOperand(0).getReg();
- if (MRI.getType(Reg).getSizeInBits() != 32)
+
+ if (!validReg(MRI, Reg, 32, ARM::GPRRegBankID))
return false;
- assert(RBI.getRegBank(Reg, MRI, TRI)->getID() == ARM::GPRRegBankID &&
- "Expected constant to live in a GPR");
I.setDesc(TII.get(ARM::MOVi));
MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
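For reference, the two-comparison path in selectCmp above reproduces the usual IEEE semantics of the predicates that need a pair of ARM condition codes: the result register is seeded with 0, and each insertComparison overwrites it with 1 through a predicated MOVCCi when its condition holds, so chaining two comparisons effectively ORs them. A minimal C++ sketch of the semantics being reproduced (illustrative only, not part of the patch):

    #include <cmath>

    // FCMP_ONE: ordered and not equal, i.e. true iff the operands compare
    // strictly less or strictly greater (any NaN makes it false).
    bool fcmpONE(float A, float B) { return A < B || A > B; }

    // FCMP_UEQ: unordered or equal, i.e. true iff the operands are equal or at
    // least one of them is NaN.
    bool fcmpUEQ(float A, float B) {
      return A == B || std::isnan(A) || std::isnan(B);
    }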
diff --git a/lib/Target/ARM/ARMLegalizerInfo.cpp b/lib/Target/ARM/ARMLegalizerInfo.cpp
index f3e62d09cc30..f23e62595d2e 100644
--- a/lib/Target/ARM/ARMLegalizerInfo.cpp
+++ b/lib/Target/ARM/ARMLegalizerInfo.cpp
@@ -28,6 +28,10 @@ using namespace llvm;
#error "You shouldn't build this"
#endif
+static bool AEABI(const ARMSubtarget &ST) {
+ return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
+}
+
ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
using namespace TargetOpcode;
@@ -66,8 +70,7 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
for (unsigned Op : {G_SREM, G_UREM})
if (ST.hasDivideInARMMode())
setAction({Op, s32}, Lower);
- else if (ST.isTargetAEABI() || ST.isTargetGNUAEABI() ||
- ST.isTargetMuslAEABI())
+ else if (AEABI(ST))
setAction({Op, s32}, Custom);
else
setAction({Op, s32}, Libcall);
@@ -86,6 +89,8 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
setAction({G_SELECT, 1, s1}, Legal);
setAction({G_CONSTANT, s32}, Legal);
+ for (auto Ty : {s1, s8, s16})
+ setAction({G_CONSTANT, Ty}, WidenScalar);
setAction({G_ICMP, s1}, Legal);
for (auto Ty : {s8, s16})
@@ -99,9 +104,22 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
setAction({G_LOAD, s64}, Legal);
setAction({G_STORE, s64}, Legal);
+
+ setAction({G_FCMP, s1}, Legal);
+ setAction({G_FCMP, 1, s32}, Legal);
+ setAction({G_FCMP, 1, s64}, Legal);
} else {
for (auto Ty : {s32, s64})
setAction({G_FADD, Ty}, Libcall);
+
+ setAction({G_FCMP, s1}, Legal);
+ setAction({G_FCMP, 1, s32}, Custom);
+ setAction({G_FCMP, 1, s64}, Custom);
+
+ if (AEABI(ST))
+ setFCmpLibcallsAEABI();
+ else
+ setFCmpLibcallsGNU();
}
for (unsigned Op : {G_FREM, G_FPOW})
@@ -111,11 +129,120 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
computeTables();
}
+void ARMLegalizerInfo::setFCmpLibcallsAEABI() {
+ // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
+ // default-initialized.
+ FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
+ FCmp32Libcalls[CmpInst::FCMP_OEQ] = {
+ {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp32Libcalls[CmpInst::FCMP_OGE] = {
+ {RTLIB::OGE_F32, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp32Libcalls[CmpInst::FCMP_OGT] = {
+ {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp32Libcalls[CmpInst::FCMP_OLE] = {
+ {RTLIB::OLE_F32, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp32Libcalls[CmpInst::FCMP_OLT] = {
+ {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
+ FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_EQ}};
+ FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_EQ}};
+ FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_EQ}};
+ FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_EQ}};
+ FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_EQ}};
+ FCmp32Libcalls[CmpInst::FCMP_UNO] = {
+ {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp32Libcalls[CmpInst::FCMP_ONE] = {
+ {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE},
+ {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp32Libcalls[CmpInst::FCMP_UEQ] = {
+ {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE},
+ {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};
+
+ FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
+ FCmp64Libcalls[CmpInst::FCMP_OEQ] = {
+ {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp64Libcalls[CmpInst::FCMP_OGE] = {
+ {RTLIB::OGE_F64, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp64Libcalls[CmpInst::FCMP_OGT] = {
+ {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp64Libcalls[CmpInst::FCMP_OLE] = {
+ {RTLIB::OLE_F64, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp64Libcalls[CmpInst::FCMP_OLT] = {
+ {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
+ FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_EQ}};
+ FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_EQ}};
+ FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_EQ}};
+ FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_EQ}};
+ FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_EQ}};
+ FCmp64Libcalls[CmpInst::FCMP_UNO] = {
+ {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp64Libcalls[CmpInst::FCMP_ONE] = {
+ {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE},
+ {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
+ FCmp64Libcalls[CmpInst::FCMP_UEQ] = {
+ {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE},
+ {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
+}
+
+void ARMLegalizerInfo::setFCmpLibcallsGNU() {
+ // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
+ // default-initialized.
+ FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
+ FCmp32Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ}};
+ FCmp32Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F32, CmpInst::ICMP_SGE}};
+ FCmp32Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT}};
+ FCmp32Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F32, CmpInst::ICMP_SLE}};
+ FCmp32Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
+ FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
+ FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_SGE}};
+ FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_SGT}};
+ FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SLE}};
+ FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_SLT}};
+ FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_NE}};
+ FCmp32Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F32, CmpInst::ICMP_NE}};
+ FCmp32Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT},
+ {RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
+ FCmp32Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ},
+ {RTLIB::UO_F32, CmpInst::ICMP_NE}};
+
+ FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
+ FCmp64Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ}};
+ FCmp64Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F64, CmpInst::ICMP_SGE}};
+ FCmp64Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT}};
+ FCmp64Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F64, CmpInst::ICMP_SLE}};
+ FCmp64Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
+ FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
+ FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_SGE}};
+ FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_SGT}};
+ FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SLE}};
+ FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_SLT}};
+ FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_NE}};
+ FCmp64Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F64, CmpInst::ICMP_NE}};
+ FCmp64Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT},
+ {RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
+ FCmp64Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ},
+ {RTLIB::UO_F64, CmpInst::ICMP_NE}};
+}
+
+ARMLegalizerInfo::FCmpLibcallsList
+ARMLegalizerInfo::getFCmpLibcalls(CmpInst::Predicate Predicate,
+ unsigned Size) const {
+ assert(CmpInst::isFPPredicate(Predicate) && "Unsupported FCmp predicate");
+ if (Size == 32)
+ return FCmp32Libcalls[Predicate];
+ if (Size == 64)
+ return FCmp64Libcalls[Predicate];
+ llvm_unreachable("Unsupported size for FCmp predicate");
+}
+
bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
MachineRegisterInfo &MRI,
MachineIRBuilder &MIRBuilder) const {
using namespace TargetOpcode;
+ MIRBuilder.setInstr(MI);
+
switch (MI.getOpcode()) {
default:
return false;
@@ -137,9 +264,9 @@ bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
auto RetVal = MRI.createGenericVirtualRegister(
getLLTForType(*RetTy, MIRBuilder.getMF().getDataLayout()));
- auto Status = replaceWithLibcall(MI, MIRBuilder, Libcall, {RetVal, RetTy},
- {{MI.getOperand(1).getReg(), ArgTy},
- {MI.getOperand(2).getReg(), ArgTy}});
+ auto Status = createLibcall(MIRBuilder, Libcall, {RetVal, RetTy},
+ {{MI.getOperand(1).getReg(), ArgTy},
+ {MI.getOperand(2).getReg(), ArgTy}});
if (Status != LegalizerHelper::Legalized)
return false;
@@ -149,8 +276,76 @@ bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
MIRBuilder.buildUnmerge(
{MRI.createGenericVirtualRegister(LLT::scalar(32)), OriginalResult},
RetVal);
+ break;
+ }
+ case G_FCMP: {
+ assert(MRI.getType(MI.getOperand(2).getReg()) ==
+ MRI.getType(MI.getOperand(3).getReg()) &&
+ "Mismatched operands for G_FCMP");
+ auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
+
+ auto OriginalResult = MI.getOperand(0).getReg();
+ auto Predicate =
+ static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
+ auto Libcalls = getFCmpLibcalls(Predicate, OpSize);
+
+ if (Libcalls.empty()) {
+ assert((Predicate == CmpInst::FCMP_TRUE ||
+ Predicate == CmpInst::FCMP_FALSE) &&
+ "Predicate needs libcalls, but none specified");
+ MIRBuilder.buildConstant(OriginalResult,
+ Predicate == CmpInst::FCMP_TRUE ? 1 : 0);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
+ auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
+ auto *RetTy = Type::getInt32Ty(Ctx);
+
+ SmallVector<unsigned, 2> Results;
+ for (auto Libcall : Libcalls) {
+ auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
+ auto Status =
+ createLibcall(MIRBuilder, Libcall.LibcallID, {LibcallResult, RetTy},
+ {{MI.getOperand(2).getReg(), ArgTy},
+ {MI.getOperand(3).getReg(), ArgTy}});
+
+ if (Status != LegalizerHelper::Legalized)
+ return false;
- return LegalizerHelper::Legalized;
+ auto ProcessedResult =
+ Libcalls.size() == 1
+ ? OriginalResult
+ : MRI.createGenericVirtualRegister(MRI.getType(OriginalResult));
+
+ // We have a result, but we need to transform it into a proper 1-bit 0 or
+ // 1, taking into account the different peculiarities of the values
+ // returned by the comparison functions.
+ CmpInst::Predicate ResultPred = Libcall.Predicate;
+ if (ResultPred == CmpInst::BAD_ICMP_PREDICATE) {
+ // We have a nice 0 or 1, and we just need to truncate it back to 1 bit
+ // to keep the types consistent.
+ MIRBuilder.buildTrunc(ProcessedResult, LibcallResult);
+ } else {
+ // We need to compare against 0.
+ assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
+ auto Zero = MRI.createGenericVirtualRegister(LLT::scalar(32));
+ MIRBuilder.buildConstant(Zero, 0);
+ MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
+ }
+ Results.push_back(ProcessedResult);
+ }
+
+ if (Results.size() != 1) {
+ assert(Results.size() == 2 && "Unexpected number of results");
+ MIRBuilder.buildOr(OriginalResult, Results[0], Results[1]);
+ }
+ break;
}
}
+
+ MI.eraseFromParent();
+ return true;
}
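The libcall tables above encode two return-value conventions: the AEABI helpers already return 0 or 1, so a direct predicate needs no follow-up compare (BAD_ICMP_PREDICATE, meaning the result only gets truncated) and an inverted one just tests the opposite helper's result for equality with zero, while the GNU soft-float helpers return an ordering value that always has to be compared against zero. A rough sketch of what the GNU-style lowering of FCMP_UEQ on f32 boils down to; the __eqsf2/__unordsf2 names are the customary libgcc spellings of OEQ_F32 and UO_F32 and are an assumption here, not something the patch states:

    extern "C" int __eqsf2(float, float);    // assumed: returns 0 when A == B (ordered)
    extern "C" int __unordsf2(float, float); // assumed: nonzero when A or B is NaN

    bool fcmpUEQSoftFloat(float A, float B) {
      bool Eq  = __eqsf2(A, B) == 0;    // the {OEQ_F32, ICMP_EQ} entry
      bool Uno = __unordsf2(A, B) != 0; // the {UO_F32, ICMP_NE} entry
      return Eq | Uno;                  // two results are OR'ed, as in legalizeCustom
    }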
diff --git a/lib/Target/ARM/ARMLegalizerInfo.h b/lib/Target/ARM/ARMLegalizerInfo.h
index a9bdd367737e..78ab9412c04b 100644
--- a/lib/Target/ARM/ARMLegalizerInfo.h
+++ b/lib/Target/ARM/ARMLegalizerInfo.h
@@ -14,7 +14,10 @@
#ifndef LLVM_LIB_TARGET_ARM_ARMMACHINELEGALIZER_H
#define LLVM_LIB_TARGET_ARM_ARMMACHINELEGALIZER_H
+#include "llvm/ADT/IndexedMap.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/IR/Instructions.h"
namespace llvm {
@@ -27,6 +30,36 @@ public:
bool legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &MIRBuilder) const override;
+
+private:
+ void setFCmpLibcallsGNU();
+ void setFCmpLibcallsAEABI();
+
+ struct FCmpLibcallInfo {
+ // Which libcall this is.
+ RTLIB::Libcall LibcallID;
+
+ // The predicate to be used when comparing the value returned by the
+ // function with a relevant constant (currently hard-coded to zero). This is
+ // necessary because often the libcall will return e.g. a value greater than
+ // 0 to represent 'true' and anything negative to represent 'false', or
+ // maybe 0 to represent 'true' and non-zero for 'false'. If no comparison is
+ // needed, this should be CmpInst::BAD_ICMP_PREDICATE.
+ CmpInst::Predicate Predicate;
+ };
+ using FCmpLibcallsList = SmallVector<FCmpLibcallInfo, 2>;
+
+ // Map from each FCmp predicate to the corresponding libcall infos. An FCmp
+ // instruction may be lowered to one or two libcalls, which is why we need a
+ // list. If two libcalls are needed, their results will be OR'ed.
+ using FCmpLibcallsMapTy = IndexedMap<FCmpLibcallsList>;
+
+ FCmpLibcallsMapTy FCmp32Libcalls;
+ FCmpLibcallsMapTy FCmp64Libcalls;
+
+ // Get the libcall(s) corresponding to \p Predicate for operands of \p Size
+ // bits.
+ FCmpLibcallsList getFCmpLibcalls(CmpInst::Predicate, unsigned Size) const;
};
} // End llvm namespace.
#endif
diff --git a/lib/Target/ARM/ARMRegisterBankInfo.cpp b/lib/Target/ARM/ARMRegisterBankInfo.cpp
index 11fb81a4f9fe..c0c09e8c15af 100644
--- a/lib/Target/ARM/ARMRegisterBankInfo.cpp
+++ b/lib/Target/ARM/ARMRegisterBankInfo.cpp
@@ -212,8 +212,6 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
- LLT Ty = MRI.getType(MI.getOperand(0).getReg());
-
unsigned NumOperands = MI.getNumOperands();
const ValueMapping *OperandsMapping = &ARM::ValueMappings[ARM::GPR3OpsIdx];
@@ -236,26 +234,31 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OperandsMapping = &ARM::ValueMappings[ARM::GPR3OpsIdx];
break;
case G_LOAD:
- case G_STORE:
+ case G_STORE: {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
OperandsMapping =
Ty.getSizeInBits() == 64
? getOperandsMapping({&ARM::ValueMappings[ARM::DPR3OpsIdx],
&ARM::ValueMappings[ARM::GPR3OpsIdx]})
: &ARM::ValueMappings[ARM::GPR3OpsIdx];
break;
- case G_FADD:
+ }
+ case G_FADD: {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
assert((Ty.getSizeInBits() == 32 || Ty.getSizeInBits() == 64) &&
"Unsupported size for G_FADD");
OperandsMapping = Ty.getSizeInBits() == 64
? &ARM::ValueMappings[ARM::DPR3OpsIdx]
: &ARM::ValueMappings[ARM::SPR3OpsIdx];
break;
+ }
case G_CONSTANT:
case G_FRAME_INDEX:
OperandsMapping =
getOperandsMapping({&ARM::ValueMappings[ARM::GPR3OpsIdx], nullptr});
break;
case G_SELECT: {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
LLT Ty2 = MRI.getType(MI.getOperand(1).getReg());
(void)Ty2;
assert(Ty.getSizeInBits() == 32 && "Unsupported size for G_SELECT");
@@ -277,9 +280,29 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
&ARM::ValueMappings[ARM::GPR3OpsIdx]});
break;
}
+ case G_FCMP: {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ LLT Ty1 = MRI.getType(MI.getOperand(2).getReg());
+ LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());
+ (void)Ty2;
+ assert(Ty.getSizeInBits() == 1 && "Unsupported size for G_FCMP");
+ assert(Ty1.getSizeInBits() == Ty2.getSizeInBits() &&
+ "Mismatched operand sizes for G_FCMP");
+
+ unsigned Size = Ty1.getSizeInBits();
+ assert((Size == 32 || Size == 64) && "Unsupported size for G_FCMP");
+
+ auto FPRValueMapping = Size == 32 ? &ARM::ValueMappings[ARM::SPR3OpsIdx]
+ : &ARM::ValueMappings[ARM::DPR3OpsIdx];
+ OperandsMapping =
+ getOperandsMapping({&ARM::ValueMappings[ARM::GPR3OpsIdx], nullptr,
+ FPRValueMapping, FPRValueMapping});
+ break;
+ }
case G_MERGE_VALUES: {
// We only support G_MERGE_VALUES for creating a double precision floating
// point value out of two GPRs.
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
LLT Ty1 = MRI.getType(MI.getOperand(1).getReg());
LLT Ty2 = MRI.getType(MI.getOperand(2).getReg());
if (Ty.getSizeInBits() != 64 || Ty1.getSizeInBits() != 32 ||
@@ -294,6 +317,7 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case G_UNMERGE_VALUES: {
// We only support G_UNMERGE_VALUES for splitting a double precision
// floating point value into two GPRs.
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
LLT Ty1 = MRI.getType(MI.getOperand(1).getReg());
LLT Ty2 = MRI.getType(MI.getOperand(2).getReg());
if (Ty.getSizeInBits() != 32 || Ty1.getSizeInBits() != 32 ||
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 8eb9dbf5f9de..51b0fedd2b54 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -15,6 +15,24 @@ using namespace llvm;
#define DEBUG_TYPE "armtti"
+bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
+ const Function *Callee) const {
+ const TargetMachine &TM = getTLI()->getTargetMachine();
+ const FeatureBitset &CallerBits =
+ TM.getSubtargetImpl(*Caller)->getFeatureBits();
+ const FeatureBitset &CalleeBits =
+ TM.getSubtargetImpl(*Callee)->getFeatureBits();
+
+ // To inline a callee, all features not in the whitelist must match exactly.
+ bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
+ (CalleeBits & ~InlineFeatureWhitelist);
+ // For features in the whitelist, the callee's features must be a subset of
+ // the callers'.
+ bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
+ (CalleeBits & InlineFeatureWhitelist);
+ return MatchExact && MatchSubset;
+}
+
int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
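areInlineCompatible splits the subtarget feature bits into two groups: bits outside InlineFeatureWhitelist must match exactly between caller and callee, and whitelisted bits of the callee must be a subset of the caller's. The same check, reduced to a self-contained sketch with std::bitset standing in for FeatureBitset:

    #include <bitset>

    using Bits = std::bitset<64>; // stand-in for llvm::FeatureBitset

    bool inlineCompatible(const Bits &Caller, const Bits &Callee,
                          const Bits &Whitelist) {
      // Outside the whitelist: caller and callee must agree bit for bit.
      bool MatchExact = (Caller & ~Whitelist) == (Callee & ~Whitelist);
      // Inside the whitelist: the callee may not require anything the caller lacks.
      bool MatchSubset = ((Caller & Callee) & Whitelist) == (Callee & Whitelist);
      return MatchExact && MatchSubset;
    }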
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.h b/lib/Target/ARM/ARMTargetTransformInfo.h
index 8a1a37863877..0695a4e63346 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -33,6 +33,39 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
const ARMSubtarget *ST;
const ARMTargetLowering *TLI;
+ // Currently the following features are excluded from InlineFeatureWhitelist.
+ // ModeThumb, FeatureNoARM, ModeSoftFloat, FeatureVFPOnlySP, FeatureD16
+ // Depending on whether they are set or unset, different
+ // instructions/registers are available. For example, inlining a callee with
+ // -thumb-mode in a caller with +thumb-mode may cause the assembler to
+ // fail if the callee uses ARM-only instructions, e.g. in inline asm.
+ const FeatureBitset InlineFeatureWhitelist = {
+ ARM::FeatureVFP2, ARM::FeatureVFP3, ARM::FeatureNEON, ARM::FeatureThumb2,
+ ARM::FeatureFP16, ARM::FeatureVFP4, ARM::FeatureFPARMv8,
+ ARM::FeatureFullFP16, ARM::FeatureHWDivThumb,
+ ARM::FeatureHWDivARM, ARM::FeatureDB, ARM::FeatureV7Clrex,
+ ARM::FeatureAcquireRelease, ARM::FeatureSlowFPBrcc,
+ ARM::FeaturePerfMon, ARM::FeatureTrustZone, ARM::Feature8MSecExt,
+ ARM::FeatureCrypto, ARM::FeatureCRC, ARM::FeatureRAS,
+ ARM::FeatureFPAO, ARM::FeatureFuseAES, ARM::FeatureZCZeroing,
+ ARM::FeatureProfUnpredicate, ARM::FeatureSlowVGETLNi32,
+ ARM::FeatureSlowVDUP32, ARM::FeaturePreferVMOVSR,
+ ARM::FeaturePrefISHSTBarrier, ARM::FeatureMuxedUnits,
+ ARM::FeatureSlowOddRegister, ARM::FeatureSlowLoadDSubreg,
+ ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx,
+ ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs,
+ ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign,
+ ARM::FeatureHasSlowFPVMLx, ARM::FeatureVMLxForwarding,
+ ARM::FeaturePref32BitThumb, ARM::FeatureAvoidPartialCPSR,
+ ARM::FeatureCheapPredicableCPSR, ARM::FeatureAvoidMOVsShOp,
+ ARM::FeatureHasRetAddrStack, ARM::FeatureHasNoBranchPredictor,
+ ARM::FeatureDSP, ARM::FeatureMP, ARM::FeatureVirtualization,
+ ARM::FeatureMClass, ARM::FeatureRClass, ARM::FeatureAClass,
+ ARM::FeatureNaClTrap, ARM::FeatureStrictAlign, ARM::FeatureLongCalls,
+ ARM::FeatureExecuteOnly, ARM::FeatureReserveR9, ARM::FeatureNoMovt,
+ ARM::FeatureNoNegativeImmediates
+ };
+
const ARMSubtarget *getST() const { return ST; }
const ARMTargetLowering *getTLI() const { return TLI; }
@@ -41,6 +74,9 @@ public:
: BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
TLI(ST->getTargetLowering()) {}
+ bool areInlineCompatible(const Function *Caller,
+ const Function *Callee) const;
+
bool enableInterleavedAccessVectorization() { return true; }
/// Floating-point computation using ARMv8 AArch32 Advanced
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 891b5c60e1fd..1129826f21f6 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -5249,6 +5249,7 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
// Fall through for the Identifier case that is not a register or a
// special name.
+ LLVM_FALLTHROUGH;
}
case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
case AsmToken::Integer: // things like 1f and 2b as a branch targets
@@ -8992,6 +8993,8 @@ unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
return PlainMatchResult;
}
+std::string ARMMnemonicSpellCheck(StringRef S, uint64_t FBS);
+
static const char *getSubtargetFeatureName(uint64_t Val);
bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
@@ -9085,9 +9088,13 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return Error(ErrorLoc, "invalid operand for instruction");
}
- case Match_MnemonicFail:
- return Error(IDLoc, "invalid instruction",
+ case Match_MnemonicFail: {
+ uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
+ std::string Suggestion = ARMMnemonicSpellCheck(
+ ((ARMOperand &)*Operands[0]).getToken(), FBS);
+ return Error(IDLoc, "invalid instruction" + Suggestion,
((ARMOperand &)*Operands[0]).getLocRange());
+ }
case Match_RequiresNotITBlock:
return Error(IDLoc, "flag setting instruction only valid outside IT block");
case Match_RequiresITBlock:
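ARMMnemonicSpellCheck is generated by TableGen from the instruction tables; the declaration added above is all the parser needs in order to append a suggestion to the "invalid instruction" diagnostic. Conceptually it returns the known mnemonic closest in edit distance to the unrecognized token, filtered by the available feature bits. A hand-written approximation of that idea (the function name, the candidate list, and the exact suggestion wording are assumptions, not the generated code):

    #include "llvm/ADT/StringRef.h"
    #include <string>
    #include <vector>

    std::string suggestMnemonic(llvm::StringRef Bad,
                                const std::vector<std::string> &Known) {
      unsigned BestDist = ~0u;
      std::string Best;
      for (const std::string &M : Known) {
        unsigned Dist = Bad.edit_distance(M);
        if (Dist < BestDist) {
          BestDist = Dist;
          Best = M;
        }
      }
      return Best.empty() ? std::string() : ", did you mean: " + Best + "?";
    }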
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index 22de728fe06e..a77df7a2598f 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -361,9 +361,8 @@ static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
const MCFixup &Fixup,
const MCValue &Target, uint64_t Value,
- bool IsPCRel, MCContext &Ctx,
- bool IsLittleEndian,
- bool IsResolved) const {
+ bool IsResolved, MCContext &Ctx,
+ bool IsLittleEndian) const {
unsigned Kind = Fixup.getKind();
// MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
@@ -392,7 +391,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
case FK_SecRel_4:
return Value;
case ARM::fixup_arm_movt_hi16:
- if (!IsPCRel)
+ if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
Value >>= 16;
LLVM_FALLTHROUGH;
case ARM::fixup_arm_movw_lo16: {
@@ -404,7 +403,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
return Value;
}
case ARM::fixup_t2_movt_hi16:
- if (!IsPCRel)
+ if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
Value >>= 16;
LLVM_FALLTHROUGH;
case ARM::fixup_t2_movw_lo16: {
@@ -885,11 +884,11 @@ static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
- bool IsPCRel) const {
+ bool IsResolved) const {
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
MCContext &Ctx = Asm.getContext();
- Value = adjustFixupValue(Asm, Fixup, Target, Value, IsPCRel, Ctx,
- IsLittleEndian, true);
+ Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx,
+ IsLittleEndian);
if (!Value)
return; // Doesn't change encoding.
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
index 84b54bbb9a49..02374966dafe 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
@@ -42,13 +42,13 @@ public:
const MCValue &Target) override;
unsigned adjustFixupValue(const MCAssembler &Asm, const MCFixup &Fixup,
- const MCValue &Target, uint64_t Value, bool IsPCRel,
- MCContext &Ctx, bool IsLittleEndian,
- bool IsResolved) const;
+ const MCValue &Target, uint64_t Value,
+ bool IsResolved, MCContext &Ctx,
+ bool IsLittleEndian) const;
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override;
+ uint64_t Value, bool IsResolved) const override;
unsigned getRelaxedOpcode(unsigned Op) const;
diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp
index 0b6574c37de1..5709b4e61798 100644
--- a/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -236,7 +236,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R12:
if (STI.splitFramePushPop(MF))
break;
- // fallthough
+ LLVM_FALLTHROUGH;
case ARM::R0:
case ARM::R1:
case ARM::R2:
diff --git a/lib/Target/AVR/AVRAsmPrinter.cpp b/lib/Target/AVR/AVRAsmPrinter.cpp
index f0c7b11895b4..c058c9e1f534 100644
--- a/lib/Target/AVR/AVRAsmPrinter.cpp
+++ b/lib/Target/AVR/AVRAsmPrinter.cpp
@@ -149,7 +149,10 @@ bool AVRAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
(void)MO;
assert(MO.isReg() && "Unexpected inline asm memory operand");
- // TODO: We can look up the alternative name for the register if it's given.
+ // TODO: We should be able to look up the alternative name for
+ // the register if it's given.
+ // TableGen doesn't expose a way of retrieving names
+ // for registers.
if (MI->getOperand(OpNum).getReg() == AVR::R31R30) {
O << "Z";
} else {
diff --git a/lib/Target/AVR/AVRDevices.td b/lib/Target/AVR/AVRDevices.td
index 9224af613d14..62def4574437 100644
--- a/lib/Target/AVR/AVRDevices.td
+++ b/lib/Target/AVR/AVRDevices.td
@@ -6,7 +6,6 @@
// :TODO: We define all devices with SRAM to have all variants of LD/ST/LDD/STD.
// In reality, avr1 (no SRAM) has one variant each of `LD` and `ST`.
// avr2 (with SRAM) adds the rest of the variants.
-// :TODO: s/AVRTiny/Tiny
// A feature set aggregates features, grouping them. We don't want to create a
@@ -136,7 +135,7 @@ def ELFArchAVR4 : ELFArch<"EF_AVR_ARCH_AVR4">;
def ELFArchAVR5 : ELFArch<"EF_AVR_ARCH_AVR5">;
def ELFArchAVR51 : ELFArch<"EF_AVR_ARCH_AVR51">;
def ELFArchAVR6 : ELFArch<"EF_AVR_ARCH_AVR6">;
-def ELFArchAVRTiny : ELFArch<"EF_AVR_ARCH_AVRTINY">;
+def ELFArchTiny : ELFArch<"EF_AVR_ARCH_AVRTINY">;
def ELFArchXMEGA1 : ELFArch<"EF_AVR_ARCH_XMEGA1">;
def ELFArchXMEGA2 : ELFArch<"EF_AVR_ARCH_XMEGA2">;
def ELFArchXMEGA3 : ELFArch<"EF_AVR_ARCH_XMEGA3">;
@@ -189,7 +188,7 @@ def FamilyAVR51 : Family<"avr51",
def FamilyAVR6 : Family<"avr6",
[FamilyAVR51]>;
-def FamilyAVRTiny : Family<"avrtiny",
+def FamilyTiny : Family<"avrtiny",
[FamilyAVR0, FeatureBREAK, FeatureSRAM,
FeatureTinyEncoding]>;
@@ -240,7 +239,7 @@ def : Device<"avrxmega4", FamilyXMEGA, ELFArchXMEGA4>;
def : Device<"avrxmega5", FamilyXMEGA, ELFArchXMEGA5>;
def : Device<"avrxmega6", FamilyXMEGA, ELFArchXMEGA6>;
def : Device<"avrxmega7", FamilyXMEGA, ELFArchXMEGA7>;
-def : Device<"avrtiny", FamilyAVRTiny, ELFArchAVRTiny>;
+def : Device<"avrtiny", FamilyTiny, ELFArchTiny>;
// Specific MCUs
def : Device<"at90s1200", FamilyAVR0, ELFArchAVR1>;
@@ -480,12 +479,12 @@ def : Device<"atxmega384d3", FamilyXMEGA, ELFArchXMEGA6>;
def : Device<"atxmega128a1", FamilyXMEGA, ELFArchXMEGA7>;
def : Device<"atxmega128a1u", FamilyXMEGAU, ELFArchXMEGA7>;
def : Device<"atxmega128a4u", FamilyXMEGAU, ELFArchXMEGA7>;
-def : Device<"attiny4", FamilyAVRTiny, ELFArchAVRTiny>;
-def : Device<"attiny5", FamilyAVRTiny, ELFArchAVRTiny>;
-def : Device<"attiny9", FamilyAVRTiny, ELFArchAVRTiny>;
-def : Device<"attiny10", FamilyAVRTiny, ELFArchAVRTiny>;
-def : Device<"attiny20", FamilyAVRTiny, ELFArchAVRTiny>;
-def : Device<"attiny40", FamilyAVRTiny, ELFArchAVRTiny>;
-def : Device<"attiny102", FamilyAVRTiny, ELFArchAVRTiny>;
-def : Device<"attiny104", FamilyAVRTiny, ELFArchAVRTiny>;
+def : Device<"attiny4", FamilyTiny, ELFArchTiny>;
+def : Device<"attiny5", FamilyTiny, ELFArchTiny>;
+def : Device<"attiny9", FamilyTiny, ELFArchTiny>;
+def : Device<"attiny10", FamilyTiny, ELFArchTiny>;
+def : Device<"attiny20", FamilyTiny, ELFArchTiny>;
+def : Device<"attiny40", FamilyTiny, ELFArchTiny>;
+def : Device<"attiny102", FamilyTiny, ELFArchTiny>;
+def : Device<"attiny104", FamilyTiny, ELFArchTiny>;
diff --git a/lib/Target/AVR/AVRInstrInfo.cpp b/lib/Target/AVR/AVRInstrInfo.cpp
index afba66b2e69b..744aa723c416 100644
--- a/lib/Target/AVR/AVRInstrInfo.cpp
+++ b/lib/Target/AVR/AVRInstrInfo.cpp
@@ -402,7 +402,7 @@ unsigned AVRInstrInfo::insertBranch(MachineBasicBlock &MBB,
ArrayRef<MachineOperand> Cond,
const DebugLoc &DL,
int *BytesAdded) const {
- assert(!BytesAdded && "code size not handled");
+ if (BytesAdded) *BytesAdded = 0;
// Shouldn't be a fall through.
assert(TBB && "insertBranch must not be told to insert a fallthrough");
@@ -411,19 +411,24 @@ unsigned AVRInstrInfo::insertBranch(MachineBasicBlock &MBB,
if (Cond.empty()) {
assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, DL, get(AVR::RJMPk)).addMBB(TBB);
+ auto &MI = *BuildMI(&MBB, DL, get(AVR::RJMPk)).addMBB(TBB);
+ if (BytesAdded)
+ *BytesAdded += getInstSizeInBytes(MI);
return 1;
}
// Conditional branch.
unsigned Count = 0;
AVRCC::CondCodes CC = (AVRCC::CondCodes)Cond[0].getImm();
- BuildMI(&MBB, DL, getBrCond(CC)).addMBB(TBB);
+ auto &CondMI = *BuildMI(&MBB, DL, getBrCond(CC)).addMBB(TBB);
+
+ if (BytesAdded) *BytesAdded += getInstSizeInBytes(CondMI);
++Count;
if (FBB) {
// Two-way Conditional branch. Insert the second branch.
- BuildMI(&MBB, DL, get(AVR::RJMPk)).addMBB(FBB);
+ auto &MI = *BuildMI(&MBB, DL, get(AVR::RJMPk)).addMBB(FBB);
+ if (BytesAdded) *BytesAdded += getInstSizeInBytes(MI);
++Count;
}
@@ -432,7 +437,7 @@ unsigned AVRInstrInfo::insertBranch(MachineBasicBlock &MBB,
unsigned AVRInstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
- assert(!BytesRemoved && "code size not handled");
+ if (BytesRemoved) *BytesRemoved = 0;
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
@@ -450,6 +455,7 @@ unsigned AVRInstrInfo::removeBranch(MachineBasicBlock &MBB,
}
// Remove the branch.
+ if (BytesRemoved) *BytesRemoved += getInstSizeInBytes(*I);
I->eraseFromParent();
I = MBB.end();
++Count;
@@ -494,5 +500,61 @@ unsigned AVRInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
}
}
+MachineBasicBlock *
+AVRInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("unexpected opcode!");
+ case AVR::JMPk:
+ case AVR::CALLk:
+ case AVR::RCALLk:
+ case AVR::RJMPk:
+ case AVR::BREQk:
+ case AVR::BRNEk:
+ case AVR::BRSHk:
+ case AVR::BRLOk:
+ case AVR::BRMIk:
+ case AVR::BRPLk:
+ case AVR::BRGEk:
+ case AVR::BRLTk:
+ return MI.getOperand(0).getMBB();
+ case AVR::BRBSsk:
+ case AVR::BRBCsk:
+ return MI.getOperand(1).getMBB();
+ case AVR::SBRCRrB:
+ case AVR::SBRSRrB:
+ case AVR::SBICAb:
+ case AVR::SBISAb:
+ llvm_unreachable("unimplemented branch instructions");
+ }
+}
+
+bool AVRInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
+ int64_t BrOffset) const {
+
+ switch (BranchOp) {
+ default:
+ llvm_unreachable("unexpected opcode!");
+ case AVR::JMPk:
+ case AVR::CALLk:
+ assert(BrOffset >= 0 && "offset must be absolute address");
+ return isUIntN(16, BrOffset);
+ case AVR::RCALLk:
+ case AVR::RJMPk:
+ return isIntN(13, BrOffset);
+ case AVR::BRBSsk:
+ case AVR::BRBCsk:
+ case AVR::BREQk:
+ case AVR::BRNEk:
+ case AVR::BRSHk:
+ case AVR::BRLOk:
+ case AVR::BRMIk:
+ case AVR::BRPLk:
+ case AVR::BRGEk:
+ case AVR::BRLTk:
+ return isIntN(7, BrOffset);
+ }
+}
+
} // end of namespace llvm
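The insertBranch/removeBranch changes replace the old "code size not handled" asserts with real byte accounting, which, together with the new getBranchDestBlock and isBranchOffsetInRange hooks, is what the BranchRelaxation pass (enabled for AVR in AVRTargetMachine.cpp below) needs to decide when a short relative branch has to be rewritten. A minimal sketch of how a caller consumes the new out-parameters; the helper name is made up for illustration:

    #include "AVRInstrInfo.h"
    using namespace llvm;

    // Net size change, in bytes, from replacing the terminators of MBB with a
    // branch to TBB (and optionally FBB): the kind of bookkeeping branch
    // relaxation relies on to keep block offsets accurate.
    static int retargetTerminators(const AVRInstrInfo &TII, MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB, MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond, const DebugLoc &DL) {
      int Removed = 0, Added = 0;
      TII.removeBranch(MBB, &Removed);
      TII.insertBranch(MBB, TBB, FBB, Cond, DL, &Added);
      return Added - Removed;
    }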
diff --git a/lib/Target/AVR/AVRInstrInfo.h b/lib/Target/AVR/AVRInstrInfo.h
index c5105dafe5eb..f42d34fb2848 100644
--- a/lib/Target/AVR/AVRInstrInfo.h
+++ b/lib/Target/AVR/AVRInstrInfo.h
@@ -103,6 +103,10 @@ public:
bool
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+ MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
+
+ bool isBranchOffsetInRange(unsigned BranchOpc,
+ int64_t BrOffset) const override;
private:
const AVRRegisterInfo RI;
};
diff --git a/lib/Target/AVR/AVRInstrInfo.td b/lib/Target/AVR/AVRInstrInfo.td
index 5dd8b2c27b21..184e4d53f7c8 100644
--- a/lib/Target/AVR/AVRInstrInfo.td
+++ b/lib/Target/AVR/AVRInstrInfo.td
@@ -1411,17 +1411,11 @@ hasSideEffects = 0 in
def LPMRdZ : FLPMX<0,
0,
(outs GPR8:$dst),
- (ins ZREGS:$z),
+ (ins ZREG:$z),
"lpm\t$dst, $z",
[]>,
Requires<[HasLPMX]>;
- def LPMWRdZ : Pseudo<(outs DREGS:$dst),
- (ins ZREGS:$z),
- "lpmw\t$dst, $z",
- []>,
- Requires<[HasLPMX]>;
-
// Load program memory, while postincrementing the Z register.
let mayLoad = 1,
Defs = [R31R30] in
@@ -1429,13 +1423,19 @@ hasSideEffects = 0 in
def LPMRdZPi : FLPMX<0,
1,
(outs GPR8:$dst),
- (ins ZREGS:$z),
+ (ins ZREG:$z),
"lpm\t$dst, $z+",
[]>,
Requires<[HasLPMX]>;
+ def LPMWRdZ : Pseudo<(outs DREGS:$dst),
+ (ins ZREG:$z),
+ "lpmw\t$dst, $z",
+ []>,
+ Requires<[HasLPMX]>;
+
def LPMWRdZPi : Pseudo<(outs DREGS:$dst),
- (ins ZREGS:$z),
+ (ins ZREG:$z),
"lpmw\t$dst, $z+",
[]>,
Requires<[HasLPMX]>;
@@ -1458,7 +1458,7 @@ hasSideEffects = 0 in
def ELPMRdZ : FLPMX<1,
0,
(outs GPR8:$dst),
- (ins ZREGS:$z),
+ (ins ZREG:$z),
"elpm\t$dst, $z",
[]>,
Requires<[HasELPMX]>;
@@ -1467,7 +1467,7 @@ hasSideEffects = 0 in
def ELPMRdZPi : FLPMX<1,
1,
(outs GPR8:$dst),
- (ins ZREGS: $z),
+ (ins ZREG: $z),
"elpm\t$dst, $z+",
[]>,
Requires<[HasELPMX]>;
@@ -1487,7 +1487,7 @@ let Uses = [R1, R0] in
let Defs = [R31R30] in
def SPMZPi : F16<0b1001010111111000,
(outs),
- (ins ZREGS:$z),
+ (ins ZREG:$z),
"spm $z+",
[]>,
Requires<[HasSPMX]>;
@@ -1564,28 +1564,28 @@ hasSideEffects = 0 in
// Read-Write-Modify (RMW) instructions.
def XCHZRd : FZRd<0b100,
(outs GPR8:$rd),
- (ins ZREGS:$z),
+ (ins ZREG:$z),
"xch\t$z, $rd",
[]>,
Requires<[SupportsRMW]>;
def LASZRd : FZRd<0b101,
(outs GPR8:$rd),
- (ins ZREGS:$z),
+ (ins ZREG:$z),
"las\t$z, $rd",
[]>,
Requires<[SupportsRMW]>;
def LACZRd : FZRd<0b110,
(outs GPR8:$rd),
- (ins ZREGS:$z),
+ (ins ZREG:$z),
"lac\t$z, $rd",
[]>,
Requires<[SupportsRMW]>;
def LATZRd : FZRd<0b111,
(outs GPR8:$rd),
- (ins ZREGS:$z),
+ (ins ZREG:$z),
"lat\t$z, $rd",
[]>,
Requires<[SupportsRMW]>;
diff --git a/lib/Target/AVR/AVRMCInstLower.cpp b/lib/Target/AVR/AVRMCInstLower.cpp
index 475dda420e89..dfefd09bc4b8 100644
--- a/lib/Target/AVR/AVRMCInstLower.cpp
+++ b/lib/Target/AVR/AVRMCInstLower.cpp
@@ -37,10 +37,22 @@ MCOperand AVRMCInstLower::lowerSymbolOperand(const MachineOperand &MO,
Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
}
+ bool IsFunction = MO.isGlobal() && isa<Function>(MO.getGlobal());
+
if (TF & AVRII::MO_LO) {
- Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_LO8, Expr, IsNegated, Ctx);
+ if (IsFunction) {
+ // N.B. Should we use _GS fixups here to cope with >128k progmem?
+ Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_PM_LO8, Expr, IsNegated, Ctx);
+ } else {
+ Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_LO8, Expr, IsNegated, Ctx);
+ }
} else if (TF & AVRII::MO_HI) {
- Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_HI8, Expr, IsNegated, Ctx);
+ if (IsFunction) {
+ // N.B. Should we use _GS fixups here to cope with >128k progmem?
+ Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_PM_HI8, Expr, IsNegated, Ctx);
+ } else {
+ Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_HI8, Expr, IsNegated, Ctx);
+ }
} else if (TF != 0) {
llvm_unreachable("Unknown target flag on symbol operand");
}
diff --git a/lib/Target/AVR/AVRRegisterInfo.cpp b/lib/Target/AVR/AVRRegisterInfo.cpp
index 55f3f5cf428a..249dc5512c28 100644
--- a/lib/Target/AVR/AVRRegisterInfo.cpp
+++ b/lib/Target/AVR/AVRRegisterInfo.cpp
@@ -95,7 +95,8 @@ AVRRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
}
/// Fold a frame offset shared between two add instructions into a single one.
-static void foldFrameOffset(MachineInstr &MI, int &Offset, unsigned DstReg) {
+static void foldFrameOffset(MachineBasicBlock::iterator &II, int &Offset, unsigned DstReg) {
+ MachineInstr &MI = *II;
int Opcode = MI.getOpcode();
// Don't bother trying if the next instruction is not an add or a sub.
@@ -120,6 +121,7 @@ static void foldFrameOffset(MachineInstr &MI, int &Offset, unsigned DstReg) {
}
// Finally remove the instruction.
+ II++;
MI.eraseFromParent();
}
@@ -158,6 +160,8 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
unsigned DstReg = MI.getOperand(0).getReg();
assert(DstReg != AVR::R29R28 && "Dest reg cannot be the frame pointer");
+ II++; // Skip over the FRMIDX (and now MOVW) instruction.
+
// Generally, to load a frame address two add instructions are emitted that
// could get folded into a single one:
// movw r31:r30, r29:r28
@@ -166,7 +170,8 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// to:
// movw r31:r30, r29:r28
// adiw r31:r30, 45
- foldFrameOffset(*std::next(II), Offset, DstReg);
+ if (II != MBB.end())
+ foldFrameOffset(II, Offset, DstReg);
// Select the best opcode based on DstReg and the offset size.
switch (DstReg) {
@@ -187,7 +192,7 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
}
- MachineInstr *New = BuildMI(MBB, std::next(II), dl, TII.get(Opcode), DstReg)
+ MachineInstr *New = BuildMI(MBB, II, dl, TII.get(Opcode), DstReg)
.addReg(DstReg, RegState::Kill)
.addImm(Offset);
New->getOperand(3).setIsDead();
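The reworked eliminateFrameIndex/foldFrameOffset keep the basic-block iterator valid across the erase: II is advanced past the instruction first, and only then is the instruction removed, so the same iterator can still serve as the insertion point for the add emitted afterwards. The underlying discipline, shown on a plain std::list as a generic sketch (not AVR-specific code):

    #include <list>

    // Erase the element the iterator currently points at while keeping the
    // iterator usable afterwards: advance first, erase the saved position second.
    void eraseAndKeepIterator(std::list<int> &L, std::list<int>::iterator &It) {
      std::list<int>::iterator Dead = It++; // It now points past the doomed element
      L.erase(Dead);                        // erasing Dead leaves It valid
    }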
diff --git a/lib/Target/AVR/AVRRegisterInfo.td b/lib/Target/AVR/AVRRegisterInfo.td
index 32650fc66751..8162f12052be 100644
--- a/lib/Target/AVR/AVRRegisterInfo.td
+++ b/lib/Target/AVR/AVRRegisterInfo.td
@@ -110,8 +110,6 @@ CoveredBySubRegs = 1 in
// Register Classes
//===----------------------------------------------------------------------===//
-//:TODO: use proper set instructions instead of using always "add"
-
// Main 8-bit register class.
def GPR8 : RegisterClass<"AVR", [i8], 8,
(
@@ -199,14 +197,11 @@ def PTRDISPREGS : RegisterClass<"AVR", [i16], 8,
// We have a bunch of instructions with an explicit Z register argument. We
// model this using a register class containing only the Z register.
-// :TODO: Rename to 'ZREG'.
-def ZREGS : RegisterClass<"AVR", [i16], 8, (add R31R30)>;
+def ZREG : RegisterClass<"AVR", [i16], 8, (add R31R30)>;
// Register class used for the stack read pseudo instruction.
def GPRSP: RegisterClass<"AVR", [i16], 8, (add SP)>;
-//:TODO: if we remove this we get an error in tablegen
-//:TODO: this is just a hack, remove it once add16 works!
// Status register.
def SREG : AVRReg<14, "FLAGS">, DwarfRegNum<[88]>;
def CCR : RegisterClass<"AVR", [i8], 8, (add SREG)>
diff --git a/lib/Target/AVR/AVRTargetMachine.cpp b/lib/Target/AVR/AVRTargetMachine.cpp
index 91d2a8737b87..a9d61ffc952c 100644
--- a/lib/Target/AVR/AVRTargetMachine.cpp
+++ b/lib/Target/AVR/AVRTargetMachine.cpp
@@ -66,6 +66,7 @@ public:
bool addInstSelector() override;
void addPreSched2() override;
+ void addPreEmitPass() override;
void addPreRegAlloc() override;
};
} // namespace
@@ -115,4 +116,9 @@ void AVRPassConfig::addPreSched2() {
addPass(createAVRExpandPseudoPass());
}
+void AVRPassConfig::addPreEmitPass() {
+ // Must run branch relaxation immediately before the asm printer.
+ addPass(&BranchRelaxationPassID);
+}
+
} // end of namespace llvm
diff --git a/lib/Target/AVR/AsmParser/AVRAsmParser.cpp b/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
index cf52e552978f..5004736365c7 100644
--- a/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
+++ b/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
@@ -466,6 +466,7 @@ bool AVRAsmParser::parseOperand(OperandVector &Operands) {
if (!tryParseRegisterOperand(Operands)) {
return false;
}
+ LLVM_FALLTHROUGH;
case AsmToken::LParen:
case AsmToken::Integer:
case AsmToken::Dot:
diff --git a/lib/Target/AVR/InstPrinter/AVRInstPrinter.cpp b/lib/Target/AVR/InstPrinter/AVRInstPrinter.cpp
index 316b7836df0d..0f34b8e18ff9 100644
--- a/lib/Target/AVR/InstPrinter/AVRInstPrinter.cpp
+++ b/lib/Target/AVR/InstPrinter/AVRInstPrinter.cpp
@@ -106,7 +106,7 @@ void AVRInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
if (Op.isReg()) {
bool isPtrReg = (MOI.RegClass == AVR::PTRREGSRegClassID) ||
(MOI.RegClass == AVR::PTRDISPREGSRegClassID) ||
- (MOI.RegClass == AVR::ZREGSRegClassID);
+ (MOI.RegClass == AVR::ZREGRegClassID);
if (isPtrReg) {
O << getRegisterName(Op.getReg(), AVR::ptr);
diff --git a/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp b/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp
index 1e61eccf775f..6d126ed622aa 100644
--- a/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp
+++ b/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp
@@ -33,7 +33,7 @@ static unsigned getEFlagsForFeatureSet(const FeatureBitset &Features) {
EFlags |= ELF::EF_AVR_ARCH_AVR51;
else if (Features[AVR::ELFArchAVR6])
EFlags |= ELF::EF_AVR_ARCH_AVR6;
- else if (Features[AVR::ELFArchAVRTiny])
+ else if (Features[AVR::ELFArchTiny])
EFlags |= ELF::EF_AVR_ARCH_AVRTINY;
else if (Features[AVR::ELFArchXMEGA1])
EFlags |= ELF::EF_AVR_ARCH_XMEGA1;
diff --git a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
index 15e89fb2a261..9fc812cdef14 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
@@ -29,7 +29,7 @@ public:
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override;
+ uint64_t Value, bool IsResolved) const override;
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override;
@@ -65,7 +65,7 @@ bool BPFAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
void BPFAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
- bool IsPCRel) const {
+ bool IsResolved) const {
if (Fixup.getKind() == FK_SecRel_4 || Fixup.getKind() == FK_SecRel_8) {
assert(Value == 0);
} else if (Fixup.getKind() == FK_Data_4 || Fixup.getKind() == FK_Data_8) {
diff --git a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
index c19e636d79ca..d901abbd1692 100644
--- a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
+++ b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
@@ -1413,6 +1413,7 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
// Translate a "$Rx = CONST32(#imm)" to "$Rx = memw(gp+#LABEL) "
case Hexagon::CONST32:
is32bit = true;
+ LLVM_FALLTHROUGH;
// Translate a "$Rx:y = CONST64(#imm)" to "$Rx:y = memd(gp+#LABEL) "
case Hexagon::CONST64:
// FIXME: need better way to detect AsmStreamer (upstream removed getKind())
diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 14c682c6df4b..b064778c4bbd 100644
--- a/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -1947,8 +1947,10 @@ bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
switch (Opc) {
case Hexagon::S2_storeri_io:
Align++;
+ LLVM_FALLTHROUGH;
case Hexagon::S2_storerh_io:
Align++;
+ LLVM_FALLTHROUGH;
case Hexagon::S2_storerb_io:
break;
default:
diff --git a/lib/Target/Hexagon/HexagonBitTracker.cpp b/lib/Target/Hexagon/HexagonBitTracker.cpp
index 730026121d3b..3de531088240 100644
--- a/lib/Target/Hexagon/HexagonBitTracker.cpp
+++ b/lib/Target/Hexagon/HexagonBitTracker.cpp
@@ -937,6 +937,7 @@ bool HexagonEvaluator::evaluate(const MachineInstr &BI,
case Hexagon::J2_jumpfnew:
case Hexagon::J2_jumpfnewpt:
Negated = true;
+ LLVM_FALLTHROUGH;
case Hexagon::J2_jumpt:
case Hexagon::J2_jumptpt:
case Hexagon::J2_jumptnew:
diff --git a/lib/Target/Hexagon/HexagonConstPropagation.cpp b/lib/Target/Hexagon/HexagonConstPropagation.cpp
index aa68f6cfdfc1..49ddd6961f8a 100644
--- a/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -2244,6 +2244,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &BrI,
case Hexagon::J2_jumpfnew:
case Hexagon::J2_jumpfnewpt:
Negated = true;
+ LLVM_FALLTHROUGH;
case Hexagon::J2_jumpt:
case Hexagon::J2_jumptnew:
case Hexagon::J2_jumptnewpt:
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 97a53dcbaed7..c790579ccebc 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -979,18 +979,6 @@ bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
if (MFI.hasCalls() || HMFI.hasClobberLR())
return true;
- // Frame pointer elimination is a possiblility at this point, but
- // to know if FP is necessary we need to know if spill/restore
- // functions will be used (they require FP to be valid).
- // This means that hasFP shouldn't really be called before CSI is
- // calculated, and some measures are taken to make sure of that
- // (e.g. default implementations of virtual functions that call it
- // are overridden apropriately).
- assert(MFI.isCalleeSavedInfoValid() && "Need to know CSI");
- const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
- if (useSpillFunction(MF, CSI) || useRestoreFunction(MF, CSI))
- return true;
-
return false;
}
@@ -2437,6 +2425,8 @@ bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF,
const CSIVect &CSI) const {
if (MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn())
return true;
+ if (!hasFP(MF))
+ return true;
if (!isOptSize(MF) && !isMinSize(MF))
if (MF.getTarget().getOptLevel() > CodeGenOpt::Default)
return true;
diff --git a/lib/Target/Hexagon/HexagonGenPredicate.cpp b/lib/Target/Hexagon/HexagonGenPredicate.cpp
index f14c733dcf51..3470480d607d 100644
--- a/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -334,6 +334,7 @@ bool HexagonGenPredicate::isScalarPred(Register PredReg) {
if (MRI->getRegClass(PR.R) != PredRC)
return false;
// If it is a copy between two predicate registers, fall through.
+ LLVM_FALLTHROUGH;
}
case Hexagon::C2_and:
case Hexagon::C2_andn:
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index e5f49ca77a91..0163b2e2bdc4 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -241,22 +241,31 @@ void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl) {
case MVT::v32i16:
case MVT::v16i32:
case MVT::v8i64:
- if (isAlignedMemNode(LD))
- Opcode = IsValidInc ? Hexagon::V6_vL32b_pi : Hexagon::V6_vL32b_ai;
- else
+ if (isAlignedMemNode(LD)) {
+ if (LD->isNonTemporal())
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_nt_pi : Hexagon::V6_vL32b_nt_ai;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_pi : Hexagon::V6_vL32b_ai;
+ } else {
Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi : Hexagon::V6_vL32Ub_ai;
+ }
break;
// 128B
case MVT::v128i8:
case MVT::v64i16:
case MVT::v32i32:
case MVT::v16i64:
- if (isAlignedMemNode(LD))
- Opcode = IsValidInc ? Hexagon::V6_vL32b_pi_128B
- : Hexagon::V6_vL32b_ai_128B;
- else
+ if (isAlignedMemNode(LD)) {
+ if (LD->isNonTemporal())
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_nt_pi_128B
+ : Hexagon::V6_vL32b_nt_ai_128B;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_pi_128B
+ : Hexagon::V6_vL32b_ai_128B;
+ } else {
Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi_128B
: Hexagon::V6_vL32Ub_ai_128B;
+ }
break;
default:
llvm_unreachable("Unexpected memory type in indexed load");
@@ -529,22 +538,31 @@ void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl) {
case MVT::v32i16:
case MVT::v16i32:
case MVT::v8i64:
- if (isAlignedMemNode(ST))
- Opcode = IsValidInc ? Hexagon::V6_vS32b_pi : Hexagon::V6_vS32b_ai;
- else
+ if (isAlignedMemNode(ST)) {
+ if (ST->isNonTemporal())
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_nt_pi : Hexagon::V6_vS32b_nt_ai;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_pi : Hexagon::V6_vS32b_ai;
+ } else {
Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi : Hexagon::V6_vS32Ub_ai;
+ }
break;
// 128B
case MVT::v128i8:
case MVT::v64i16:
case MVT::v32i32:
case MVT::v16i64:
- if (isAlignedMemNode(ST))
- Opcode = IsValidInc ? Hexagon::V6_vS32b_pi_128B
- : Hexagon::V6_vS32b_ai_128B;
- else
+ if (isAlignedMemNode(ST)) {
+ if (ST->isNonTemporal())
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_nt_pi_128B
+ : Hexagon::V6_vS32b_nt_ai_128B;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_pi_128B
+ : Hexagon::V6_vS32b_ai_128B;
+ } else {
Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi_128B
: Hexagon::V6_vS32Ub_ai_128B;
+ }
break;
default:
llvm_unreachable("Unexpected memory type in indexed store");
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 2daacf795555..67242764d453 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -716,6 +716,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
auto PtrVT = getPointerTy(MF.getDataLayout());
// Check for varargs.
@@ -832,7 +833,6 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (NeedsArgAlign && Subtarget.hasV60TOps()) {
DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
- MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
// V6 vectors passed by value have 64 or 128 byte alignment depending
// on whether we are 64 byte vector mode or 128 byte.
bool UseHVXDbl = Subtarget.useHVXDblOps();
@@ -916,10 +916,15 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Ops.push_back(Glue);
if (IsTailCall) {
- MF.getFrameInfo().setHasTailCall();
+ MFI.setHasTailCall();
return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
}
+ // Set this here because we need to know this for "hasFP" in frame lowering.
+ // The target-independent code calls getFrameRegister before setting it, and
+ // getFrameRegister uses hasFP to determine whether the function has FP.
+ MFI.setHasCalls(true);
+
unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
Glue = Chain.getValue(1);
@@ -1284,11 +1289,9 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
// Creates a SPLAT instruction for a constant value VAL.
static SDValue createSplat(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
SDValue Val) {
- if (VT.getSimpleVT() == MVT::v4i8)
- return DAG.getNode(HexagonISD::VSPLATB, dl, VT, Val);
-
- if (VT.getSimpleVT() == MVT::v4i16)
- return DAG.getNode(HexagonISD::VSPLATH, dl, VT, Val);
+ EVT T = VT.getVectorElementType();
+ if (T == MVT::i8 || T == MVT::i16)
+ return DAG.getNode(HexagonISD::VSPLAT, dl, VT, Val);
return SDValue();
}
@@ -2296,32 +2299,13 @@ const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
case HexagonISD::JT: return "HexagonISD::JT";
case HexagonISD::PACKHL: return "HexagonISD::PACKHL";
case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
- case HexagonISD::SHUFFEB: return "HexagonISD::SHUFFEB";
- case HexagonISD::SHUFFEH: return "HexagonISD::SHUFFEH";
- case HexagonISD::SHUFFOB: return "HexagonISD::SHUFFOB";
- case HexagonISD::SHUFFOH: return "HexagonISD::SHUFFOH";
case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
- case HexagonISD::VCMPBEQ: return "HexagonISD::VCMPBEQ";
- case HexagonISD::VCMPBGT: return "HexagonISD::VCMPBGT";
- case HexagonISD::VCMPBGTU: return "HexagonISD::VCMPBGTU";
- case HexagonISD::VCMPHEQ: return "HexagonISD::VCMPHEQ";
- case HexagonISD::VCMPHGT: return "HexagonISD::VCMPHGT";
- case HexagonISD::VCMPHGTU: return "HexagonISD::VCMPHGTU";
- case HexagonISD::VCMPWEQ: return "HexagonISD::VCMPWEQ";
- case HexagonISD::VCMPWGT: return "HexagonISD::VCMPWGT";
- case HexagonISD::VCMPWGTU: return "HexagonISD::VCMPWGTU";
case HexagonISD::VCOMBINE: return "HexagonISD::VCOMBINE";
case HexagonISD::VPACK: return "HexagonISD::VPACK";
- case HexagonISD::VSHLH: return "HexagonISD::VSHLH";
- case HexagonISD::VSHLW: return "HexagonISD::VSHLW";
- case HexagonISD::VSPLATB: return "HexagonISD::VSPLTB";
- case HexagonISD::VSPLATH: return "HexagonISD::VSPLATH";
- case HexagonISD::VSRAH: return "HexagonISD::VSRAH";
- case HexagonISD::VSRAW: return "HexagonISD::VSRAW";
- case HexagonISD::VSRLH: return "HexagonISD::VSRLH";
- case HexagonISD::VSRLW: return "HexagonISD::VSRLW";
- case HexagonISD::VSXTBH: return "HexagonISD::VSXTBH";
- case HexagonISD::VSXTBW: return "HexagonISD::VSXTBW";
+ case HexagonISD::VASL: return "HexagonISD::VASL";
+ case HexagonISD::VASR: return "HexagonISD::VASR";
+ case HexagonISD::VLSR: return "HexagonISD::VLSR";
+ case HexagonISD::VSPLAT: return "HexagonISD::VSPLAT";
case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE";
case HexagonISD::OP_END: break;
}
@@ -2503,13 +2487,13 @@ HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
if (VT.getSimpleVT() == MVT::v4i16) {
switch (Op.getOpcode()) {
case ISD::SRA:
- Result = DAG.getNode(HexagonISD::VSRAH, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VASR, dl, VT, V3, CommonSplat);
break;
case ISD::SHL:
- Result = DAG.getNode(HexagonISD::VSHLH, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VASL, dl, VT, V3, CommonSplat);
break;
case ISD::SRL:
- Result = DAG.getNode(HexagonISD::VSRLH, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VLSR, dl, VT, V3, CommonSplat);
break;
default:
return SDValue();
@@ -2517,13 +2501,13 @@ HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
} else if (VT.getSimpleVT() == MVT::v2i32) {
switch (Op.getOpcode()) {
case ISD::SRA:
- Result = DAG.getNode(HexagonISD::VSRAW, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VASR, dl, VT, V3, CommonSplat);
break;
case ISD::SHL:
- Result = DAG.getNode(HexagonISD::VSHLW, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VASL, dl, VT, V3, CommonSplat);
break;
case ISD::SRL:
- Result = DAG.getNode(HexagonISD::VSRLW, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VLSR, dl, VT, V3, CommonSplat);
break;
default:
return SDValue();
diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h
index 1415156487c0..bfd2c94eeaba 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/lib/Target/Hexagon/HexagonISelLowering.h
@@ -52,29 +52,10 @@ namespace HexagonISD {
COMBINE,
PACKHL,
- VSPLATB,
- VSPLATH,
- SHUFFEB,
- SHUFFEH,
- SHUFFOB,
- SHUFFOH,
- VSXTBH,
- VSXTBW,
- VSRAW,
- VSRAH,
- VSRLW,
- VSRLH,
- VSHLW,
- VSHLH,
- VCMPBEQ,
- VCMPBGT,
- VCMPBGTU,
- VCMPHEQ,
- VCMPHGT,
- VCMPHGTU,
- VCMPWEQ,
- VCMPWGT,
- VCMPWGTU,
+ VSPLAT,
+ VASL,
+ VASR,
+ VLSR,
INSERT,
INSERTRP,
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 1eac2d3dd8e2..c77c669f4ca7 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -250,15 +250,19 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
case Hexagon::L2_loadri_io:
case Hexagon::L2_loadrd_io:
case Hexagon::V6_vL32b_ai:
+ case Hexagon::V6_vL32b_nt_ai:
case Hexagon::V6_vL32b_ai_128B:
+ case Hexagon::V6_vL32b_nt_ai_128B:
case Hexagon::V6_vL32Ub_ai:
case Hexagon::V6_vL32Ub_ai_128B:
case Hexagon::LDriw_pred:
case Hexagon::LDriw_mod:
case Hexagon::PS_vloadrq_ai:
case Hexagon::PS_vloadrw_ai:
+ case Hexagon::PS_vloadrw_nt_ai:
case Hexagon::PS_vloadrq_ai_128B:
- case Hexagon::PS_vloadrw_ai_128B: {
+ case Hexagon::PS_vloadrw_ai_128B:
+ case Hexagon::PS_vloadrw_nt_ai_128B: {
const MachineOperand OpFI = MI.getOperand(1);
if (!OpFI.isFI())
return 0;
@@ -1726,6 +1730,39 @@ bool HexagonInstrInfo::getIncrementValue(const MachineInstr &MI,
return false;
}
+std::pair<unsigned, unsigned>
+HexagonInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
+ return std::make_pair(TF & ~HexagonII::MO_Bitmasks,
+ TF & HexagonII::MO_Bitmasks);
+}
+
+ArrayRef<std::pair<unsigned, const char*>>
+HexagonInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
+ using namespace HexagonII;
+ static const std::pair<unsigned, const char*> Flags[] = {
+ {MO_PCREL, "hexagon-pcrel"},
+ {MO_GOT, "hexagon-got"},
+ {MO_LO16, "hexagon-lo16"},
+ {MO_HI16, "hexagon-hi16"},
+ {MO_GPREL, "hexagon-gprel"},
+ {MO_GDGOT, "hexagon-gdgot"},
+ {MO_GDPLT, "hexagon-gdplt"},
+ {MO_IE, "hexagon-ie"},
+ {MO_IEGOT, "hexagon-iegot"},
+ {MO_TPREL, "hexagon-tprel"}
+ };
+ return makeArrayRef(Flags);
+}
+
+ArrayRef<std::pair<unsigned, const char*>>
+HexagonInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
+ using namespace HexagonII;
+ static const std::pair<unsigned, const char*> Flags[] = {
+ {HMOTF_ConstExtended, "hexagon-ext"}
+ };
+ return makeArrayRef(Flags);
+}
+
unsigned HexagonInstrInfo::createVR(MachineFunction *MF, MVT VT) const {
MachineRegisterInfo &MRI = MF->getRegInfo();
const TargetRegisterClass *TRC;
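
The two serialization hooks added above, together with decomposeMachineOperandsTargetFlags, are what let the MIR printer and parser name an operand's target flags: the direct flag and any bitmask flags are split apart and looked up in these tables. A small sketch of the decomposition, assuming a HexagonInstrInfo reference TII and the HexagonII values introduced elsewhere in this patch (MO_Bitmasks == HMOTF_ConstExtended):

    // Sketch: splitting a combined flag value the way the MIR serializer would.
    unsigned TF = HexagonII::MO_PCREL | HexagonII::HMOTF_ConstExtended;
    std::pair<unsigned, unsigned> P = TII.decomposeMachineOperandsTargetFlags(TF);
    // P.first  == HexagonII::MO_PCREL            -> named "hexagon-pcrel"
    // P.second == HexagonII::HMOTF_ConstExtended -> named "hexagon-ext"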
@@ -1797,7 +1834,7 @@ bool HexagonInstrInfo::isConstExtended(const MachineInstr &MI) const {
const MachineOperand &MO = MI.getOperand(ExtOpNum);
// Use MO operand flags to determine if MO
// has the HMOTF_ConstExtended flag set.
- if (MO.getTargetFlags() && HexagonII::HMOTF_ConstExtended)
+ if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
return true;
// If this is a Machine BB address we are talking about, and it is
// not marked as extended, say so.
@@ -1807,9 +1844,6 @@ bool HexagonInstrInfo::isConstExtended(const MachineInstr &MI) const {
// We could be using an instruction with an extendable immediate and shoehorn
// a global address into it. If it is a global address it will be constant
// extended. We do this for COMBINE.
- // We currently only handle isGlobal() because it is the only kind of
- // object we are going to end up with here for now.
- // In the future we probably should add isSymbol(), etc.
if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
MO.isJTI() || MO.isCPI() || MO.isFPImm())
return true;
@@ -1961,11 +1995,9 @@ bool HexagonInstrInfo::isExtended(const MachineInstr &MI) const {
return true;
// Use MO operand flags to determine if one of MI's operands
// has HMOTF_ConstExtended flag set.
- for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
- E = MI.operands_end(); I != E; ++I) {
- if (I->getTargetFlags() && HexagonII::HMOTF_ConstExtended)
+ for (const MachineOperand &MO : MI.operands())
+ if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
return true;
- }
return false;
}
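
Both this hunk and the isConstExtended hunk above fix the same latent bug: 'getTargetFlags() && HexagonII::HMOTF_ConstExtended' is a logical AND and evaluates to true for any non-zero flag value, while the intent is to test a single bit. A tiny illustration of the difference (values chosen only for the example):

    unsigned Flags = 0x01;              // some other flag set; the 0x80 bit is clear
    bool Logical = Flags && 0x80;       // true: both operands are non-zero
    bool Bitwise = (Flags & 0x80) != 0; // false: the 0x80 bit is not set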
@@ -2445,20 +2477,28 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
switch (Opcode) {
case Hexagon::PS_vstorerq_ai:
case Hexagon::PS_vstorerw_ai:
+ case Hexagon::PS_vstorerw_nt_ai:
case Hexagon::PS_vloadrq_ai:
case Hexagon::PS_vloadrw_ai:
+ case Hexagon::PS_vloadrw_nt_ai:
case Hexagon::V6_vL32b_ai:
case Hexagon::V6_vS32b_ai:
+ case Hexagon::V6_vL32b_nt_ai:
+ case Hexagon::V6_vS32b_nt_ai:
case Hexagon::V6_vL32Ub_ai:
case Hexagon::V6_vS32Ub_ai:
return isShiftedInt<4,6>(Offset);
case Hexagon::PS_vstorerq_ai_128B:
case Hexagon::PS_vstorerw_ai_128B:
+ case Hexagon::PS_vstorerw_nt_ai_128B:
case Hexagon::PS_vloadrq_ai_128B:
case Hexagon::PS_vloadrw_ai_128B:
+ case Hexagon::PS_vloadrw_nt_ai_128B:
case Hexagon::V6_vL32b_ai_128B:
case Hexagon::V6_vS32b_ai_128B:
+ case Hexagon::V6_vL32b_nt_ai_128B:
+ case Hexagon::V6_vS32b_nt_ai_128B:
case Hexagon::V6_vL32Ub_ai_128B:
case Hexagon::V6_vS32Ub_ai_128B:
return isShiftedInt<4,7>(Offset);
@@ -3170,11 +3210,19 @@ int HexagonInstrInfo::getDotCurOp(const MachineInstr &MI) const {
return Hexagon::V6_vL32b_cur_pi;
case Hexagon::V6_vL32b_ai:
return Hexagon::V6_vL32b_cur_ai;
+ case Hexagon::V6_vL32b_nt_pi:
+ return Hexagon::V6_vL32b_nt_cur_pi;
+ case Hexagon::V6_vL32b_nt_ai:
+ return Hexagon::V6_vL32b_nt_cur_ai;
//128B
case Hexagon::V6_vL32b_pi_128B:
return Hexagon::V6_vL32b_cur_pi_128B;
case Hexagon::V6_vL32b_ai_128B:
return Hexagon::V6_vL32b_cur_ai_128B;
+ case Hexagon::V6_vL32b_nt_pi_128B:
+ return Hexagon::V6_vL32b_nt_cur_pi_128B;
+ case Hexagon::V6_vL32b_nt_ai_128B:
+ return Hexagon::V6_vL32b_nt_cur_ai_128B;
}
return 0;
}
@@ -3187,11 +3235,19 @@ int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
return Hexagon::V6_vL32b_pi;
case Hexagon::V6_vL32b_cur_ai:
return Hexagon::V6_vL32b_ai;
+ case Hexagon::V6_vL32b_nt_cur_pi:
+ return Hexagon::V6_vL32b_nt_pi;
+ case Hexagon::V6_vL32b_nt_cur_ai:
+ return Hexagon::V6_vL32b_nt_ai;
//128B
case Hexagon::V6_vL32b_cur_pi_128B:
return Hexagon::V6_vL32b_pi_128B;
case Hexagon::V6_vL32b_cur_ai_128B:
return Hexagon::V6_vL32b_ai_128B;
+ case Hexagon::V6_vL32b_nt_cur_pi_128B:
+ return Hexagon::V6_vL32b_nt_pi_128B;
+ case Hexagon::V6_vL32b_nt_cur_ai_128B:
+ return Hexagon::V6_vL32b_nt_ai_128B;
}
return 0;
}
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index 944d0161a7c8..0436ce3ac475 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -301,6 +301,27 @@ public:
const MachineInstr &UseMI,
unsigned UseIdx) const override;
+ /// Decompose the machine operand's target flags into two values - the direct
+ /// target flag value and any of bit flags that are applied.
+ std::pair<unsigned, unsigned>
+ decomposeMachineOperandsTargetFlags(unsigned TF) const override;
+
+ /// Return an array that contains the direct target flag values and their
+ /// names.
+ ///
+ /// MIR Serialization is able to serialize only the target flags that are
+ /// defined by this method.
+ ArrayRef<std::pair<unsigned, const char *>>
+ getSerializableDirectMachineOperandTargetFlags() const override;
+
+ /// Return an array that contains the bitmask target flag values and their
+ /// names.
+ ///
+ /// MIR Serialization is able to serialize only the target flags that are
+ /// defined by this method.
+ ArrayRef<std::pair<unsigned, const char *>>
+ getSerializableBitmaskMachineOperandTargetFlags() const override;
+
bool isTailCall(const MachineInstr &MI) const override;
/// HexagonInstrInfo specifics.
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 4602de979024..1a26805d190d 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -49,7 +49,7 @@ static cl::opt<bool> CheckEarlyAvail("check-early-avail", cl::Hidden,
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
namespace {
class HexagonCallMutation : public ScheduleDAGMutation {
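
With DEBUG_TYPE renamed, debug output from this file is now enabled with -debug-only=machine-scheduler rather than -debug-only=misched in assertion-enabled builds; this only changes the name of the debug channel, not what is printed.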
diff --git a/lib/Target/Hexagon/HexagonPatterns.td b/lib/Target/Hexagon/HexagonPatterns.td
index 689419638f54..ba98b8994937 100644
--- a/lib/Target/Hexagon/HexagonPatterns.td
+++ b/lib/Target/Hexagon/HexagonPatterns.td
@@ -2770,6 +2770,9 @@ def unalignedstore : PatFrag<(ops node:$val, node:$addr), (store $val, $addr), [
multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Aligned stores
+ def : Pat<(alignednontemporalstore (VTSgl VectorRegs:$src1), IntRegs:$addr),
+ (V6_vS32b_nt_ai IntRegs:$addr, 0, (VTSgl VectorRegs:$src1))>,
+ Requires<[UseHVXSgl]>;
def : Pat<(alignedstore (VTSgl VectorRegs:$src1), IntRegs:$addr),
(V6_vS32b_ai IntRegs:$addr, 0, (VTSgl VectorRegs:$src1))>,
Requires<[UseHVXSgl]>;
@@ -2778,6 +2781,9 @@ multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
Requires<[UseHVXSgl]>;
// 128B Aligned stores
+ def : Pat<(alignednontemporalstore (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
+ (V6_vS32b_nt_ai_128B IntRegs:$addr, 0, (VTDbl VectorRegs128B:$src1))>,
+ Requires<[UseHVXDbl]>;
def : Pat<(alignedstore (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
(V6_vS32b_ai_128B IntRegs:$addr, 0, (VTDbl VectorRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
@@ -2787,6 +2793,11 @@ multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Fold Add R+OFF into vector store.
let AddedComplexity = 10 in {
+ def : Pat<(alignednontemporalstore (VTSgl VectorRegs:$src1),
+ (add IntRegs:$src2, Iss4_6:$offset)),
+ (V6_vS32b_nt_ai IntRegs:$src2, Iss4_6:$offset,
+ (VTSgl VectorRegs:$src1))>,
+ Requires<[UseHVXSgl]>;
def : Pat<(alignedstore (VTSgl VectorRegs:$src1),
(add IntRegs:$src2, Iss4_6:$offset)),
(V6_vS32b_ai IntRegs:$src2, Iss4_6:$offset,
@@ -2799,6 +2810,11 @@ multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
Requires<[UseHVXSgl]>;
// Fold Add R+OFF into vector store 128B.
+ def : Pat<(alignednontemporalstore (VTDbl VectorRegs128B:$src1),
+ (add IntRegs:$src2, Iss4_7:$offset)),
+ (V6_vS32b_nt_ai_128B IntRegs:$src2, Iss4_7:$offset,
+ (VTDbl VectorRegs128B:$src1))>,
+ Requires<[UseHVXDbl]>;
def : Pat<(alignedstore (VTDbl VectorRegs128B:$src1),
(add IntRegs:$src2, Iss4_7:$offset)),
(V6_vS32b_ai_128B IntRegs:$src2, Iss4_7:$offset,
@@ -2820,6 +2836,9 @@ defm : vS32b_ai_pats <v8i64, v16i64>;
multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Aligned loads
+ def : Pat < (VTSgl (alignednontemporalload IntRegs:$addr)),
+ (V6_vL32b_nt_ai IntRegs:$addr, 0) >,
+ Requires<[UseHVXSgl]>;
def : Pat < (VTSgl (alignedload IntRegs:$addr)),
(V6_vL32b_ai IntRegs:$addr, 0) >,
Requires<[UseHVXSgl]>;
@@ -2828,6 +2847,9 @@ multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
Requires<[UseHVXSgl]>;
// 128B Load
+ def : Pat < (VTDbl (alignednontemporalload IntRegs:$addr)),
+ (V6_vL32b_nt_ai_128B IntRegs:$addr, 0) >,
+ Requires<[UseHVXDbl]>;
def : Pat < (VTDbl (alignedload IntRegs:$addr)),
(V6_vL32b_ai_128B IntRegs:$addr, 0) >,
Requires<[UseHVXDbl]>;
@@ -2837,6 +2859,9 @@ multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Fold Add R+OFF into vector load.
let AddedComplexity = 10 in {
+ def : Pat<(VTDbl (alignednontemporalload (add IntRegs:$src2, Iss4_7:$offset))),
+ (V6_vL32b_nt_ai_128B IntRegs:$src2, Iss4_7:$offset)>,
+ Requires<[UseHVXDbl]>;
def : Pat<(VTDbl (alignedload (add IntRegs:$src2, Iss4_7:$offset))),
(V6_vL32b_ai_128B IntRegs:$src2, Iss4_7:$offset)>,
Requires<[UseHVXDbl]>;
@@ -2844,6 +2869,9 @@ multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
(V6_vL32Ub_ai_128B IntRegs:$src2, Iss4_7:$offset)>,
Requires<[UseHVXDbl]>;
+ def : Pat<(VTSgl (alignednontemporalload (add IntRegs:$src2, Iss4_6:$offset))),
+ (V6_vL32b_nt_ai IntRegs:$src2, Iss4_6:$offset)>,
+ Requires<[UseHVXSgl]>;
def : Pat<(VTSgl (alignedload (add IntRegs:$src2, Iss4_6:$offset))),
(V6_vL32b_ai IntRegs:$src2, Iss4_6:$offset)>,
Requires<[UseHVXSgl]>;
@@ -2859,6 +2887,9 @@ defm : vL32b_ai_pats <v16i32, v32i32>;
defm : vL32b_ai_pats <v8i64, v16i64>;
multiclass STrivv_pats <ValueType VTSgl, ValueType VTDbl> {
+ def : Pat<(alignednontemporalstore (VTSgl VecDblRegs:$src1), IntRegs:$addr),
+ (PS_vstorerw_nt_ai IntRegs:$addr, 0, (VTSgl VecDblRegs:$src1))>,
+ Requires<[UseHVXSgl]>;
def : Pat<(alignedstore (VTSgl VecDblRegs:$src1), IntRegs:$addr),
(PS_vstorerw_ai IntRegs:$addr, 0, (VTSgl VecDblRegs:$src1))>,
Requires<[UseHVXSgl]>;
@@ -2866,6 +2897,10 @@ multiclass STrivv_pats <ValueType VTSgl, ValueType VTDbl> {
(PS_vstorerwu_ai IntRegs:$addr, 0, (VTSgl VecDblRegs:$src1))>,
Requires<[UseHVXSgl]>;
+ def : Pat<(alignednontemporalstore (VTDbl VecDblRegs128B:$src1), IntRegs:$addr),
+ (PS_vstorerw_nt_ai_128B IntRegs:$addr, 0,
+ (VTDbl VecDblRegs128B:$src1))>,
+ Requires<[UseHVXDbl]>;
def : Pat<(alignedstore (VTDbl VecDblRegs128B:$src1), IntRegs:$addr),
(PS_vstorerw_ai_128B IntRegs:$addr, 0,
(VTDbl VecDblRegs128B:$src1))>,
@@ -2882,6 +2917,9 @@ defm : STrivv_pats <v32i32, v64i32>;
defm : STrivv_pats <v16i64, v32i64>;
multiclass LDrivv_pats <ValueType VTSgl, ValueType VTDbl> {
+ def : Pat<(VTSgl (alignednontemporalload I32:$addr)),
+ (PS_vloadrw_nt_ai I32:$addr, 0)>,
+ Requires<[UseHVXSgl]>;
def : Pat<(VTSgl (alignedload I32:$addr)),
(PS_vloadrw_ai I32:$addr, 0)>,
Requires<[UseHVXSgl]>;
@@ -2889,6 +2927,9 @@ multiclass LDrivv_pats <ValueType VTSgl, ValueType VTDbl> {
(PS_vloadrwu_ai I32:$addr, 0)>,
Requires<[UseHVXSgl]>;
+ def : Pat<(VTDbl (alignednontemporalload I32:$addr)),
+ (PS_vloadrw_nt_ai_128B I32:$addr, 0)>,
+ Requires<[UseHVXDbl]>;
def : Pat<(VTDbl (alignedload I32:$addr)),
(PS_vloadrw_ai_128B I32:$addr, 0)>,
Requires<[UseHVXDbl]>;
@@ -3021,16 +3062,16 @@ def : Pat<(v2i16 (add (v2i16 IntRegs:$src1), (v2i16 IntRegs:$src2))),
def : Pat<(v2i16 (sub (v2i16 IntRegs:$src1), (v2i16 IntRegs:$src2))),
(A2_svsubh IntRegs:$src1, IntRegs:$src2)>;
-def HexagonVSPLATB: SDNode<"HexagonISD::VSPLATB", SDTUnaryOp>;
-def HexagonVSPLATH: SDNode<"HexagonISD::VSPLATH", SDTUnaryOp>;
+def SDTHexagonVSPLAT: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
+def HexagonVSPLAT: SDNode<"HexagonISD::VSPLAT", SDTHexagonVSPLAT>;
// Replicate the low 8-bits from 32-bits input register into each of the
// four bytes of 32-bits destination register.
-def: Pat<(v4i8 (HexagonVSPLATB I32:$Rs)), (S2_vsplatrb I32:$Rs)>;
+def: Pat<(v4i8 (HexagonVSPLAT I32:$Rs)), (S2_vsplatrb I32:$Rs)>;
// Replicate the low 16-bits from 32-bits input register into each of the
// four halfwords of 64-bits destination register.
-def: Pat<(v4i16 (HexagonVSPLATH I32:$Rs)), (S2_vsplatrh I32:$Rs)>;
+def: Pat<(v4i16 (HexagonVSPLAT I32:$Rs)), (S2_vsplatrh I32:$Rs)>;
class VArith_pat <InstHexagon MI, SDNode Op, PatFrag Type>
@@ -3068,84 +3109,44 @@ def: Pat<(v2i32 (shl V2I32:$b, (i64 (HexagonCOMBINE (i32 u5_0ImmPred:$c),
(i32 u5_0ImmPred:$c))))),
(S2_asl_i_vw V2I32:$b, imm:$c)>;
-def: Pat<(v4i16 (sra V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4_0ImmPred:$c)))))),
+def: Pat<(v4i16 (sra V4I16:$b, (v4i16 (HexagonVSPLAT u4_0ImmPred:$c)))),
(S2_asr_i_vh V4I16:$b, imm:$c)>;
-def: Pat<(v4i16 (srl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4_0ImmPred:$c)))))),
+def: Pat<(v4i16 (srl V4I16:$b, (v4i16 (HexagonVSPLAT u4_0ImmPred:$c)))),
(S2_lsr_i_vh V4I16:$b, imm:$c)>;
-def: Pat<(v4i16 (shl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4_0ImmPred:$c)))))),
+def: Pat<(v4i16 (shl V4I16:$b, (v4i16 (HexagonVSPLAT u4_0ImmPred:$c)))),
(S2_asl_i_vh V4I16:$b, imm:$c)>;
-def SDTHexagon_v2i32_v2i32_i32 : SDTypeProfile<1, 2,
- [SDTCisSameAs<0, 1>, SDTCisVT<0, v2i32>, SDTCisInt<2>]>;
-def SDTHexagon_v4i16_v4i16_i32 : SDTypeProfile<1, 2,
- [SDTCisSameAs<0, 1>, SDTCisVT<0, v4i16>, SDTCisInt<2>]>;
+def SDTHexagonVShift
+ : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisVec<0>, SDTCisVT<2, i32>]>;
-def HexagonVSRAW: SDNode<"HexagonISD::VSRAW", SDTHexagon_v2i32_v2i32_i32>;
-def HexagonVSRAH: SDNode<"HexagonISD::VSRAH", SDTHexagon_v4i16_v4i16_i32>;
-def HexagonVSRLW: SDNode<"HexagonISD::VSRLW", SDTHexagon_v2i32_v2i32_i32>;
-def HexagonVSRLH: SDNode<"HexagonISD::VSRLH", SDTHexagon_v4i16_v4i16_i32>;
-def HexagonVSHLW: SDNode<"HexagonISD::VSHLW", SDTHexagon_v2i32_v2i32_i32>;
-def HexagonVSHLH: SDNode<"HexagonISD::VSHLH", SDTHexagon_v4i16_v4i16_i32>;
+def HexagonVASL: SDNode<"HexagonISD::VASL", SDTHexagonVShift>;
+def HexagonVASR: SDNode<"HexagonISD::VASR", SDTHexagonVShift>;
+def HexagonVLSR: SDNode<"HexagonISD::VLSR", SDTHexagonVShift>;
-def: Pat<(v2i32 (HexagonVSRAW V2I32:$Rs, u5_0ImmPred:$u5)),
+def: Pat<(v2i32 (HexagonVASL V2I32:$Rs, u5_0ImmPred:$u5)),
+ (S2_asl_i_vw V2I32:$Rs, imm:$u5)>;
+def: Pat<(v4i16 (HexagonVASL V4I16:$Rs, u4_0ImmPred:$u4)),
+ (S2_asl_i_vh V4I16:$Rs, imm:$u4)>;
+def: Pat<(v2i32 (HexagonVASR V2I32:$Rs, u5_0ImmPred:$u5)),
(S2_asr_i_vw V2I32:$Rs, imm:$u5)>;
-def: Pat<(v4i16 (HexagonVSRAH V4I16:$Rs, u4_0ImmPred:$u4)),
+def: Pat<(v4i16 (HexagonVASR V4I16:$Rs, u4_0ImmPred:$u4)),
(S2_asr_i_vh V4I16:$Rs, imm:$u4)>;
-def: Pat<(v2i32 (HexagonVSRLW V2I32:$Rs, u5_0ImmPred:$u5)),
+def: Pat<(v2i32 (HexagonVLSR V2I32:$Rs, u5_0ImmPred:$u5)),
(S2_lsr_i_vw V2I32:$Rs, imm:$u5)>;
-def: Pat<(v4i16 (HexagonVSRLH V4I16:$Rs, u4_0ImmPred:$u4)),
+def: Pat<(v4i16 (HexagonVLSR V4I16:$Rs, u4_0ImmPred:$u4)),
(S2_lsr_i_vh V4I16:$Rs, imm:$u4)>;
-def: Pat<(v2i32 (HexagonVSHLW V2I32:$Rs, u5_0ImmPred:$u5)),
- (S2_asl_i_vw V2I32:$Rs, imm:$u5)>;
-def: Pat<(v4i16 (HexagonVSHLH V4I16:$Rs, u4_0ImmPred:$u4)),
- (S2_asl_i_vh V4I16:$Rs, imm:$u4)>;
class vshift_rr_pat<InstHexagon MI, SDNode Op, PatFrag Value>
: Pat <(Op Value:$Rs, I32:$Rt),
(MI Value:$Rs, I32:$Rt)>;
-def: vshift_rr_pat <S2_asr_r_vw, HexagonVSRAW, V2I32>;
-def: vshift_rr_pat <S2_asr_r_vh, HexagonVSRAH, V4I16>;
-def: vshift_rr_pat <S2_lsr_r_vw, HexagonVSRLW, V2I32>;
-def: vshift_rr_pat <S2_lsr_r_vh, HexagonVSRLH, V4I16>;
-def: vshift_rr_pat <S2_asl_r_vw, HexagonVSHLW, V2I32>;
-def: vshift_rr_pat <S2_asl_r_vh, HexagonVSHLH, V4I16>;
-
-
-def SDTHexagonVecCompare_v8i8 : SDTypeProfile<1, 2,
- [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v8i8>]>;
-def SDTHexagonVecCompare_v4i16 : SDTypeProfile<1, 2,
- [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v4i16>]>;
-def SDTHexagonVecCompare_v2i32 : SDTypeProfile<1, 2,
- [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v2i32>]>;
-
-def HexagonVCMPBEQ: SDNode<"HexagonISD::VCMPBEQ", SDTHexagonVecCompare_v8i8>;
-def HexagonVCMPBGT: SDNode<"HexagonISD::VCMPBGT", SDTHexagonVecCompare_v8i8>;
-def HexagonVCMPBGTU: SDNode<"HexagonISD::VCMPBGTU", SDTHexagonVecCompare_v8i8>;
-def HexagonVCMPHEQ: SDNode<"HexagonISD::VCMPHEQ", SDTHexagonVecCompare_v4i16>;
-def HexagonVCMPHGT: SDNode<"HexagonISD::VCMPHGT", SDTHexagonVecCompare_v4i16>;
-def HexagonVCMPHGTU: SDNode<"HexagonISD::VCMPHGTU", SDTHexagonVecCompare_v4i16>;
-def HexagonVCMPWEQ: SDNode<"HexagonISD::VCMPWEQ", SDTHexagonVecCompare_v2i32>;
-def HexagonVCMPWGT: SDNode<"HexagonISD::VCMPWGT", SDTHexagonVecCompare_v2i32>;
-def HexagonVCMPWGTU: SDNode<"HexagonISD::VCMPWGTU", SDTHexagonVecCompare_v2i32>;
-
-
-class vcmp_i1_pat<InstHexagon MI, SDNode Op, PatFrag Value>
- : Pat <(i1 (Op Value:$Rs, Value:$Rt)),
- (MI Value:$Rs, Value:$Rt)>;
-
-def: vcmp_i1_pat<A2_vcmpbeq, HexagonVCMPBEQ, V8I8>;
-def: vcmp_i1_pat<A4_vcmpbgt, HexagonVCMPBGT, V8I8>;
-def: vcmp_i1_pat<A2_vcmpbgtu, HexagonVCMPBGTU, V8I8>;
-
-def: vcmp_i1_pat<A2_vcmpheq, HexagonVCMPHEQ, V4I16>;
-def: vcmp_i1_pat<A2_vcmphgt, HexagonVCMPHGT, V4I16>;
-def: vcmp_i1_pat<A2_vcmphgtu, HexagonVCMPHGTU, V4I16>;
-
-def: vcmp_i1_pat<A2_vcmpweq, HexagonVCMPWEQ, V2I32>;
-def: vcmp_i1_pat<A2_vcmpwgt, HexagonVCMPWGT, V2I32>;
-def: vcmp_i1_pat<A2_vcmpwgtu, HexagonVCMPWGTU, V2I32>;
+def: vshift_rr_pat <S2_asl_r_vw, HexagonVASL, V2I32>;
+def: vshift_rr_pat <S2_asl_r_vh, HexagonVASL, V4I16>;
+def: vshift_rr_pat <S2_asr_r_vw, HexagonVASR, V2I32>;
+def: vshift_rr_pat <S2_asr_r_vh, HexagonVASR, V4I16>;
+def: vshift_rr_pat <S2_lsr_r_vw, HexagonVLSR, V2I32>;
+def: vshift_rr_pat <S2_lsr_r_vh, HexagonVLSR, V4I16>;
class vcmp_vi1_pat<InstHexagon MI, PatFrag Op, PatFrag InVal, ValueType OutTy>
@@ -3255,13 +3256,6 @@ def: Pat<(v4i8 (trunc V4I16:$Rs)),
def: Pat<(v2i16 (trunc V2I32:$Rs)),
(LoReg (S2_packhl (HiReg $Rs), (LoReg $Rs)))>;
-
-def HexagonVSXTBH : SDNode<"HexagonISD::VSXTBH", SDTUnaryOp>;
-def HexagonVSXTBW : SDNode<"HexagonISD::VSXTBW", SDTUnaryOp>;
-
-def: Pat<(i64 (HexagonVSXTBH I32:$Rs)), (S2_vsxtbh I32:$Rs)>;
-def: Pat<(i64 (HexagonVSXTBW I32:$Rs)), (S2_vsxthw I32:$Rs)>;
-
def: Pat<(v4i16 (zext V4I8:$Rs)), (S2_vzxtbh V4I8:$Rs)>;
def: Pat<(v2i32 (zext V2I16:$Rs)), (S2_vzxthw V2I16:$Rs)>;
def: Pat<(v4i16 (anyext V4I8:$Rs)), (S2_vzxtbh V4I8:$Rs)>;
@@ -3322,31 +3316,6 @@ def: Pat<(v8i8 (mul V8I8:$Rs, V8I8:$Rt)),
(A2_combinew (S2_vtrunehb (VMPYB_no_V5 (HiReg $Rs), (HiReg $Rt))),
(S2_vtrunehb (VMPYB_no_V5 (LoReg $Rs), (LoReg $Rt))))>;
-def SDTHexagonBinOp64 : SDTypeProfile<1, 2,
- [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>]>;
-
-def HexagonSHUFFEB: SDNode<"HexagonISD::SHUFFEB", SDTHexagonBinOp64>;
-def HexagonSHUFFEH: SDNode<"HexagonISD::SHUFFEH", SDTHexagonBinOp64>;
-def HexagonSHUFFOB: SDNode<"HexagonISD::SHUFFOB", SDTHexagonBinOp64>;
-def HexagonSHUFFOH: SDNode<"HexagonISD::SHUFFOH", SDTHexagonBinOp64>;
-
-class ShufflePat<InstHexagon MI, SDNode Op>
- : Pat<(i64 (Op DoubleRegs:$src1, DoubleRegs:$src2)),
- (i64 (MI DoubleRegs:$src1, DoubleRegs:$src2))>;
-
-// Shuffles even bytes for i=0..3: A[2*i].b = C[2*i].b; A[2*i+1].b = B[2*i].b
-def: ShufflePat<S2_shuffeb, HexagonSHUFFEB>;
-
-// Shuffles odd bytes for i=0..3: A[2*i].b = C[2*i+1].b; A[2*i+1].b = B[2*i+1].b
-def: ShufflePat<S2_shuffob, HexagonSHUFFOB>;
-
-// Shuffles even half for i=0,1: A[2*i].h = C[2*i].h; A[2*i+1].h = B[2*i].h
-def: ShufflePat<S2_shuffeh, HexagonSHUFFEH>;
-
-// Shuffles odd half for i=0,1: A[2*i].h = C[2*i+1].h; A[2*i+1].h = B[2*i+1].h
-def: ShufflePat<S2_shuffoh, HexagonSHUFFOH>;
-
-
// Truncated store from v4i16 to v4i8.
def truncstorev4i8: PatFrag<(ops node:$val, node:$ptr),
(truncstore node:$val, node:$ptr),
diff --git a/lib/Target/Hexagon/HexagonPseudo.td b/lib/Target/Hexagon/HexagonPseudo.td
index 93fb688fc1c0..b42c1ab975a8 100644
--- a/lib/Target/Hexagon/HexagonPseudo.td
+++ b/lib/Target/Hexagon/HexagonPseudo.td
@@ -407,6 +407,11 @@ def PS_vstorerw_ai: STrivv_template<VecDblRegs, V6_vS32b_ai>,
def PS_vstorerw_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
+def PS_vstorerw_nt_ai: STrivv_template<VecDblRegs, V6_vS32b_nt_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vstorerw_nt_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32b_nt_ai_128B>,
+ Requires<[HasV60T,UseHVXDbl]>;
+
def PS_vstorerwu_ai: STrivv_template<VecDblRegs, V6_vS32Ub_ai>,
Requires<[HasV60T,UseHVXSgl]>;
def PS_vstorerwu_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32Ub_ai_128B>,
@@ -433,6 +438,11 @@ def PS_vloadrw_ai: LDrivv_template<VecDblRegs, V6_vL32b_ai>,
def PS_vloadrw_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
+def PS_vloadrw_nt_ai: LDrivv_template<VecDblRegs, V6_vL32b_nt_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vloadrw_nt_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32b_nt_ai_128B>,
+ Requires<[HasV60T,UseHVXDbl]>;
+
def PS_vloadrwu_ai: LDrivv_template<VecDblRegs, V6_vL32Ub_ai>,
Requires<[HasV60T,UseHVXSgl]>;
def PS_vloadrwu_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32Ub_ai_128B>,
diff --git a/lib/Target/Hexagon/HexagonSplitDouble.cpp b/lib/Target/Hexagon/HexagonSplitDouble.cpp
index db268b78cd73..4fa929a20810 100644
--- a/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -350,6 +350,8 @@ int32_t HexagonSplitDoubleRegs::profit(const MachineInstr *MI) const {
MI->getOperand(2).getImm());
case Hexagon::A4_combineri:
ImmX++;
+ // Fall through into A4_combineir.
+ LLVM_FALLTHROUGH;
case Hexagon::A4_combineir: {
ImmX++;
int64_t V = MI->getOperand(ImmX).getImm();
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 76d9b31b005f..7d88b51f32dd 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -110,10 +110,11 @@ SchedCustomRegistry("hexagon", "Run Hexagon's custom scheduler",
namespace llvm {
extern char &HexagonExpandCondsetsID;
void initializeHexagonExpandCondsetsPass(PassRegistry&);
- void initializeHexagonLoopIdiomRecognizePass(PassRegistry&);
void initializeHexagonGenMuxPass(PassRegistry&);
- void initializeHexagonOptAddrModePass(PassRegistry&);
+ void initializeHexagonLoopIdiomRecognizePass(PassRegistry&);
void initializeHexagonNewValueJumpPass(PassRegistry&);
+ void initializeHexagonOptAddrModePass(PassRegistry&);
+ void initializeHexagonPacketizerPass(PassRegistry&);
Pass *createHexagonLoopIdiomPass();
FunctionPass *createHexagonBitSimplify();
@@ -156,10 +157,11 @@ extern "C" void LLVMInitializeHexagonTarget() {
RegisterTargetMachine<HexagonTargetMachine> X(getTheHexagonTarget());
PassRegistry &PR = *PassRegistry::getPassRegistry();
- initializeHexagonLoopIdiomRecognizePass(PR);
initializeHexagonGenMuxPass(PR);
- initializeHexagonOptAddrModePass(PR);
+ initializeHexagonLoopIdiomRecognizePass(PR);
initializeHexagonNewValueJumpPass(PR);
+ initializeHexagonOptAddrModePass(PR);
+ initializeHexagonPacketizerPass(PR);
}
HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT,
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index 7667bfb7a0eb..a3021e3dfe43 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -60,9 +60,7 @@ namespace {
class HexagonPacketizer : public MachineFunctionPass {
public:
static char ID;
- HexagonPacketizer() : MachineFunctionPass(ID) {
- initializeHexagonPacketizerPass(*PassRegistry::getPassRegistry());
- }
+ HexagonPacketizer() : MachineFunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
@@ -89,14 +87,14 @@ namespace {
char HexagonPacketizer::ID = 0;
}
-INITIALIZE_PASS_BEGIN(HexagonPacketizer, "packets", "Hexagon Packetizer",
- false, false)
+INITIALIZE_PASS_BEGIN(HexagonPacketizer, "hexagon-packetizer",
+ "Hexagon Packetizer", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
-INITIALIZE_PASS_END(HexagonPacketizer, "packets", "Hexagon Packetizer",
- false, false)
+INITIALIZE_PASS_END(HexagonPacketizer, "hexagon-packetizer",
+ "Hexagon Packetizer", false, false)
HexagonPacketizerList::HexagonPacketizerList(MachineFunction &MF,
MachineLoopInfo &MLI, AliasAnalysis *AA,
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index 34d0b55aa22a..2a0edda8dcee 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -412,7 +412,7 @@ public:
/// fixup kind as appropriate.
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t FixupValue, bool IsPCRel) const override {
+ uint64_t FixupValue, bool IsResolved) const override {
// When FixupValue is 0 the relocation is external and there
// is nothing for us to do.
@@ -442,6 +442,7 @@ public:
case fixup_Hexagon_B7_PCREL:
if (!(isIntN(7, sValue)))
HandleFixupError(7, 2, (int64_t)FixupValue, "B7_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B7_PCREL_X:
InstMask = 0x00001f18; // Word32_B7
Reloc = (((Value >> 2) & 0x1f) << 8) | // Value 6-2 = Target 12-8
@@ -451,6 +452,7 @@ public:
case fixup_Hexagon_B9_PCREL:
if (!(isIntN(9, sValue)))
HandleFixupError(9, 2, (int64_t)FixupValue, "B9_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B9_PCREL_X:
InstMask = 0x003000fe; // Word32_B9
Reloc = (((Value >> 7) & 0x3) << 20) | // Value 8-7 = Target 21-20
@@ -462,6 +464,7 @@ public:
case fixup_Hexagon_B13_PCREL:
if (!(isIntN(13, sValue)))
HandleFixupError(13, 2, (int64_t)FixupValue, "B13_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B13_PCREL_X:
InstMask = 0x00202ffe; // Word32_B13
Reloc = (((Value >> 12) & 0x1) << 21) | // Value 12 = Target 21
@@ -472,6 +475,7 @@ public:
case fixup_Hexagon_B15_PCREL:
if (!(isIntN(15, sValue)))
HandleFixupError(15, 2, (int64_t)FixupValue, "B15_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B15_PCREL_X:
InstMask = 0x00df20fe; // Word32_B15
Reloc = (((Value >> 13) & 0x3) << 22) | // Value 14-13 = Target 23-22
@@ -483,6 +487,7 @@ public:
case fixup_Hexagon_B22_PCREL:
if (!(isIntN(22, sValue)))
HandleFixupError(22, 2, (int64_t)FixupValue, "B22_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B22_PCREL_X:
InstMask = 0x01ff3ffe; // Word32_B22
Reloc = (((Value >> 13) & 0x1ff) << 16) | // Value 21-13 = Target 24-16
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index d8009c5da08e..7f90e83fc8e9 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -169,8 +169,11 @@ namespace HexagonII {
// Hexagon specific MO operand flag mask.
enum HexagonMOTargetFlagVal {
- //===------------------------------------------------------------------===//
- // Hexagon Specific MachineOperand flags.
+ // Hexagon-specific MachineOperand target flags.
+ //
+ // When changing these, make sure to update
+ // getSerializableDirectMachineOperandTargetFlags and
+ // getSerializableBitmaskMachineOperandTargetFlags if needed.
MO_NO_FLAG,
/// MO_PCREL - On a symbol operand, indicates a PC-relative relocation
@@ -207,10 +210,12 @@ namespace HexagonII {
MO_TPREL,
// HMOTF_ConstExtended
- // Addendum to abovem, indicates a const extended op
+ // Addendum to above, indicates a const extended op
// Can be used as a mask.
- HMOTF_ConstExtended = 0x80
+ HMOTF_ConstExtended = 0x80,
+ // Union of all bitmasks (currently only HMOTF_ConstExtended).
+ MO_Bitmasks = HMOTF_ConstExtended
};
// Hexagon Sub-instruction classes.
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
index 564d43b45cb8..1604e7c8dc54 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
@@ -259,6 +259,7 @@ bool HexagonShuffler::check() {
break;
case HexagonII::TypeCVI_VM_VP_LDU:
++onlyNo1;
+ LLVM_FALLTHROUGH;
case HexagonII::TypeCVI_VM_LD:
case HexagonII::TypeCVI_VM_TMP_LD:
case HexagonII::TypeLD:
@@ -274,6 +275,7 @@ bool HexagonShuffler::check() {
break;
case HexagonII::TypeCVI_VM_STU:
++onlyNo1;
+ LLVM_FALLTHROUGH;
case HexagonII::TypeCVI_VM_ST:
case HexagonII::TypeCVI_VM_NEW_ST:
case HexagonII::TypeST:
diff --git a/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp b/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
index 72e471f5766e..1394ac7210f2 100644
--- a/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
+++ b/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
@@ -787,6 +787,7 @@ std::unique_ptr<LanaiOperand> LanaiAsmParser::parseImmediate() {
case AsmToken::Dot:
if (!Parser.parseExpression(ExprVal))
return LanaiOperand::createImm(ExprVal, Start, End);
+ LLVM_FALLTHROUGH;
default:
return nullptr;
}
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp b/lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp
index c212726113ab..bbce5f670c99 100644
--- a/lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp
@@ -51,7 +51,7 @@ public:
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override;
+ uint64_t Value, bool IsResolved) const override;
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override;
@@ -92,7 +92,7 @@ bool LanaiAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
void LanaiAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
- bool /*IsPCRel*/) const {
+ bool /*IsResolved*/) const {
MCFixupKind Kind = Fixup.getKind();
Value = adjustFixupValue(static_cast<unsigned>(Kind), Value);
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 69b1ba1528d0..b72c9d534478 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -304,6 +304,9 @@ class MipsAsmParser : public MCTargetAsmParser {
bool expandSeqI(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
+ bool expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+
bool reportParseError(Twine ErrorMsg);
bool reportParseError(SMLoc Loc, Twine ErrorMsg);
@@ -343,6 +346,8 @@ class MipsAsmParser : public MCTargetAsmParser {
bool parseSetPushDirective();
bool parseSetSoftFloatDirective();
bool parseSetHardFloatDirective();
+ bool parseSetMtDirective();
+ bool parseSetNoMtDirective();
bool parseSetAssignment();
@@ -628,6 +633,9 @@ public:
bool useSoftFloat() const {
return getSTI().getFeatureBits()[Mips::FeatureSoftFloat];
}
+ bool hasMT() const {
+ return getSTI().getFeatureBits()[Mips::FeatureMT];
+ }
/// Warn if RegIndex is the same as the current AT.
void warnIfRegIndexIsAT(unsigned RegIndex, SMLoc Loc);
@@ -1966,6 +1974,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
case Mips::SDIV_MM:
FirstOp = 0;
SecondOp = 1;
+ LLVM_FALLTHROUGH;
case Mips::SDivMacro:
case Mips::DSDivMacro:
case Mips::UDivMacro:
@@ -2505,6 +2514,16 @@ MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
return expandSeq(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SEQIMacro:
return expandSeqI(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
+ case Mips::MFTC0: case Mips::MTTC0:
+ case Mips::MFTGPR: case Mips::MTTGPR:
+ case Mips::MFTLO: case Mips::MTTLO:
+ case Mips::MFTHI: case Mips::MTTHI:
+ case Mips::MFTACX: case Mips::MTTACX:
+ case Mips::MFTDSP: case Mips::MTTDSP:
+ case Mips::MFTC1: case Mips::MTTC1:
+ case Mips::MFTHC1: case Mips::MTTHC1:
+ case Mips::CFTC1: case Mips::CTTC1:
+ return expandMXTRAlias(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
}
}
@@ -4876,6 +4895,212 @@ bool MipsAsmParser::expandSeqI(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
return false;
}
+// Map the DSP accumulator and control register to the corresponding gpr
+// operand. Unlike the other aliases, the m(f|t)t(lo|hi|acx) instructions
+// do not map the DSP registers contiguously to gpr registers.
+static unsigned getRegisterForMxtrDSP(MCInst &Inst, bool IsMFDSP) {
+ switch (Inst.getOpcode()) {
+ case Mips::MFTLO:
+ case Mips::MTTLO:
+ switch (Inst.getOperand(IsMFDSP ? 1 : 0).getReg()) {
+ case Mips::AC0:
+ return Mips::ZERO;
+ case Mips::AC1:
+ return Mips::A0;
+ case Mips::AC2:
+ return Mips::T0;
+ case Mips::AC3:
+ return Mips::T4;
+ default:
+ llvm_unreachable("Unknown register for 'mttr' alias!");
+ }
+ case Mips::MFTHI:
+ case Mips::MTTHI:
+ switch (Inst.getOperand(IsMFDSP ? 1 : 0).getReg()) {
+ case Mips::AC0:
+ return Mips::AT;
+ case Mips::AC1:
+ return Mips::A1;
+ case Mips::AC2:
+ return Mips::T1;
+ case Mips::AC3:
+ return Mips::T5;
+ default:
+ llvm_unreachable("Unknown register for 'mttr' alias!");
+ }
+ case Mips::MFTACX:
+ case Mips::MTTACX:
+ switch (Inst.getOperand(IsMFDSP ? 1 : 0).getReg()) {
+ case Mips::AC0:
+ return Mips::V0;
+ case Mips::AC1:
+ return Mips::A2;
+ case Mips::AC2:
+ return Mips::T2;
+ case Mips::AC3:
+ return Mips::T6;
+ default:
+ llvm_unreachable("Unknown register for 'mttr' alias!");
+ }
+ case Mips::MFTDSP:
+ case Mips::MTTDSP:
+ return Mips::S0;
+ default:
+ llvm_unreachable("Unknown instruction for 'mttr' dsp alias!");
+ }
+}
+
+// Map the floating point register operand to the corresponding register
+// operand.
+static unsigned getRegisterForMxtrFP(MCInst &Inst, bool IsMFTC1) {
+ switch (Inst.getOperand(IsMFTC1 ? 1 : 0).getReg()) {
+ case Mips::F0: return Mips::ZERO;
+ case Mips::F1: return Mips::AT;
+ case Mips::F2: return Mips::V0;
+ case Mips::F3: return Mips::V1;
+ case Mips::F4: return Mips::A0;
+ case Mips::F5: return Mips::A1;
+ case Mips::F6: return Mips::A2;
+ case Mips::F7: return Mips::A3;
+ case Mips::F8: return Mips::T0;
+ case Mips::F9: return Mips::T1;
+ case Mips::F10: return Mips::T2;
+ case Mips::F11: return Mips::T3;
+ case Mips::F12: return Mips::T4;
+ case Mips::F13: return Mips::T5;
+ case Mips::F14: return Mips::T6;
+ case Mips::F15: return Mips::T7;
+ case Mips::F16: return Mips::S0;
+ case Mips::F17: return Mips::S1;
+ case Mips::F18: return Mips::S2;
+ case Mips::F19: return Mips::S3;
+ case Mips::F20: return Mips::S4;
+ case Mips::F21: return Mips::S5;
+ case Mips::F22: return Mips::S6;
+ case Mips::F23: return Mips::S7;
+ case Mips::F24: return Mips::T8;
+ case Mips::F25: return Mips::T9;
+ case Mips::F26: return Mips::K0;
+ case Mips::F27: return Mips::K1;
+ case Mips::F28: return Mips::GP;
+ case Mips::F29: return Mips::SP;
+ case Mips::F30: return Mips::FP;
+ case Mips::F31: return Mips::RA;
+ default: llvm_unreachable("Unknown register for mttc1 alias!");
+ }
+}
+
+// Map the coprocessor register operand to the corresponding gpr register operand.
+static unsigned getRegisterForMxtrC0(MCInst &Inst, bool IsMFTC0) {
+ switch (Inst.getOperand(IsMFTC0 ? 1 : 0).getReg()) {
+ case Mips::COP00: return Mips::ZERO;
+ case Mips::COP01: return Mips::AT;
+ case Mips::COP02: return Mips::V0;
+ case Mips::COP03: return Mips::V1;
+ case Mips::COP04: return Mips::A0;
+ case Mips::COP05: return Mips::A1;
+ case Mips::COP06: return Mips::A2;
+ case Mips::COP07: return Mips::A3;
+ case Mips::COP08: return Mips::T0;
+ case Mips::COP09: return Mips::T1;
+ case Mips::COP010: return Mips::T2;
+ case Mips::COP011: return Mips::T3;
+ case Mips::COP012: return Mips::T4;
+ case Mips::COP013: return Mips::T5;
+ case Mips::COP014: return Mips::T6;
+ case Mips::COP015: return Mips::T7;
+ case Mips::COP016: return Mips::S0;
+ case Mips::COP017: return Mips::S1;
+ case Mips::COP018: return Mips::S2;
+ case Mips::COP019: return Mips::S3;
+ case Mips::COP020: return Mips::S4;
+ case Mips::COP021: return Mips::S5;
+ case Mips::COP022: return Mips::S6;
+ case Mips::COP023: return Mips::S7;
+ case Mips::COP024: return Mips::T8;
+ case Mips::COP025: return Mips::T9;
+ case Mips::COP026: return Mips::K0;
+ case Mips::COP027: return Mips::K1;
+ case Mips::COP028: return Mips::GP;
+ case Mips::COP029: return Mips::SP;
+ case Mips::COP030: return Mips::FP;
+ case Mips::COP031: return Mips::RA;
+ default: llvm_unreachable("Unknown register for mttc0 alias!");
+ }
+}
+
+/// Expand an alias of 'mftr' or 'mttr' into the full instruction, by producing
+/// an mftr or mttr with the correctly mapped gpr register, u, sel and h bits.
+bool MipsAsmParser::expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ unsigned rd = 0;
+ unsigned u = 1;
+ unsigned sel = 0;
+ unsigned h = 0;
+ bool IsMFTR = false;
+ switch (Inst.getOpcode()) {
+ case Mips::MFTC0:
+ IsMFTR = true;
+ LLVM_FALLTHROUGH;
+ case Mips::MTTC0:
+ u = 0;
+ rd = getRegisterForMxtrC0(Inst, IsMFTR);
+ sel = Inst.getOperand(2).getImm();
+ break;
+ case Mips::MFTGPR:
+ IsMFTR = true;
+ LLVM_FALLTHROUGH;
+ case Mips::MTTGPR:
+ rd = Inst.getOperand(IsMFTR ? 1 : 0).getReg();
+ break;
+ case Mips::MFTLO:
+ case Mips::MFTHI:
+ case Mips::MFTACX:
+ case Mips::MFTDSP:
+ IsMFTR = true;
+ LLVM_FALLTHROUGH;
+ case Mips::MTTLO:
+ case Mips::MTTHI:
+ case Mips::MTTACX:
+ case Mips::MTTDSP:
+ rd = getRegisterForMxtrDSP(Inst, IsMFTR);
+ sel = 1;
+ break;
+ case Mips::MFTHC1:
+ h = 1;
+ LLVM_FALLTHROUGH;
+ case Mips::MFTC1:
+ IsMFTR = true;
+ rd = getRegisterForMxtrFP(Inst, IsMFTR);
+ sel = 2;
+ break;
+ case Mips::MTTHC1:
+ h = 1;
+ LLVM_FALLTHROUGH;
+ case Mips::MTTC1:
+ rd = getRegisterForMxtrFP(Inst, IsMFTR);
+ sel = 2;
+ break;
+ case Mips::CFTC1:
+ IsMFTR = true;
+ LLVM_FALLTHROUGH;
+ case Mips::CTTC1:
+ rd = getRegisterForMxtrFP(Inst, IsMFTR);
+ sel = 3;
+ break;
+ }
+ unsigned Op0 = IsMFTR ? Inst.getOperand(0).getReg() : rd;
+ unsigned Op1 =
+ IsMFTR ? rd
+ : (Inst.getOpcode() != Mips::MTTDSP ? Inst.getOperand(1).getReg()
+ : Inst.getOperand(0).getReg());
+
+ TOut.emitRRIII(IsMFTR ? Mips::MFTR : Mips::MTTR, Op0, Op1, u, sel, h, IDLoc,
+ STI);
+ return false;
+}
+
unsigned
MipsAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
const OperandVector &Operands) {
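
Reading expandMXTRAlias together with the mapping helpers above: the DSP aliases force sel to 1 and map the accumulator through getRegisterForMxtrDSP, so 'mftlo $2, $ac1' should expand to 'mftr $2, $a0, 1, 1, 0' (u keeps its default of 1, h stays 0), while 'mftc0 $4, $5, 2' should become 'mftr $4, $5, 0, 2, 0', since the C0 case clears u and takes sel from the third operand. These expansions are inferred from the code above, not quoted from test output.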
@@ -6329,6 +6554,39 @@ bool MipsAsmParser::parseSetNoOddSPRegDirective() {
return false;
}
+bool MipsAsmParser::parseSetMtDirective() {
+ MCAsmParser &Parser = getParser();
+ Parser.Lex(); // Eat "mt".
+
+ // If this is not the end of the statement, report an error.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token, expected end of statement");
+ return false;
+ }
+
+ setFeatureBits(Mips::FeatureMT, "mt");
+ getTargetStreamer().emitDirectiveSetMt();
+ Parser.Lex(); // Consume the EndOfStatement.
+ return false;
+}
+
+bool MipsAsmParser::parseSetNoMtDirective() {
+ MCAsmParser &Parser = getParser();
+ Parser.Lex(); // Eat "nomt".
+
+ // If this is not the end of the statement, report an error.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token, expected end of statement");
+ return false;
+ }
+
+ clearFeatureBits(Mips::FeatureMT, "mt");
+
+ getTargetStreamer().emitDirectiveSetNoMt();
+ Parser.Lex(); // Consume the EndOfStatement.
+ return false;
+}
+
bool MipsAsmParser::parseSetPopDirective() {
MCAsmParser &Parser = getParser();
SMLoc Loc = getLexer().getLoc();
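
In assembly these directives toggle the MT ASE for a region, so a sequence along the lines of '.set mt', some MT instructions, then '.set nomt' should assemble even when the target was not configured with the mt feature, while MT instructions outside such a region (and without -mattr=+mt or .module mt) are rejected by the HasMT predicate. This describes the intended usage implied by the parser code; the exact diagnostics depend on the instruction matcher.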
@@ -6829,6 +7087,10 @@ bool MipsAsmParser::parseDirectiveSet() {
return parseSetMsaDirective();
} else if (Tok.getString() == "nomsa") {
return parseSetNoMsaDirective();
+ } else if (Tok.getString() == "mt") {
+ return parseSetMtDirective();
+ } else if (Tok.getString() == "nomt") {
+ return parseSetNoMtDirective();
} else if (Tok.getString() == "softfloat") {
return parseSetSoftFloatDirective();
} else if (Tok.getString() == "hardfloat") {
@@ -7078,6 +7340,7 @@ bool MipsAsmParser::parseSSectionDirective(StringRef Section, unsigned Type) {
/// ::= .module fp=value
/// ::= .module softfloat
/// ::= .module hardfloat
+/// ::= .module mt
bool MipsAsmParser::parseDirectiveModule() {
MCAsmParser &Parser = getParser();
MCAsmLexer &Lexer = getLexer();
@@ -7177,6 +7440,25 @@ bool MipsAsmParser::parseDirectiveModule() {
}
return false; // parseDirectiveModule has finished successfully.
+ } else if (Option == "mt") {
+ setModuleFeatureBits(Mips::FeatureMT, "mt");
+
+ // Synchronize the ABI Flags information with the FeatureBits information we
+ // updated above.
+ getTargetStreamer().updateABIInfo(*this);
+
+ // If printing assembly, use the recently updated ABI Flags information.
+ // If generating ELF, don't do anything (the .MIPS.abiflags section gets
+ // emitted later).
+ getTargetStreamer().emitDirectiveModuleMT();
+
+ // If this is not the end of the statement, report an error.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token, expected end of statement");
+ return false;
+ }
+
+ return false; // parseDirectiveModule has finished successfully.
} else {
return Error(L, "'" + Twine(Option) + "' is not a valid .module option.");
}
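
'.module mt' is the file-scope counterpart: it sets FeatureMT for the whole module, and through updateABIInfo and the MipsABIFlagsSection change below the AFL_ASE_MT bit ends up in .MIPS.abiflags when emitting ELF, while the assembly streamer simply prints the directive. As with the other .module options, it is expected to appear before the first instruction or any directive that forbids module-level options.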
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
index f38541027023..9abd4f1d6b08 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
@@ -159,6 +159,8 @@ public:
ASESet |= Mips::AFL_ASE_MICROMIPS;
if (P.inMips16Mode())
ASESet |= Mips::AFL_ASE_MIPS16;
+ if (P.hasMT())
+ ASESet |= Mips::AFL_ASE_MT;
}
template <class PredicateLibrary>
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index ae48d6e38fa0..a1ed0ea4d7f3 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -238,7 +238,7 @@ static unsigned calculateMMLEIndex(unsigned i) {
void MipsAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
- bool IsPCRel) const {
+ bool IsResolved) const {
MCFixupKind Kind = Fixup.getKind();
MCContext &Ctx = Asm.getContext();
Value = adjustFixupValue(Fixup, Value, Ctx);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
index bf3b290b7ed5..8ebde3b9b7a4 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
@@ -40,7 +40,7 @@ public:
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override;
+ uint64_t Value, bool IsResolved) const override;
Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
diff --git a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
index 0cd4aebe4d16..7caeb08589af 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -50,6 +50,8 @@ void MipsTargetStreamer::emitDirectiveSetMacro() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetNoMacro() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetMsa() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetNoMsa() { forbidModuleDirective(); }
+void MipsTargetStreamer::emitDirectiveSetMt() {}
+void MipsTargetStreamer::emitDirectiveSetNoMt() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetAt() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetAtWithArg(unsigned RegNo) {
forbidModuleDirective();
@@ -118,6 +120,7 @@ void MipsTargetStreamer::emitDirectiveModuleOddSPReg() {
}
void MipsTargetStreamer::emitDirectiveModuleSoftFloat() {}
void MipsTargetStreamer::emitDirectiveModuleHardFloat() {}
+void MipsTargetStreamer::emitDirectiveModuleMT() {}
void MipsTargetStreamer::emitDirectiveSetFp(
MipsABIFlagsSection::FpABIKind Value) {
forbidModuleDirective();
@@ -190,6 +193,21 @@ void MipsTargetStreamer::emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1,
emitRRX(Opcode, Reg0, Reg1, MCOperand::createImm(Imm), IDLoc, STI);
}
+void MipsTargetStreamer::emitRRIII(unsigned Opcode, unsigned Reg0,
+ unsigned Reg1, int16_t Imm0, int16_t Imm1,
+ int16_t Imm2, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(MCOperand::createReg(Reg0));
+ TmpInst.addOperand(MCOperand::createReg(Reg1));
+ TmpInst.addOperand(MCOperand::createImm(Imm0));
+ TmpInst.addOperand(MCOperand::createImm(Imm1));
+ TmpInst.addOperand(MCOperand::createImm(Imm2));
+ TmpInst.setLoc(IDLoc);
+ getStreamer().EmitInstruction(TmpInst, *STI);
+}
+
void MipsTargetStreamer::emitAddu(unsigned DstReg, unsigned SrcReg,
unsigned TrgReg, bool Is64Bit,
const MCSubtargetInfo *STI) {
@@ -392,6 +410,16 @@ void MipsTargetAsmStreamer::emitDirectiveSetNoMsa() {
MipsTargetStreamer::emitDirectiveSetNoMsa();
}
+void MipsTargetAsmStreamer::emitDirectiveSetMt() {
+ OS << "\t.set\tmt\n";
+ MipsTargetStreamer::emitDirectiveSetMt();
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetNoMt() {
+ OS << "\t.set\tnomt\n";
+ MipsTargetStreamer::emitDirectiveSetNoMt();
+}
+
void MipsTargetAsmStreamer::emitDirectiveSetAt() {
OS << "\t.set\tat\n";
MipsTargetStreamer::emitDirectiveSetAt();
@@ -656,6 +684,10 @@ void MipsTargetAsmStreamer::emitDirectiveModuleHardFloat() {
OS << "\t.module\thardfloat\n";
}
+void MipsTargetAsmStreamer::emitDirectiveModuleMT() {
+ OS << "\t.module\tmt\n";
+}
+
// This part is for ELF object output.
MipsTargetELFStreamer::MipsTargetELFStreamer(MCStreamer &S,
const MCSubtargetInfo &STI)
diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td
index f24761d7d101..d2f0fdcc6cc1 100644
--- a/lib/Target/Mips/Mips.td
+++ b/lib/Target/Mips/Mips.td
@@ -188,6 +188,8 @@ def FeatureUseTCCInDIV : SubtargetFeature<
def FeatureMadd4 : SubtargetFeature<"nomadd4", "DisableMadd4", "true",
"Disable 4-operand madd.fmt and related instructions">;
+def FeatureMT : SubtargetFeature<"mt", "HasMT", "true", "Mips MT ASE">;
+
//===----------------------------------------------------------------------===//
// Mips processors supported.
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index 40078fb77144..89a5854bede0 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -240,7 +240,8 @@ def HasMSA : Predicate<"Subtarget->hasMSA()">,
AssemblerPredicate<"FeatureMSA">;
def HasMadd4 : Predicate<"!Subtarget->disableMadd4()">,
AssemblerPredicate<"!FeatureMadd4">;
-
+def HasMT : Predicate<"Subtarget->hasMT()">,
+ AssemblerPredicate<"FeatureMT">;
//===----------------------------------------------------------------------===//
// Mips GPR size adjectives.
@@ -382,6 +383,10 @@ class ASE_MSA64 {
list<Predicate> InsnPredicates = [HasMSA, HasMips64];
}
+class ASE_MT {
+ list <Predicate> InsnPredicates = [HasMT];
+}
+
// Class used for separating microMIPSr6 and microMIPS (r3) instruction.
// It can be used only on instructions that doesn't inherit PredicateControl.
class ISA_MICROMIPS_NOT_32R6_64R6 : PredicateControl {
@@ -2919,6 +2924,10 @@ include "MipsMSAInstrInfo.td"
include "MipsEVAInstrFormats.td"
include "MipsEVAInstrInfo.td"
+// MT
+include "MipsMTInstrFormats.td"
+include "MipsMTInstrInfo.td"
+
// Micromips
include "MicroMipsInstrFormats.td"
include "MicroMipsInstrInfo.td"
diff --git a/lib/Target/Mips/MipsMTInstrFormats.td b/lib/Target/Mips/MipsMTInstrFormats.td
new file mode 100644
index 000000000000..edc0981e6278
--- /dev/null
+++ b/lib/Target/Mips/MipsMTInstrFormats.td
@@ -0,0 +1,99 @@
+//===-- MipsMTInstrFormats.td - Mips Instruction Formats ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Describe the MIPS MT instruction formats.
+//
+// opcode - operation code.
+// rt - destination register
+//
+//===----------------------------------------------------------------------===//
+
+class MipsMTInst : MipsInst<(outs), (ins), "", [], NoItinerary, FrmOther>,
+ PredicateControl {
+ let DecoderNamespace = "Mips";
+ let EncodingPredicates = [HasStdEnc];
+}
+
+class OPCODE1<bits<1> Val> {
+ bits<1> Value = Val;
+}
+
+def OPCODE_SC_D : OPCODE1<0b0>;
+def OPCODE_SC_E : OPCODE1<0b1>;
+
+class FIELD5<bits<5> Val> {
+ bits<5> Value = Val;
+}
+
+def FIELD5_1_DMT_EMT : FIELD5<0b00001>;
+def FIELD5_2_DMT_EMT : FIELD5<0b01111>;
+def FIELD5_1_2_DVPE_EVPE : FIELD5<0b00000>;
+def FIELD5_MFTR : FIELD5<0b01000>;
+def FIELD5_MTTR : FIELD5<0b01100>;
+
+class COP0_MFMC0_MT<FIELD5 Op1, FIELD5 Op2, OPCODE1 sc> : MipsMTInst {
+ bits<32> Inst;
+
+ bits<5> rt;
+ let Inst{31-26} = 0b010000; // COP0
+ let Inst{25-21} = 0b01011; // MFMC0
+ let Inst{20-16} = rt;
+ let Inst{15-11} = Op1.Value;
+ let Inst{10-6} = Op2.Value;
+ let Inst{5} = sc.Value;
+ let Inst{4-3} = 0b00;
+ let Inst{2-0} = 0b001;
+}
+
+class COP0_MFTTR_MT<FIELD5 Op> : MipsMTInst {
+ bits<32> Inst;
+
+ bits<5> rt;
+ bits<5> rd;
+ bits<1> u;
+ bits<1> h;
+ bits<3> sel;
+ let Inst{31-26} = 0b010000; // COP0
+ let Inst{25-21} = Op.Value; // MFTR or MTTR
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = 0b00000; // rx - currently unsupported.
+ let Inst{5} = u;
+ let Inst{4} = h;
+ let Inst{3} = 0b0;
+ let Inst{2-0} = sel;
+}
+
+class SPECIAL3_MT_FORK : MipsMTInst {
+ bits<32> Inst;
+
+ bits<5> rs;
+ bits<5> rt;
+ bits<5> rd;
+ let Inst{31-26} = 0b011111; // SPECIAL3
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = 0b00000;
+ let Inst{5-0} = 0b001000; // FORK
+}
+
+class SPECIAL3_MT_YIELD : MipsMTInst {
+ bits<32> Inst;
+
+ bits<5> rs;
+ bits<5> rd;
+ let Inst{31-26} = 0b011111; // SPECIAL3
+ let Inst{25-21} = rs;
+ let Inst{20-16} = 0b00000;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = 0b00000;
+ let Inst{5-0} = 0b001001; // YIELD
+}
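
As a sanity check of the SPECIAL3_MT_FORK layout above: with the operand order from FORK_DESC in MipsMTInstrInfo.td below ('fork $rd, $rs, $rt'), 'fork $2, $3, $4' would encode as 011111 | rs=00011 | rt=00100 | rd=00010 | 00000 | 001000, i.e. 0x7c641008. This value is derived by hand from the field layout, not taken from an existing test.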
diff --git a/lib/Target/Mips/MipsMTInstrInfo.td b/lib/Target/Mips/MipsMTInstrInfo.td
new file mode 100644
index 000000000000..72e626cbec40
--- /dev/null
+++ b/lib/Target/Mips/MipsMTInstrInfo.td
@@ -0,0 +1,208 @@
+//===-- MipsMTInstrInfo.td - Mips MT Instruction Infos -----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the MIPS MT ASE as defined by MD00378 1.12.
+//
+// TODO: Add support for the microMIPS encodings for the MT ASE and add the
+// instruction mappings.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MIPS MT Instruction Encodings
+//===----------------------------------------------------------------------===//
+
+class DMT_ENC : COP0_MFMC0_MT<FIELD5_1_DMT_EMT, FIELD5_2_DMT_EMT,
+ OPCODE_SC_D>;
+
+class EMT_ENC : COP0_MFMC0_MT<FIELD5_1_DMT_EMT, FIELD5_2_DMT_EMT,
+ OPCODE_SC_E>;
+
+class DVPE_ENC : COP0_MFMC0_MT<FIELD5_1_2_DVPE_EVPE, FIELD5_1_2_DVPE_EVPE,
+ OPCODE_SC_D>;
+
+class EVPE_ENC : COP0_MFMC0_MT<FIELD5_1_2_DVPE_EVPE, FIELD5_1_2_DVPE_EVPE,
+ OPCODE_SC_E>;
+
+class FORK_ENC : SPECIAL3_MT_FORK;
+
+class YIELD_ENC : SPECIAL3_MT_YIELD;
+
+class MFTR_ENC : COP0_MFTTR_MT<FIELD5_MFTR>;
+
+class MTTR_ENC : COP0_MFTTR_MT<FIELD5_MTTR>;
+
+//===----------------------------------------------------------------------===//
+// MIPS MT Instruction Descriptions
+//===----------------------------------------------------------------------===//
+
+class MT_1R_DESC_BASE<string instr_asm, InstrItinClass Itin = NoItinerary> {
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins);
+ string AsmString = !strconcat(instr_asm, "\t$rt");
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = Itin;
+}
+
+class MFTR_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins GPR32Opnd:$rt, uimm1:$u, uimm3:$sel, uimm1:$h);
+ string AsmString = "mftr\t$rd, $rt, $u, $sel, $h";
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = II_MFTR;
+}
+
+class MTTR_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins GPR32Opnd:$rt, uimm1:$u, uimm3:$sel, uimm1:$h);
+ string AsmString = "mttr\t$rt, $rd, $u, $sel, $h";
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = II_MTTR;
+}
+
+class FORK_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rs, GPR32Opnd:$rd);
+ dag InOperandList = (ins GPR32Opnd:$rt);
+ string AsmString = "fork\t$rd, $rs, $rt";
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = II_FORK;
+}
+
+class YIELD_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins GPR32Opnd:$rs);
+ string AsmString = "yield\t$rd, $rs";
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = II_YIELD;
+}
+
+class DMT_DESC : MT_1R_DESC_BASE<"dmt", II_DMT>;
+
+class EMT_DESC : MT_1R_DESC_BASE<"emt", II_EMT>;
+
+class DVPE_DESC : MT_1R_DESC_BASE<"dvpe", II_DVPE>;
+
+class EVPE_DESC : MT_1R_DESC_BASE<"evpe", II_EVPE>;
+
+//===----------------------------------------------------------------------===//
+// MIPS MT Instruction Definitions
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 1, isNotDuplicable = 1,
+ AdditionalPredicates = [NotInMicroMips] in {
+ def DMT : DMT_ENC, DMT_DESC, ASE_MT;
+
+ def EMT : EMT_ENC, EMT_DESC, ASE_MT;
+
+ def DVPE : DVPE_ENC, DVPE_DESC, ASE_MT;
+
+ def EVPE : EVPE_ENC, EVPE_DESC, ASE_MT;
+
+ def FORK : FORK_ENC, FORK_DESC, ASE_MT;
+
+ def YIELD : YIELD_ENC, YIELD_DESC, ASE_MT;
+
+ def MFTR : MFTR_ENC, MFTR_DESC, ASE_MT;
+
+ def MTTR : MTTR_ENC, MTTR_DESC, ASE_MT;
+}
+
+//===----------------------------------------------------------------------===//
+// MIPS MT Pseudo Instructions - used to support mftr & mttr aliases.
+//===----------------------------------------------------------------------===//
+def MFTC0 : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins COP0Opnd:$rt,
+ uimm3:$sel),
+ "mftc0 $rd, $rt, $sel">, ASE_MT;
+
+def MFTGPR : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins GPR32Opnd:$rt,
+ uimm3:$sel),
+ "mftgpr $rd, $rt">, ASE_MT;
+
+def MFTLO : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins ACC64DSPOpnd:$ac),
+ "mftlo $rt, $ac">, ASE_MT;
+
+def MFTHI : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins ACC64DSPOpnd:$ac),
+ "mfthi $rt, $ac">, ASE_MT;
+
+def MFTACX : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins ACC64DSPOpnd:$ac),
+ "mftacx $rt, $ac">, ASE_MT;
+
+def MFTDSP : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins),
+ "mftdsp $rt">, ASE_MT;
+
+def MFTC1 : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins FGR32Opnd:$ft),
+ "mftc1 $rt, $ft">, ASE_MT;
+
+def MFTHC1 : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins FGR32Opnd:$ft),
+ "mfthc1 $rt, $ft">, ASE_MT;
+
+def CFTC1 : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins FGRCCOpnd:$ft),
+ "cftc1 $rt, $ft">, ASE_MT;
+
+
+def MTTC0 : MipsAsmPseudoInst<(outs COP0Opnd:$rd), (ins GPR32Opnd:$rt,
+ uimm3:$sel),
+ "mttc0 $rt, $rd, $sel">, ASE_MT;
+
+def MTTGPR : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins GPR32Opnd:$rd),
+ "mttgpr $rd, $rt">, ASE_MT;
+
+def MTTLO : MipsAsmPseudoInst<(outs ACC64DSPOpnd:$ac), (ins GPR32Opnd:$rt),
+ "mttlo $rt, $ac">, ASE_MT;
+
+def MTTHI : MipsAsmPseudoInst<(outs ACC64DSPOpnd:$ac), (ins GPR32Opnd:$rt),
+ "mtthi $rt, $ac">, ASE_MT;
+
+def MTTACX : MipsAsmPseudoInst<(outs ACC64DSPOpnd:$ac), (ins GPR32Opnd:$rt),
+ "mttacx $rt, $ac">, ASE_MT;
+
+def MTTDSP : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rt),
+ "mttdsp $rt">, ASE_MT;
+
+def MTTC1 : MipsAsmPseudoInst<(outs FGR32Opnd:$ft), (ins GPR32Opnd:$rt),
+ "mttc1 $rt, $ft">, ASE_MT;
+
+def MTTHC1 : MipsAsmPseudoInst<(outs FGR32Opnd:$ft), (ins GPR32Opnd:$rt),
+ "mtthc1 $rt, $ft">, ASE_MT;
+
+def CTTC1 : MipsAsmPseudoInst<(outs FGRCCOpnd:$ft), (ins GPR32Opnd:$rt),
+ "cttc1 $rt, $ft">, ASE_MT;
+
+//===----------------------------------------------------------------------===//
+// MIPS MT Instruction Aliases
+//===----------------------------------------------------------------------===//
+
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsInstAlias<"dmt", (DMT ZERO), 1>, ASE_MT;
+
+ def : MipsInstAlias<"emt", (EMT ZERO), 1>, ASE_MT;
+
+ def : MipsInstAlias<"dvpe", (DVPE ZERO), 1>, ASE_MT;
+
+ def : MipsInstAlias<"evpe", (EVPE ZERO), 1>, ASE_MT;
+
+ def : MipsInstAlias<"yield $rs", (YIELD ZERO, GPR32Opnd:$rs), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mftc0 $rd, $rt", (MFTC0 GPR32Opnd:$rd, COP0Opnd:$rt, 0),
+ 1>, ASE_MT;
+
+ def : MipsInstAlias<"mftlo $rt", (MFTLO GPR32Opnd:$rt, AC0), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mfthi $rt", (MFTHI GPR32Opnd:$rt, AC0), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mftacx $rt", (MFTACX GPR32Opnd:$rt, AC0), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mttc0 $rd, $rt", (MTTC0 COP0Opnd:$rt, GPR32Opnd:$rd, 0),
+ 1>, ASE_MT;
+
+ def : MipsInstAlias<"mttlo $rt", (MTTLO AC0, GPR32Opnd:$rt), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mtthi $rt", (MTTHI AC0, GPR32Opnd:$rt), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mttacx $rt", (MTTACX AC0, GPR32Opnd:$rt), 1>, ASE_MT;
+}
diff --git a/lib/Target/Mips/MipsSchedule.td b/lib/Target/Mips/MipsSchedule.td
index c0de59ba15f5..8ec55ab6284d 100644
--- a/lib/Target/Mips/MipsSchedule.td
+++ b/lib/Target/Mips/MipsSchedule.td
@@ -84,6 +84,7 @@ def II_DIVU : InstrItinClass;
def II_DIV_D : InstrItinClass;
def II_DIV_S : InstrItinClass;
def II_DMFC0 : InstrItinClass;
+def II_DMT : InstrItinClass;
def II_DMTC0 : InstrItinClass;
def II_DMFC1 : InstrItinClass;
def II_DMTC1 : InstrItinClass;
@@ -113,8 +114,12 @@ def II_DSBH : InstrItinClass;
def II_DSHD : InstrItinClass;
def II_DSUBU : InstrItinClass;
def II_DSUB : InstrItinClass;
+def II_DVPE : InstrItinClass;
+def II_EMT : InstrItinClass;
+def II_EVPE : InstrItinClass;
def II_EXT : InstrItinClass; // Any EXT instruction
def II_FLOOR : InstrItinClass;
+def II_FORK : InstrItinClass;
def II_INS : InstrItinClass; // Any INS instruction
def II_IndirectBranchPseudo : InstrItinClass; // Indirect branch pseudo.
def II_J : InstrItinClass;
@@ -221,6 +226,7 @@ def II_MFC1 : InstrItinClass;
def II_MFHC1 : InstrItinClass;
def II_MFC2 : InstrItinClass;
def II_MFHI_MFLO : InstrItinClass; // mfhi and mflo
+def II_MFTR : InstrItinClass;
def II_MOD : InstrItinClass;
def II_MODU : InstrItinClass;
def II_MOVE : InstrItinClass;
@@ -250,6 +256,7 @@ def II_MTC1 : InstrItinClass;
def II_MTHC1 : InstrItinClass;
def II_MTC2 : InstrItinClass;
def II_MTHI_MTLO : InstrItinClass; // mthi and mtlo
+def II_MTTR : InstrItinClass;
def II_MUL : InstrItinClass;
def II_MUH : InstrItinClass;
def II_MUHU : InstrItinClass;
@@ -345,6 +352,7 @@ def II_WRPGPR : InstrItinClass;
def II_RDPGPR : InstrItinClass;
def II_DVP : InstrItinClass;
def II_EVP : InstrItinClass;
+def II_YIELD : InstrItinClass;
//===----------------------------------------------------------------------===//
// Mips Generic instruction itineraries.
@@ -386,6 +394,7 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<II_DCLZ , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DMOD , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_DMODU , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_DMT , [InstrStage<2, [ALU]>]>,
InstrItinData<II_DSLL , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DSLL32 , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DSRL , [InstrStage<1, [ALU]>]>,
@@ -404,7 +413,11 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<II_DSHD , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DCLO , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DCLZ , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DVPE , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_EMT , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_EVPE , [InstrStage<2, [ALU]>]>,
InstrItinData<II_EXT , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_FORK , [InstrStage<1, [ALU]>]>,
InstrItinData<II_INS , [InstrStage<1, [ALU]>]>,
InstrItinData<II_LUI , [InstrStage<1, [ALU]>]>,
InstrItinData<II_MOVE , [InstrStage<1, [ALU]>]>,
@@ -653,12 +666,14 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<II_MFHC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MFC1 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MFC2 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MFTR , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTHC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTC1 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTC2 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MFHC1 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTHC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MTTR , [InstrStage<2, [ALU]>]>,
InstrItinData<II_CACHE , [InstrStage<1, [ALU]>]>,
InstrItinData<II_PREF , [InstrStage<1, [ALU]>]>,
InstrItinData<II_CACHEE , [InstrStage<1, [ALU]>]>,
@@ -670,5 +685,6 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<II_WRPGPR , [InstrStage<1, [ALU]>]>,
InstrItinData<II_RDPGPR , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DVP , [InstrStage<1, [ALU]>]>,
- InstrItinData<II_EVP , [InstrStage<1, [ALU]>]>
+ InstrItinData<II_EVP , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_YIELD , [InstrStage<5, [ALU]>]>
]>;
diff --git a/lib/Target/Mips/MipsScheduleGeneric.td b/lib/Target/Mips/MipsScheduleGeneric.td
index 15a0401b781e..89cda676441e 100644
--- a/lib/Target/Mips/MipsScheduleGeneric.td
+++ b/lib/Target/Mips/MipsScheduleGeneric.td
@@ -187,7 +187,11 @@ def GenericIssueCOP0 : ProcResource<1> { let Super = GenericCOP0; }
def GenericWriteCOP0TLB : SchedWriteRes<[GenericIssueCOP0]> { let Latency = 4; }
def GenericWriteCOP0 : SchedWriteRes<[GenericIssueCOP0]> { let Latency = 3; }
def GenericReadCOP0 : SchedWriteRes<[GenericIssueCOP0]> { let Latency = 2; }
-def GnereicReadWritePGPR : SchedWriteRes<[GenericIssueCOP0]>;
+def GenericReadWritePGPR : SchedWriteRes<[GenericIssueCOP0]>;
+def GenericReadWriteCOP0Long : SchedWriteRes<[GenericIssueCOP0]> {
+ let Latency = 5;
+}
+def GenericWriteCOP0Short : SchedWriteRes<[GenericIssueCOP0]>;
def : ItinRW<[GenericWriteCOP0TLB], [II_TLBP, II_TLBR, II_TLBWI, II_TLBWR]>;
def : ItinRW<[GenericWriteCOP0TLB], [II_TLBINV, II_TLBINVF]>;
@@ -261,6 +265,14 @@ def : ItinRW<[GenericWriteLoad], [II_LBE, II_LBUE, II_LHE, II_LHUE, II_LWE,
def : ItinRW<[GenericWriteLoad], [II_LWLE, II_LWRE]>;
+// MIPS MT instructions
+// ====================
+
+def : ItinRW<[GenericWriteMove], [II_DMT, II_DVPE, II_EMT, II_EVPE]>;
+
+def : ItinRW<[GenericReadWriteCOP0Long], [II_YIELD]>;
+def : ItinRW<[GenericWriteCOP0Short], [II_FORK]>;
+
// MIPS32R6 and MIPS16e
// ====================
diff --git a/lib/Target/Mips/MipsScheduleP5600.td b/lib/Target/Mips/MipsScheduleP5600.td
index 882a241d1426..fedfac24e4e7 100644
--- a/lib/Target/Mips/MipsScheduleP5600.td
+++ b/lib/Target/Mips/MipsScheduleP5600.td
@@ -19,7 +19,7 @@ def MipsP5600Model : SchedMachineModel {
HasMips64, HasMips64r2, HasCnMips,
InMicroMips, InMips16Mode,
HasMicroMips32r6, HasMicroMips64r6,
- HasDSP, HasDSPR2];
+ HasDSP, HasDSPR2, HasMT];
}
diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp
index 154d5825427b..eba21e0a1c67 100644
--- a/lib/Target/Mips/MipsSubtarget.cpp
+++ b/lib/Target/Mips/MipsSubtarget.cpp
@@ -70,7 +70,8 @@ MipsSubtarget::MipsSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
InMips16HardFloat(Mips16HardFloat), InMicroMipsMode(false), HasDSP(false),
HasDSPR2(false), HasDSPR3(false), AllowMixed16_32(Mixed16_32 | Mips_Os16),
Os16(Mips_Os16), HasMSA(false), UseTCCInDIV(false), HasSym32(false),
- HasEVA(false), DisableMadd4(false), TM(TM), TargetTriple(TT), TSInfo(),
+ HasEVA(false), DisableMadd4(false), HasMT(false), TM(TM),
+ TargetTriple(TT), TSInfo(),
InstrInfo(
MipsInstrInfo::create(initializeSubtargetDependencies(CPU, FS, TM))),
FrameLowering(MipsFrameLowering::create(*this)),
diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h
index ccd47f00c0d3..7619e7b08612 100644
--- a/lib/Target/Mips/MipsSubtarget.h
+++ b/lib/Target/Mips/MipsSubtarget.h
@@ -149,6 +149,9 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
// related instructions.
bool DisableMadd4;
+ // HasMT -- support MT ASE.
+ bool HasMT;
+
InstrItineraryData InstrItins;
// We can override the determination of whether we are in mips16 mode
@@ -259,6 +262,7 @@ public:
bool hasMSA() const { return HasMSA; }
bool disableMadd4() const { return DisableMadd4; }
bool hasEVA() const { return HasEVA; }
+ bool hasMT() const { return HasMT; }
bool useSmallSection() const { return UseSmallSection; }
bool hasStandardEncoding() const { return !inMips16Mode(); }
diff --git a/lib/Target/Mips/MipsTargetStreamer.h b/lib/Target/Mips/MipsTargetStreamer.h
index 41ebe411b98d..af24838665e1 100644
--- a/lib/Target/Mips/MipsTargetStreamer.h
+++ b/lib/Target/Mips/MipsTargetStreamer.h
@@ -40,6 +40,8 @@ public:
virtual void emitDirectiveSetNoMacro();
virtual void emitDirectiveSetMsa();
virtual void emitDirectiveSetNoMsa();
+ virtual void emitDirectiveSetMt();
+ virtual void emitDirectiveSetNoMt();
virtual void emitDirectiveSetAt();
virtual void emitDirectiveSetAtWithArg(unsigned RegNo);
virtual void emitDirectiveSetNoAt();
@@ -96,6 +98,7 @@ public:
virtual void emitDirectiveModuleOddSPReg();
virtual void emitDirectiveModuleSoftFloat();
virtual void emitDirectiveModuleHardFloat();
+ virtual void emitDirectiveModuleMT();
virtual void emitDirectiveSetFp(MipsABIFlagsSection::FpABIKind Value);
virtual void emitDirectiveSetOddSPReg();
virtual void emitDirectiveSetNoOddSPReg();
@@ -116,6 +119,9 @@ public:
SMLoc IDLoc, const MCSubtargetInfo *STI);
void emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm,
SMLoc IDLoc, const MCSubtargetInfo *STI);
+ void emitRRIII(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm0,
+ int16_t Imm1, int16_t Imm2, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
void emitAddu(unsigned DstReg, unsigned SrcReg, unsigned TrgReg, bool Is64Bit,
const MCSubtargetInfo *STI);
void emitDSLL(unsigned DstReg, unsigned SrcReg, int16_t ShiftAmount,
@@ -204,6 +210,8 @@ public:
void emitDirectiveSetNoMacro() override;
void emitDirectiveSetMsa() override;
void emitDirectiveSetNoMsa() override;
+ void emitDirectiveSetMt() override;
+ void emitDirectiveSetNoMt() override;
void emitDirectiveSetAt() override;
void emitDirectiveSetAtWithArg(unsigned RegNo) override;
void emitDirectiveSetNoAt() override;
@@ -267,6 +275,7 @@ public:
void emitDirectiveModuleOddSPReg() override;
void emitDirectiveModuleSoftFloat() override;
void emitDirectiveModuleHardFloat() override;
+ void emitDirectiveModuleMT() override;
void emitDirectiveSetFp(MipsABIFlagsSection::FpABIKind Value) override;
void emitDirectiveSetOddSPReg() override;
void emitDirectiveSetNoOddSPReg() override;
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index f26b9a7cb8dd..f800d91f4093 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -62,7 +62,6 @@
#include <utility>
#include <vector>
-#undef DEBUG_TYPE
#define DEBUG_TYPE "nvptx-lower"
using namespace llvm;
@@ -2456,7 +2455,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
// v2f16 was loaded as an i32. Now we must bitcast it back.
else if (EltVT == MVT::v2f16)
Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);
- // Extend the element if necesary (e.g. an i8 is loaded
+ // Extend the element if necessary (e.g. an i8 is loaded
// into an i16 register)
if (Ins[InsIdx].VT.isInteger() &&
Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {
diff --git a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
index 3be291b48b8f..989f0a3aba2f 100644
--- a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "NVPTXLowerAggrCopies.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -42,6 +43,7 @@ struct NVPTXLowerAggrCopies : public FunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addPreserved<StackProtector>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
}
bool runOnFunction(Function &F) override;
@@ -61,6 +63,8 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
const DataLayout &DL = F.getParent()->getDataLayout();
LLVMContext &Context = F.getParent()->getContext();
+ const TargetTransformInfo &TTI =
+ getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
// Collect all aggregate loads and mem* calls.
for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
@@ -104,15 +108,26 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
Value *SrcAddr = LI->getOperand(0);
Value *DstAddr = SI->getOperand(1);
unsigned NumLoads = DL.getTypeStoreSize(LI->getType());
- Value *CopyLen = ConstantInt::get(Type::getInt32Ty(Context), NumLoads);
-
- createMemCpyLoop(/* ConvertedInst */ SI,
- /* SrcAddr */ SrcAddr, /* DstAddr */ DstAddr,
- /* CopyLen */ CopyLen,
- /* SrcAlign */ LI->getAlignment(),
- /* DestAlign */ SI->getAlignment(),
- /* SrcIsVolatile */ LI->isVolatile(),
- /* DstIsVolatile */ SI->isVolatile());
+ ConstantInt *CopyLen =
+ ConstantInt::get(Type::getInt32Ty(Context), NumLoads);
+
+ if (!TTI.useWideIRMemcpyLoopLowering()) {
+ createMemCpyLoop(/* ConvertedInst */ SI,
+ /* SrcAddr */ SrcAddr, /* DstAddr */ DstAddr,
+ /* CopyLen */ CopyLen,
+ /* SrcAlign */ LI->getAlignment(),
+ /* DestAlign */ SI->getAlignment(),
+ /* SrcIsVolatile */ LI->isVolatile(),
+ /* DstIsVolatile */ SI->isVolatile());
+ } else {
+ createMemCpyLoopKnownSize(/* ConvertedInst */ SI,
+ /* SrcAddr */ SrcAddr, /* DstAddr */ DstAddr,
+ /* CopyLen */ CopyLen,
+ /* SrcAlign */ LI->getAlignment(),
+ /* DestAlign */ SI->getAlignment(),
+ /* SrcIsVolatile */ LI->isVolatile(),
+ /* DstIsVolatile */ SI->isVolatile(), TTI);
+ }
SI->eraseFromParent();
LI->eraseFromParent();
@@ -121,7 +136,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
// Transform mem* intrinsic calls.
for (MemIntrinsic *MemCall : MemCalls) {
if (MemCpyInst *Memcpy = dyn_cast<MemCpyInst>(MemCall)) {
- expandMemCpyAsLoop(Memcpy);
+ expandMemCpyAsLoop(Memcpy, TTI);
} else if (MemMoveInst *Memmove = dyn_cast<MemMoveInst>(MemCall)) {
expandMemMoveAsLoop(Memmove);
} else if (MemSetInst *Memset = dyn_cast<MemSetInst>(MemCall)) {
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
index 7393f3d7a08a..bdad2fe8714f 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -115,7 +115,7 @@ public:
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override {
+ uint64_t Value, bool IsResolved) const override {
Value = adjustFixupValue(Fixup.getKind(), Value);
if (!Value) return; // Doesn't change encoding.
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index 094d3e6a61b5..53f33ac1fc0e 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -607,7 +607,10 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
// The old condition may be dead now, and may have even created a dead PHI
// (the original induction variable).
RecursivelyDeleteTriviallyDeadInstructions(OldCond);
- DeleteDeadPHIs(CountedExitBlock);
+ // Run through the basic blocks of the loop and see if any of them have dead
+ // PHIs that can be removed.
+ for (auto I : L->blocks())
+ DeleteDeadPHIs(I);
++NumCTRLoops;
return MadeChange;
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index c2c115cb6daf..b49c3345a17d 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -435,22 +435,19 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,
const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
- // If we are a leaf function, and use up to 224 bytes of stack space,
- // don't have a frame pointer, calls, or dynamic alloca then we do not need
- // to adjust the stack pointer (we fit in the Red Zone).
- // The 32-bit SVR4 ABI has no Red Zone. However, it can still generate
- // stackless code if all local vars are reg-allocated.
- bool DisableRedZone = MF.getFunction()->hasFnAttribute(Attribute::NoRedZone);
unsigned LR = RegInfo->getRARegister();
- if (!DisableRedZone &&
- (Subtarget.isPPC64() || // 32-bit SVR4, no stack-
- !Subtarget.isSVR4ABI() || // allocated locals.
- FrameSize == 0) &&
- FrameSize <= 224 && // Fits in red zone.
- !MFI.hasVarSizedObjects() && // No dynamic alloca.
- !MFI.adjustsStack() && // No calls.
- !MustSaveLR(MF, LR) &&
- !RegInfo->hasBasePointer(MF)) { // No special alignment.
+ bool DisableRedZone = MF.getFunction()->hasFnAttribute(Attribute::NoRedZone);
+ bool CanUseRedZone = !MFI.hasVarSizedObjects() && // No dynamic alloca.
+ !MFI.adjustsStack() && // No calls.
+ !MustSaveLR(MF, LR) && // No need to save LR.
+ !RegInfo->hasBasePointer(MF); // No special alignment.
+
+ // Note: for PPC32 SVR4ABI (Non-DarwinABI), we can still generate stackless
+ // code if all local vars are reg-allocated.
+ bool FitsInRedZone = FrameSize <= Subtarget.getRedZoneSize();
+
+ // Check whether we can skip adjusting the stack pointer (by using red zone)
+ if (!DisableRedZone && CanUseRedZone && FitsInRedZone) {
// No need for frame
if (UpdateMF)
MFI.setStackSize(0);
@@ -1869,8 +1866,13 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
}
if (HasVRSaveArea) {
- // Insert alignment padding, we need 16-byte alignment.
- LowerBound = (LowerBound - 15) & ~(15);
+ // Insert alignment padding; we need 16-byte alignment. Note: for positive
+ // numbers the alignment formula is y = (x + (n-1)) & ~(n-1). But since we
+ // are using a negative number here (the stack grows downward), we should
+ // use the formula y = x & ~(n-1), where x is the size before aligning, n
+ // is the alignment size (n = 16 here) and y is the size after aligning.
+ assert(LowerBound <= 0 && "Expect LowerBound to have a non-positive value!");
+ LowerBound &= ~(15);
for (unsigned i = 0, e = VRegs.size(); i != e; ++i) {
int FI = VRegs[i].getFrameIdx();
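The alignment note above can be exercised directly; the offset -100 below is an arbitrary illustrative value, not taken from the surrounding code.

#include <cassert>

int main() {
  // Align a negative stack offset down to a 16-byte boundary: y = x & ~(n-1).
  int LowerBound = -100;
  LowerBound &= ~15; // rounds toward the more negative multiple of 16
  assert(LowerBound == -112 && LowerBound % 16 == 0);
  return 0;
}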
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 535b9deaefac..3aaf7ef2c2a0 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -419,25 +419,6 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
.getNode();
}
-/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
-/// or 64-bit immediate, and if the value can be accurately represented as a
-/// sign extension from a 16-bit value. If so, this returns true and the
-/// immediate.
-static bool isIntS16Immediate(SDNode *N, short &Imm) {
- if (N->getOpcode() != ISD::Constant)
- return false;
-
- Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
- if (N->getValueType(0) == MVT::i32)
- return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
- else
- return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
-}
-
-static bool isIntS16Immediate(SDValue Op, short &Imm) {
- return isIntS16Immediate(Op.getNode(), Imm);
-}
-
/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
@@ -728,7 +709,10 @@ static uint64_t Rot64(uint64_t Imm, unsigned R) {
static unsigned getInt64Count(int64_t Imm) {
unsigned Count = getInt64CountDirect(Imm);
- if (Count == 1)
+
+ // If the instruction count is 1 or 2, we do not need further analysis
+ // since rotate + load constant requires at least 2 instructions.
+ if (Count <= 2)
return Count;
for (unsigned r = 1; r < 63; ++r) {
@@ -838,7 +822,10 @@ static SDNode *getInt64Direct(SelectionDAG *CurDAG, const SDLoc &dl,
static SDNode *getInt64(SelectionDAG *CurDAG, const SDLoc &dl, int64_t Imm) {
unsigned Count = getInt64CountDirect(Imm);
- if (Count == 1)
+
+ // If the instruction count is 1 or 2, we do not need further analysis
+ // since rotate + load constant requires at least 2 instructions.
+ if (Count <= 2)
return getInt64Direct(CurDAG, dl, Imm);
unsigned RMin = 0;
@@ -2126,7 +2113,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
getI32Imm(Imm & 0xFFFF, dl)), 0);
Opc = PPC::CMPLW;
} else {
- short SImm;
+ int16_t SImm;
if (isIntS16Immediate(RHS, SImm))
return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS,
getI32Imm((int)SImm & 0xFFFF,
@@ -2173,7 +2160,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
getI64Imm(Imm & 0xFFFF, dl)), 0);
Opc = PPC::CMPLD;
} else {
- short SImm;
+ int16_t SImm;
if (isIntS16Immediate(RHS, SImm))
return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS,
getI64Imm(SImm & 0xFFFF, dl)),
@@ -3323,7 +3310,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
if (tryLogicOpOfCompares(N))
return;
- short Imm;
+ int16_t Imm;
if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
isIntS16Immediate(N->getOperand(1), Imm)) {
KnownBits LHSKnown;
@@ -3346,7 +3333,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
break;
}
case ISD::ADD: {
- short Imm;
+ int16_t Imm;
if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
isIntS16Immediate(N->getOperand(1), Imm)) {
selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
@@ -4034,11 +4021,13 @@ void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) {
O0.getNode(), O1.getNode());
};
+ // FIXME: When the semantics of the interaction between select and undef
+ // are clearly defined, it may turn out to be unnecessary to break here.
SDValue TrueRes = TryFold(ConstTrue);
- if (!TrueRes)
+ if (!TrueRes || TrueRes.isUndef())
break;
SDValue FalseRes = TryFold(ConstFalse);
- if (!FalseRes)
+ if (!FalseRes || FalseRes.isUndef())
break;
// For us to materialize these using one instruction, we must be able to
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 72f14e969138..0e069ec1665f 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -136,6 +136,10 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
}
+ // Match BITREVERSE to customized fast code sequence in the td file.
+ setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
+ setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
+
// PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
for (MVT VT : MVT::integer_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
@@ -1168,6 +1172,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
case PPCISD::STXSIX: return "PPCISD::STXSIX";
case PPCISD::VEXTS: return "PPCISD::VEXTS";
+ case PPCISD::SExtVElems: return "PPCISD::SExtVElems";
case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
@@ -2028,17 +2033,17 @@ int PPC::isQVALIGNIShuffleMask(SDNode *N) {
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
-static bool isIntS16Immediate(SDNode *N, short &Imm) {
+bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
- Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
if (N->getValueType(0) == MVT::i32)
return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
else
return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
-static bool isIntS16Immediate(SDValue Op, short &Imm) {
+bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
return isIntS16Immediate(Op.getNode(), Imm);
}
@@ -2048,7 +2053,7 @@ static bool isIntS16Immediate(SDValue Op, short &Imm) {
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
SDValue &Index,
SelectionDAG &DAG) const {
- short imm = 0;
+ int16_t imm = 0;
if (N.getOpcode() == ISD::ADD) {
if (isIntS16Immediate(N.getOperand(1), imm))
return false; // r+i
@@ -2138,7 +2143,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
return false;
if (N.getOpcode() == ISD::ADD) {
- short imm = 0;
+ int16_t imm = 0;
if (isIntS16Immediate(N.getOperand(1), imm) &&
(!Aligned || (imm & 3) == 0)) {
Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
@@ -2162,7 +2167,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
return true; // [&g+r]
}
} else if (N.getOpcode() == ISD::OR) {
- short imm = 0;
+ int16_t imm = 0;
if (isIntS16Immediate(N.getOperand(1), imm) &&
(!Aligned || (imm & 3) == 0)) {
// If this is an or of disjoint bitfields, we can codegen this as an add
@@ -2190,7 +2195,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
// If this address fits entirely in a 16-bit sext immediate field, codegen
// this as "d, 0"
- short Imm;
+ int16_t Imm;
if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
@@ -2235,10 +2240,15 @@ bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
if (SelectAddressRegReg(N, Base, Index, DAG))
return true;
- // If the operand is an addition, always emit this as [r+r], since this is
- // better (for code size, and execution, as the memop does the add for free)
- // than emitting an explicit add.
- if (N.getOpcode() == ISD::ADD) {
+ // If the address is the result of an add, we will utilize the fact that the
+ // address calculation includes an implicit add. However, we can reduce
+ // register pressure if we do not materialize a constant just for use as the
+ // index register. We only get rid of the add if it is not an add of a
+ // value and a 16-bit signed constant where both operands have a single use.
+ int16_t imm = 0;
+ if (N.getOpcode() == ISD::ADD &&
+ (!isIntS16Immediate(N.getOperand(1), imm) ||
+ !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
Base = N.getOperand(0);
Index = N.getOperand(1);
return true;
@@ -6422,7 +6432,7 @@ PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
- // Get the corect type for integers.
+ // Get the correct type for integers.
EVT IntVT = Op.getValueType();
// Get the inputs.
@@ -6439,7 +6449,7 @@ SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
// When we pop the dynamic allocation we need to restore the SP link.
SDLoc dl(Op);
- // Get the corect type for pointers.
+ // Get the correct type for pointers.
EVT PtrVT = getPointerTy(DAG.getDataLayout());
// Construct the stack pointer operand.
@@ -6514,7 +6524,7 @@ SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SDValue Size = Op.getOperand(1);
SDLoc dl(Op);
- // Get the corect type for pointers.
+ // Get the correct type for pointers.
EVT PtrVT = getPointerTy(DAG.getDataLayout());
// Negate the size.
SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
@@ -6645,6 +6655,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
default: break; // SETUO etc aren't handled by fsel.
case ISD::SETNE:
std::swap(TV, FV);
+ LLVM_FALLTHROUGH;
case ISD::SETEQ:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
@@ -6656,6 +6667,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
case ISD::SETULT:
case ISD::SETLT:
std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
+ LLVM_FALLTHROUGH;
case ISD::SETOGE:
case ISD::SETGE:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
@@ -6664,6 +6676,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
case ISD::SETUGT:
case ISD::SETGT:
std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
+ LLVM_FALLTHROUGH;
case ISD::SETOLE:
case ISD::SETLE:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
@@ -6677,6 +6690,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
default: break; // SETUO etc aren't handled by fsel.
case ISD::SETNE:
std::swap(TV, FV);
+ LLVM_FALLTHROUGH;
case ISD::SETEQ:
Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
@@ -11311,6 +11325,132 @@ static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
+// This function adds the vector_shuffle needed to get
+// the elements of the vector extract in the correct position
+// as specified by the CorrectElems encoding.
+static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
+ SDValue Input, uint64_t Elems,
+ uint64_t CorrectElems) {
+ SDLoc dl(N);
+
+ unsigned NumElems = Input.getValueType().getVectorNumElements();
+ SmallVector<int, 16> ShuffleMask(NumElems, -1);
+
+ // Knowing the element indices being extracted from the original
+ // vector and the order in which they're being inserted, just put
+ // them at element indices required for the instruction.
+ for (unsigned i = 0; i < N->getNumOperands(); i++) {
+ if (DAG.getDataLayout().isLittleEndian())
+ ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
+ else
+ ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
+ CorrectElems = CorrectElems >> 8;
+ Elems = Elems >> 8;
+ }
+
+ SDValue Shuffle =
+ DAG.getVectorShuffle(Input.getValueType(), dl, Input,
+ DAG.getUNDEF(Input.getValueType()), ShuffleMask);
+
+ EVT Ty = N->getValueType(0);
+ SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
+ return BV;
+}
+
+// Look for build vector patterns where input operands come from sign
+// extended vector_extract elements of specific indices. If the correct indices
+// aren't used, add a vector shuffle to fix up the indices and create a new
+// PPCISD::SExtVElems node which selects the vector sign extend instructions
+// during instruction selection.
+static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
+ // This array encodes the indices that the vector sign extend instructions
+ // extract from when extending from one type to another for both BE and LE.
+ // The right nibble of each byte corresponds to the LE indices and the
+ // left nibble of each byte corresponds to the BE indices.
+ // For example: 0x3074B8FC byte->word
+ // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
+ // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
+ // For example: 0x000070F8 byte->double word
+ // For LE: the allowed indices are: 0x0,0x8
+ // For BE: the allowed indices are: 0x7,0xF
+ uint64_t TargetElems[] = {
+ 0x3074B8FC, // b->w
+ 0x000070F8, // b->d
+ 0x10325476, // h->w
+ 0x00003074, // h->d
+ 0x00001032, // w->d
+ };
+
+ uint64_t Elems = 0;
+ int Index;
+ SDValue Input;
+
+ auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
+ if (!Op)
+ return false;
+ if (Op.getOpcode() != ISD::SIGN_EXTEND)
+ return false;
+
+ SDValue Extract = Op.getOperand(0);
+ if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return false;
+
+ ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
+ if (!ExtOp)
+ return false;
+
+ Index = ExtOp->getZExtValue();
+ if (Input && Input != Extract.getOperand(0))
+ return false;
+
+ if (!Input)
+ Input = Extract.getOperand(0);
+
+ Elems = Elems << 8;
+ Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
+ Elems |= Index;
+
+ return true;
+ };
+
+ // If the build vector operands aren't sign extended vector extracts
+ // of the same input vector, then return.
+ for (unsigned i = 0; i < N->getNumOperands(); i++) {
+ if (!isSExtOfVecExtract(N->getOperand(i))) {
+ return SDValue();
+ }
+ }
+
+ // If the vector extract indices are not correct, add the appropriate
+ // vector_shuffle.
+ int TgtElemArrayIdx;
+ int InputSize = Input.getValueType().getScalarSizeInBits();
+ int OutputSize = N->getValueType(0).getScalarSizeInBits();
+ if (InputSize + OutputSize == 40)
+ TgtElemArrayIdx = 0;
+ else if (InputSize + OutputSize == 72)
+ TgtElemArrayIdx = 1;
+ else if (InputSize + OutputSize == 48)
+ TgtElemArrayIdx = 2;
+ else if (InputSize + OutputSize == 80)
+ TgtElemArrayIdx = 3;
+ else if (InputSize + OutputSize == 96)
+ TgtElemArrayIdx = 4;
+ else
+ return SDValue();
+
+ uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
+ CorrectElems = DAG.getDataLayout().isLittleEndian()
+ ? CorrectElems & 0x0F0F0F0F0F0F0F0F
+ : CorrectElems & 0xF0F0F0F0F0F0F0F0;
+ if (Elems != CorrectElems) {
+ return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
+ }
+
+ // Regular lowering will catch cases where a shuffle is not needed.
+ return SDValue();
+}
+
SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
DAGCombinerInfo &DCI) const {
assert(N->getOpcode() == ISD::BUILD_VECTOR &&
@@ -11338,6 +11478,15 @@ SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
if (Reduced)
return Reduced;
+ // If we're building a vector out of extended elements from another vector,
+ // we have P9 vector integer extend instructions.
+ if (Subtarget.hasP9Altivec()) {
+ Reduced = combineBVOfVecSExt(N, DAG);
+ if (Reduced)
+ return Reduced;
+ }
+
+
if (N->getValueType(0) != MVT::v2f64)
return SDValue();
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index a5108727bb4b..821927d3b157 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -67,6 +67,10 @@ namespace llvm {
/// VSFRC that is sign-extended from ByteWidth to a 64-byte integer.
VEXTS,
+ /// SExtVElems, takes an input vector of a smaller type and sign
+ /// extends to an output vector of a larger type.
+ SExtVElems,
+
/// Reciprocal estimate instructions (unary FP ops).
FRE, FRSQRTE,
@@ -1092,6 +1096,9 @@ namespace llvm {
ISD::ArgFlagsTy &ArgFlags,
CCState &State);
+ bool isIntS16Immediate(SDNode *N, int16_t &Imm);
+ bool isIntS16Immediate(SDValue Op, int16_t &Imm);
+
} // end namespace llvm
#endif // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td
index 47d59c25392a..6d9f55206b6a 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/lib/Target/PowerPC/PPCInstrInfo.td
@@ -32,6 +32,9 @@ def SDT_PPCstxsix : SDTypeProfile<0, 3, [
def SDT_PPCVexts : SDTypeProfile<1, 2, [
SDTCisVT<0, f64>, SDTCisVT<1, f64>, SDTCisPtrTy<2>
]>;
+def SDT_PPCSExtVElems : SDTypeProfile<1, 1, [
+ SDTCisVec<0>, SDTCisVec<1>
+]>;
def SDT_PPCCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32>,
SDTCisVT<1, i32> ]>;
@@ -131,6 +134,7 @@ def PPClxsizx : SDNode<"PPCISD::LXSIZX", SDT_PPCLxsizx,
def PPCstxsix : SDNode<"PPCISD::STXSIX", SDT_PPCstxsix,
[SDNPHasChain, SDNPMayStore]>;
def PPCVexts : SDNode<"PPCISD::VEXTS", SDT_PPCVexts, []>;
+def PPCSExtVElems : SDNode<"PPCISD::SExtVElems", SDT_PPCSExtVElems, []>;
// Extract FPSCR (not modeled at the DAG level).
def PPCmffs : SDNode<"PPCISD::MFFS",
@@ -4450,3 +4454,190 @@ def MSGSYNC : XForm_0<31, 886, (outs), (ins), "msgsync", IIC_SprMSGSYNC, []>;
def STOP : XForm_0<19, 370, (outs), (ins), "stop", IIC_SprSTOP, []>;
} // IsISA3_0
+
+// Fast 32-bit reverse bits algorithm:
+// Step 1: 1-bit swap (swap odd 1-bit and even 1-bit):
+// n = ((n >> 1) & 0x55555555) | ((n << 1) & 0xAAAAAAAA);
+// Step 2: 2-bit swap (swap odd 2-bit and even 2-bit):
+// n = ((n >> 2) & 0x33333333) | ((n << 2) & 0xCCCCCCCC);
+// Step 3: 4-bit swap (swap odd 4-bit and even 4-bit):
+// n = ((n >> 4) & 0x0F0F0F0F) | ((n << 4) & 0xF0F0F0F0);
+// Step 4: byte reverse (Suppose n = [B1,B2,B3,B4]):
+// Step 4.1: Put B4,B2 in the right position (rotate left 3 bytes):
+// n' = (n rotl 24); After which n' = [B4, B1, B2, B3]
+// Step 4.2: Insert B3 to the right position:
+// n' = rlwimi n', n, 8, 8, 15; After which n' = [B4, B3, B2, B3]
+// Step 4.3: Insert B1 to the right position:
+// n' = rlwimi n', n, 8, 24, 31; After which n' = [B4, B3, B2, B1]
+def MaskValues {
+ dag Lo1 = (ORI (LIS 0x5555), 0x5555);
+ dag Hi1 = (ORI (LIS 0xAAAA), 0xAAAA);
+ dag Lo2 = (ORI (LIS 0x3333), 0x3333);
+ dag Hi2 = (ORI (LIS 0xCCCC), 0xCCCC);
+ dag Lo4 = (ORI (LIS 0x0F0F), 0x0F0F);
+ dag Hi4 = (ORI (LIS 0xF0F0), 0xF0F0);
+}
+
+def Shift1 {
+ dag Right = (RLWINM $A, 31, 1, 31);
+ dag Left = (RLWINM $A, 1, 0, 30);
+}
+
+def Swap1 {
+ dag Bit = (OR (AND Shift1.Right, MaskValues.Lo1),
+ (AND Shift1.Left, MaskValues.Hi1));
+}
+
+def Shift2 {
+ dag Right = (RLWINM Swap1.Bit, 30, 2, 31);
+ dag Left = (RLWINM Swap1.Bit, 2, 0, 29);
+}
+
+def Swap2 {
+ dag Bits = (OR (AND Shift2.Right, MaskValues.Lo2),
+ (AND Shift2.Left, MaskValues.Hi2));
+}
+
+def Shift4 {
+ dag Right = (RLWINM Swap2.Bits, 28, 4, 31);
+ dag Left = (RLWINM Swap2.Bits, 4, 0, 27);
+}
+
+def Swap4 {
+ dag Bits = (OR (AND Shift4.Right, MaskValues.Lo4),
+ (AND Shift4.Left, MaskValues.Hi4));
+}
+
+def Rotate {
+ dag Left3Bytes = (RLWINM Swap4.Bits, 24, 0, 31);
+}
+
+def RotateInsertByte3 {
+ dag Left = (RLWIMI Rotate.Left3Bytes, Swap4.Bits, 8, 8, 15);
+}
+
+def RotateInsertByte1 {
+ dag Left = (RLWIMI RotateInsertByte3.Left, Swap4.Bits, 8, 24, 31);
+}
+
+def : Pat<(i32 (bitreverse i32:$A)),
+ (RLDICL_32 RotateInsertByte1.Left, 0, 32)>;
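For reference, the 32-bit recipe described in the comment above corresponds to the following plain C++ sketch; reverse32 is a hypothetical name used only for illustration, not a function added by this patch.

#include <cstdint>

// Swap 1-bit, 2-bit and 4-bit groups, then byte-reverse, following the
// step-by-step comment above.
static uint32_t reverse32(uint32_t n) {
  n = ((n >> 1) & 0x55555555u) | ((n << 1) & 0xAAAAAAAAu);
  n = ((n >> 2) & 0x33333333u) | ((n << 2) & 0xCCCCCCCCu);
  n = ((n >> 4) & 0x0F0F0F0Fu) | ((n << 4) & 0xF0F0F0F0u);
  // Byte reverse: [B1,B2,B3,B4] -> [B4,B3,B2,B1].
  return (n << 24) | ((n & 0xFF00u) << 8) | ((n >> 8) & 0xFF00u) | (n >> 24);
}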
+
+// Fast 64-bit reverse bits algorithm:
+// Step 1: 1-bit swap (swap odd 1-bit and even 1-bit):
+// n = ((n >> 1) & 0x5555555555555555) | ((n << 1) & 0xAAAAAAAAAAAAAAAA);
+// Step 2: 2-bit swap (swap odd 2-bit and even 2-bit):
+// n = ((n >> 2) & 0x3333333333333333) | ((n << 2) & 0xCCCCCCCCCCCCCCCC);
+// Step 3: 4-bit swap (swap odd 4-bit and even 4-bit):
+// n = ((n >> 4) & 0x0F0F0F0F0F0F0F0F) | ((n << 4) & 0xF0F0F0F0F0F0F0F0);
+// Step 4: byte reverse (Suppose n = [B1,B2,B3,B4,B5,B6,B7,B8]):
+// Apply the same byte reverse algorithm mentioned above for the fast 32-bit
+// reverse to both the high 32 bits and the low 32 bits of the 64-bit value,
+// then OR them together to get the final result.
+def MaskValues64 {
+ dag Lo1 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Lo1, sub_32));
+ dag Hi1 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Hi1, sub_32));
+ dag Lo2 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Lo2, sub_32));
+ dag Hi2 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Hi2, sub_32));
+ dag Lo4 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Lo4, sub_32));
+ dag Hi4 = (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), MaskValues.Hi4, sub_32));
+}
+
+def DWMaskValues {
+ dag Lo1 = (ORI8 (ORIS8 (RLDICR MaskValues64.Lo1, 32, 31), 0x5555), 0x5555);
+ dag Hi1 = (ORI8 (ORIS8 (RLDICR MaskValues64.Hi1, 32, 31), 0xAAAA), 0xAAAA);
+ dag Lo2 = (ORI8 (ORIS8 (RLDICR MaskValues64.Lo2, 32, 31), 0x3333), 0x3333);
+ dag Hi2 = (ORI8 (ORIS8 (RLDICR MaskValues64.Hi2, 32, 31), 0xCCCC), 0xCCCC);
+ dag Lo4 = (ORI8 (ORIS8 (RLDICR MaskValues64.Lo4, 32, 31), 0x0F0F), 0x0F0F);
+ dag Hi4 = (ORI8 (ORIS8 (RLDICR MaskValues64.Hi4, 32, 31), 0xF0F0), 0xF0F0);
+}
+
+def DWShift1 {
+ dag Right = (RLDICL $A, 63, 1);
+ dag Left = (RLDICR $A, 1, 62);
+}
+
+def DWSwap1 {
+ dag Bit = (OR8 (AND8 DWShift1.Right, DWMaskValues.Lo1),
+ (AND8 DWShift1.Left, DWMaskValues.Hi1));
+}
+
+def DWShift2 {
+ dag Right = (RLDICL DWSwap1.Bit, 62, 2);
+ dag Left = (RLDICR DWSwap1.Bit, 2, 61);
+}
+
+def DWSwap2 {
+ dag Bits = (OR8 (AND8 DWShift2.Right, DWMaskValues.Lo2),
+ (AND8 DWShift2.Left, DWMaskValues.Hi2));
+}
+
+def DWShift4 {
+ dag Right = (RLDICL DWSwap2.Bits, 60, 4);
+ dag Left = (RLDICR DWSwap2.Bits, 4, 59);
+}
+
+def DWSwap4 {
+ dag Bits = (OR8 (AND8 DWShift4.Right, DWMaskValues.Lo4),
+ (AND8 DWShift4.Left, DWMaskValues.Hi4));
+}
+
+// Bit swap is done, now start byte swap.
+def DWExtractLo32 {
+ dag SubReg = (i32 (EXTRACT_SUBREG DWSwap4.Bits, sub_32));
+}
+
+def DWRotateLo32 {
+ dag Left24 = (RLWINM DWExtractLo32.SubReg, 24, 0, 31);
+}
+
+def DWLo32RotateInsertByte3 {
+ dag Left = (RLWIMI DWRotateLo32.Left24, DWExtractLo32.SubReg, 8, 8, 15);
+}
+
+// Lower 32 bits in the right order
+def DWLo32RotateInsertByte1 {
+ dag Left =
+ (RLWIMI DWLo32RotateInsertByte3.Left, DWExtractLo32.SubReg, 8, 24, 31);
+}
+
+def ExtendLo32 {
+ dag To64Bit =
+ (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ DWLo32RotateInsertByte1.Left, sub_32));
+}
+
+def DWShiftHi32 { // SRDI DWSwap4.Bits, 32
+ dag ToLo32 = (RLDICL DWSwap4.Bits, 32, 32);
+}
+
+def DWExtractHi32 {
+ dag SubReg = (i32 (EXTRACT_SUBREG DWShiftHi32.ToLo32, sub_32));
+}
+
+def DWRotateHi32 {
+ dag Left24 = (RLWINM DWExtractHi32.SubReg, 24, 0, 31);
+}
+
+def DWHi32RotateInsertByte3 {
+ dag Left = (RLWIMI DWRotateHi32.Left24, DWExtractHi32.SubReg, 8, 8, 15);
+}
+
+// High 32 bits in the right order, but in the low 32-bit position
+def DWHi32RotateInsertByte1 {
+ dag Left =
+ (RLWIMI DWHi32RotateInsertByte3.Left, DWExtractHi32.SubReg, 8, 24, 31);
+}
+
+def ExtendHi32 {
+ dag To64Bit =
+ (i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ DWHi32RotateInsertByte1.Left, sub_32));
+}
+
+def DWShiftLo32 { // SLDI ExtendHi32.To64Bit, 32
+ dag ToHi32 = (RLDICR ExtendHi32.To64Bit, 32, 31);
+}
+
+def : Pat<(i64 (bitreverse i64:$A)),
+ (OR8 DWShiftLo32.ToHi32, ExtendLo32.To64Bit)>;
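The 64-bit patterns follow the same recipe the comment above describes: bit-group swaps across the whole doubleword, a byte reverse of each 32-bit half, then an exchange of the halves. A minimal sketch (byteReverse32 and reverse64 are illustrative names only):

#include <cstdint>

static uint32_t byteReverse32(uint32_t n) {
  return (n << 24) | ((n & 0xFF00u) << 8) | ((n >> 8) & 0xFF00u) | (n >> 24);
}

static uint64_t reverse64(uint64_t n) {
  n = ((n >> 1) & 0x5555555555555555ull) | ((n << 1) & 0xAAAAAAAAAAAAAAAAull);
  n = ((n >> 2) & 0x3333333333333333ull) | ((n << 2) & 0xCCCCCCCCCCCCCCCCull);
  n = ((n >> 4) & 0x0F0F0F0F0F0F0F0Full) | ((n << 4) & 0xF0F0F0F0F0F0F0F0ull);
  uint64_t Lo = byteReverse32((uint32_t)n);         // ends up in the high half
  uint64_t Hi = byteReverse32((uint32_t)(n >> 32)); // ends up in the low half
  return (Lo << 32) | Hi;
}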
diff --git a/lib/Target/PowerPC/PPCInstrVSX.td b/lib/Target/PowerPC/PPCInstrVSX.td
index 9cfc897cdb3f..43635a8919e2 100644
--- a/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/lib/Target/PowerPC/PPCInstrVSX.td
@@ -1901,6 +1901,98 @@ let Predicates = [IsLittleEndian, HasVSX] in
def : Pat<(v4i32 (int_ppc_vsx_lxvw4x_be xoaddr:$src)), (LXVW4X xoaddr:$src)>;
def : Pat<(v2f64 (int_ppc_vsx_lxvd2x_be xoaddr:$src)), (LXVD2X xoaddr:$src)>;
+// Variable index unsigned vector_extract on Power9
+let Predicates = [HasP9Altivec, IsLittleEndian] in {
+ def : Pat<(i64 (anyext (i32 (vector_extract v16i8:$S, i64:$Idx)))),
+ (VEXTUBRX $Idx, $S)>;
+
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, i64:$Idx)))),
+ (VEXTUHRX (RLWINM8 $Idx, 1, 28, 30), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 0)))),
+ (VEXTUHRX (LI8 0), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 1)))),
+ (VEXTUHRX (LI8 2), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 2)))),
+ (VEXTUHRX (LI8 4), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 3)))),
+ (VEXTUHRX (LI8 6), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 4)))),
+ (VEXTUHRX (LI8 8), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 5)))),
+ (VEXTUHRX (LI8 10), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 6)))),
+ (VEXTUHRX (LI8 12), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 7)))),
+ (VEXTUHRX (LI8 14), $S)>;
+
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
+ (VEXTUWRX (RLWINM8 $Idx, 2, 28, 29), $S)>;
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 0)))),
+ (VEXTUWRX (LI8 0), $S)>;
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 1)))),
+ (VEXTUWRX (LI8 4), $S)>;
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 2)))),
+ (VEXTUWRX (LI8 8), $S)>;
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 3)))),
+ (VEXTUWRX (LI8 12), $S)>;
+
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
+ (EXTSW (VEXTUWRX (RLWINM8 $Idx, 2, 28, 29), $S))>;
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 0)))),
+ (EXTSW (VEXTUWRX (LI8 0), $S))>;
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 1)))),
+ (EXTSW (VEXTUWRX (LI8 4), $S))>;
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 2)))),
+ (EXTSW (VEXTUWRX (LI8 8), $S))>;
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 3)))),
+ (EXTSW (VEXTUWRX (LI8 12), $S))>;
+}
+let Predicates = [HasP9Altivec, IsBigEndian] in {
+ def : Pat<(i64 (anyext (i32 (vector_extract v16i8:$S, i64:$Idx)))),
+ (VEXTUBLX $Idx, $S)>;
+
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, i64:$Idx)))),
+ (VEXTUHLX (RLWINM8 $Idx, 1, 28, 30), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 0)))),
+ (VEXTUHLX (LI8 0), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 1)))),
+ (VEXTUHLX (LI8 2), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 2)))),
+ (VEXTUHLX (LI8 4), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 3)))),
+ (VEXTUHLX (LI8 6), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 4)))),
+ (VEXTUHLX (LI8 8), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 5)))),
+ (VEXTUHLX (LI8 10), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 6)))),
+ (VEXTUHLX (LI8 12), $S)>;
+ def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 7)))),
+ (VEXTUHLX (LI8 14), $S)>;
+
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
+ (VEXTUWLX (RLWINM8 $Idx, 2, 28, 29), $S)>;
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 0)))),
+ (VEXTUWLX (LI8 0), $S)>;
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 1)))),
+ (VEXTUWLX (LI8 4), $S)>;
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 2)))),
+ (VEXTUWLX (LI8 8), $S)>;
+ def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 3)))),
+ (VEXTUWLX (LI8 12), $S)>;
+
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
+ (EXTSW (VEXTUWLX (RLWINM8 $Idx, 2, 28, 29), $S))>;
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 0)))),
+ (EXTSW (VEXTUWLX (LI8 0), $S))>;
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 1)))),
+ (EXTSW (VEXTUWLX (LI8 4), $S))>;
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 2)))),
+ (EXTSW (VEXTUWLX (LI8 8), $S))>;
+ def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 3)))),
+ (EXTSW (VEXTUWLX (LI8 12), $S))>;
+}
+
let Predicates = [IsLittleEndian, HasDirectMove] in {
// v16i8 scalar <-> vector conversions (LE)
def : Pat<(v16i8 (scalar_to_vector i32:$A)),
@@ -2729,36 +2821,54 @@ def DblToFlt {
}
def ByteToWord {
- dag A0 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 0)), i8));
- dag A1 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 4)), i8));
- dag A2 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 8)), i8));
- dag A3 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 12)), i8));
+ dag LE_A0 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 0)), i8));
+ dag LE_A1 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 4)), i8));
+ dag LE_A2 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 8)), i8));
+ dag LE_A3 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 12)), i8));
+ dag BE_A0 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 3)), i8));
+ dag BE_A1 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 7)), i8));
+ dag BE_A2 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 11)), i8));
+ dag BE_A3 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 15)), i8));
}
def ByteToDWord {
- dag A0 = (i64 (sext_inreg
- (i64 (anyext (i32 (vector_extract v16i8:$A, 0)))), i8));
- dag A1 = (i64 (sext_inreg
- (i64 (anyext (i32 (vector_extract v16i8:$A, 8)))), i8));
+ dag LE_A0 = (i64 (sext_inreg
+ (i64 (anyext (i32 (vector_extract v16i8:$A, 0)))), i8));
+ dag LE_A1 = (i64 (sext_inreg
+ (i64 (anyext (i32 (vector_extract v16i8:$A, 8)))), i8));
+ dag BE_A0 = (i64 (sext_inreg
+ (i64 (anyext (i32 (vector_extract v16i8:$A, 7)))), i8));
+ dag BE_A1 = (i64 (sext_inreg
+ (i64 (anyext (i32 (vector_extract v16i8:$A, 15)))), i8));
}
def HWordToWord {
- dag A0 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 0)), i16));
- dag A1 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 2)), i16));
- dag A2 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 4)), i16));
- dag A3 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 6)), i16));
+ dag LE_A0 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 0)), i16));
+ dag LE_A1 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 2)), i16));
+ dag LE_A2 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 4)), i16));
+ dag LE_A3 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 6)), i16));
+ dag BE_A0 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 1)), i16));
+ dag BE_A1 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 3)), i16));
+ dag BE_A2 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 5)), i16));
+ dag BE_A3 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 7)), i16));
}
def HWordToDWord {
- dag A0 = (i64 (sext_inreg
- (i64 (anyext (i32 (vector_extract v8i16:$A, 0)))), i16));
- dag A1 = (i64 (sext_inreg
- (i64 (anyext (i32 (vector_extract v8i16:$A, 4)))), i16));
+ dag LE_A0 = (i64 (sext_inreg
+ (i64 (anyext (i32 (vector_extract v8i16:$A, 0)))), i16));
+ dag LE_A1 = (i64 (sext_inreg
+ (i64 (anyext (i32 (vector_extract v8i16:$A, 4)))), i16));
+ dag BE_A0 = (i64 (sext_inreg
+ (i64 (anyext (i32 (vector_extract v8i16:$A, 3)))), i16));
+ dag BE_A1 = (i64 (sext_inreg
+ (i64 (anyext (i32 (vector_extract v8i16:$A, 7)))), i16));
}
def WordToDWord {
- dag A0 = (i64 (sext (i32 (vector_extract v4i32:$A, 0))));
- dag A1 = (i64 (sext (i32 (vector_extract v4i32:$A, 2))));
+ dag LE_A0 = (i64 (sext (i32 (vector_extract v4i32:$A, 0))));
+ dag LE_A1 = (i64 (sext (i32 (vector_extract v4i32:$A, 2))));
+ dag BE_A0 = (i64 (sext (i32 (vector_extract v4i32:$A, 1))));
+ dag BE_A1 = (i64 (sext (i32 (vector_extract v4i32:$A, 3))));
}
def FltToIntLoad {
@@ -3016,18 +3126,46 @@ let AddedComplexity = 400 in {
// P9 Altivec instructions that can be used to build vectors.
// Adding them to PPCInstrVSX.td rather than PPCAltivecVSX.td to compete
// with complexities of existing build vector patterns in this file.
- let Predicates = [HasP9Altivec] in {
- def : Pat<(v2i64 (build_vector WordToDWord.A0, WordToDWord.A1)),
+ let Predicates = [HasP9Altivec, IsLittleEndian] in {
+ def : Pat<(v2i64 (build_vector WordToDWord.LE_A0, WordToDWord.LE_A1)),
(v2i64 (VEXTSW2D $A))>;
- def : Pat<(v2i64 (build_vector HWordToDWord.A0, HWordToDWord.A1)),
+ def : Pat<(v2i64 (build_vector HWordToDWord.LE_A0, HWordToDWord.LE_A1)),
(v2i64 (VEXTSH2D $A))>;
- def : Pat<(v4i32 (build_vector HWordToWord.A0, HWordToWord.A1,
- HWordToWord.A2, HWordToWord.A3)),
+ def : Pat<(v4i32 (build_vector HWordToWord.LE_A0, HWordToWord.LE_A1,
+ HWordToWord.LE_A2, HWordToWord.LE_A3)),
(v4i32 (VEXTSH2W $A))>;
- def : Pat<(v4i32 (build_vector ByteToWord.A0, ByteToWord.A1,
- ByteToWord.A2, ByteToWord.A3)),
+ def : Pat<(v4i32 (build_vector ByteToWord.LE_A0, ByteToWord.LE_A1,
+ ByteToWord.LE_A2, ByteToWord.LE_A3)),
(v4i32 (VEXTSB2W $A))>;
- def : Pat<(v2i64 (build_vector ByteToDWord.A0, ByteToDWord.A1)),
+ def : Pat<(v2i64 (build_vector ByteToDWord.LE_A0, ByteToDWord.LE_A1)),
(v2i64 (VEXTSB2D $A))>;
}
+
+ let Predicates = [HasP9Altivec, IsBigEndian] in {
+ def : Pat<(v2i64 (build_vector WordToDWord.BE_A0, WordToDWord.BE_A1)),
+ (v2i64 (VEXTSW2D $A))>;
+ def : Pat<(v2i64 (build_vector HWordToDWord.BE_A0, HWordToDWord.BE_A1)),
+ (v2i64 (VEXTSH2D $A))>;
+ def : Pat<(v4i32 (build_vector HWordToWord.BE_A0, HWordToWord.BE_A1,
+ HWordToWord.BE_A2, HWordToWord.BE_A3)),
+ (v4i32 (VEXTSH2W $A))>;
+ def : Pat<(v4i32 (build_vector ByteToWord.BE_A0, ByteToWord.BE_A1,
+ ByteToWord.BE_A2, ByteToWord.BE_A3)),
+ (v4i32 (VEXTSB2W $A))>;
+ def : Pat<(v2i64 (build_vector ByteToDWord.BE_A0, ByteToDWord.BE_A1)),
+ (v2i64 (VEXTSB2D $A))>;
+ }
+
+ let Predicates = [HasP9Altivec] in {
+ def: Pat<(v2i64 (PPCSExtVElems v16i8:$A)),
+ (v2i64 (VEXTSB2D $A))>;
+ def: Pat<(v2i64 (PPCSExtVElems v8i16:$A)),
+ (v2i64 (VEXTSH2D $A))>;
+ def: Pat<(v2i64 (PPCSExtVElems v4i32:$A)),
+ (v2i64 (VEXTSW2D $A))>;
+ def: Pat<(v4i32 (PPCSExtVElems v16i8:$A)),
+ (v4i32 (VEXTSB2W $A))>;
+ def: Pat<(v4i32 (PPCSExtVElems v8i16:$A)),
+ (v4i32 (VEXTSH2W $A))>;
+ }
}
diff --git a/lib/Target/PowerPC/PPCScheduleP9.td b/lib/Target/PowerPC/PPCScheduleP9.td
index a9c1bd78b05e..a01995a629c2 100644
--- a/lib/Target/PowerPC/PPCScheduleP9.td
+++ b/lib/Target/PowerPC/PPCScheduleP9.td
@@ -260,8 +260,8 @@ let SchedModel = P9Model in {
// ***************** Defining Itinerary Class Resources *****************
- def : ItinRW<[P9_DFU_76C, IP_EXEC_1C, DISP_1C, DISP_1C], [IIC_IntSimple,
- IIC_IntGeneral]>;
+ def : ItinRW<[P9_ALU_2C, IP_EXEC_1C, DISP_1C, DISP_1C],
+ [IIC_IntSimple, IIC_IntGeneral]>;
def : ItinRW<[P9_ALU_2C, IP_EXEC_1C, DISP_1C, DISP_1C, DISP_1C],
[IIC_IntISEL, IIC_IntRotate, IIC_IntShift]>;
diff --git a/lib/Target/PowerPC/PPCSubtarget.h b/lib/Target/PowerPC/PPCSubtarget.h
index 5a97f595ad8c..90d11f46a384 100644
--- a/lib/Target/PowerPC/PPCSubtarget.h
+++ b/lib/Target/PowerPC/PPCSubtarget.h
@@ -272,6 +272,13 @@ public:
return 16;
}
+
+ // DarwinABI has a 224-byte red zone. PPC32 SVR4 ABI (Non-DarwinABI) has no
+ // red zone and PPC64 SVR4ABI has a 288-byte red zone.
+ unsigned getRedZoneSize() const {
+ return isDarwinABI() ? 224 : (isPPC64() ? 288 : 0);
+ }
+
bool hasHTM() const { return HasHTM; }
bool hasFusion() const { return HasFusion; }
bool hasFloat128() const { return HasFloat128; }
diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
index 491eaf326a50..7d34efd4af3e 100644
--- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
+++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
@@ -195,8 +195,10 @@ public:
return false;
// If we don't have VSX on the subtarget, don't do anything.
+ // Also, on Power 9 the load and store ops preserve element order and so
+ // the swaps are not required.
const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>();
- if (!STI.hasVSX())
+ if (!STI.hasVSX() || !STI.needsSwapsForVSXMemOps())
return false;
bool Changed = false;
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index f85c0cf111c4..be83efc02d27 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -34,7 +34,7 @@ public:
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override;
+ uint64_t Value, bool IsResolved) const override;
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override;
@@ -73,7 +73,7 @@ bool RISCVAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
void RISCVAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
- bool IsPCRel) const {
+ bool IsResolved) const {
return;
}
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp b/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
index d4454c271f5a..0d021d67033e 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
@@ -211,6 +211,7 @@ namespace {
case Sparc::fixup_sparc_wplt30:
if (Target.getSymA()->getSymbol().isTemporary())
return false;
+ LLVM_FALLTHROUGH;
case Sparc::fixup_sparc_tls_gd_hi22:
case Sparc::fixup_sparc_tls_gd_lo10:
case Sparc::fixup_sparc_tls_gd_add:
@@ -275,7 +276,7 @@ namespace {
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override {
+ uint64_t Value, bool IsResolved) const override {
Value = adjustFixupValue(Fixup.getKind(), Value);
if (!Value) return; // Doesn't change encoding.
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
index 6b32a7926437..51ac410a9c81 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
@@ -52,7 +52,7 @@ public:
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override;
+ uint64_t Value, bool IsResolved) const override;
bool mayNeedRelaxation(const MCInst &Inst) const override {
return false;
}
@@ -94,7 +94,7 @@ void SystemZMCAsmBackend::applyFixup(const MCAssembler &Asm,
const MCFixup &Fixup,
const MCValue &Target,
MutableArrayRef<char> Data, uint64_t Value,
- bool IsPCRel) const {
+ bool IsResolved) const {
MCFixupKind Kind = Fixup.getKind();
unsigned Offset = Fixup.getOffset();
unsigned BitSize = getFixupKindInfo(Kind).TargetSize;
diff --git a/lib/Target/SystemZ/SystemZHazardRecognizer.cpp b/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
index fe4b52b515e0..73a1036f88e0 100644
--- a/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
+++ b/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
@@ -26,7 +26,7 @@
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
// This is the limit of processor resource usage at which the
// scheduler should try to look for other instructions (not using the
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index fef4a8c92a36..2801141cd951 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2224,15 +2224,12 @@ static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
-// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
-// on the extended Op0 and (unextended) Op1. Store the even register result
+// and Opcode performs the GR128 operation. Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
- unsigned Extend, unsigned Opcode, SDValue Op0,
- SDValue Op1, SDValue &Even, SDValue &Odd) {
- SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
- SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
- SDValue(In128, 0), Op1);
+ unsigned Opcode, SDValue Op0, SDValue Op1,
+ SDValue &Even, SDValue &Odd) {
+ SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
bool Is32Bit = is32Bit(VT);
Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
@@ -2347,6 +2344,7 @@ static SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
// Handle tests for order using (or (ogt y x) (oge x y)).
case ISD::SETUO:
Invert = true;
+ LLVM_FALLTHROUGH;
case ISD::SETO: {
assert(IsFP && "Unexpected integer comparison");
SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
@@ -2358,6 +2356,7 @@ static SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
// Handle <> tests using (or (ogt y x) (ogt x y)).
case ISD::SETUEQ:
Invert = true;
+ LLVM_FALLTHROUGH;
case ISD::SETONE: {
assert(IsFP && "Unexpected integer comparison");
SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
@@ -2962,7 +2961,7 @@ SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
else {
- // Do a full 128-bit multiplication based on UMUL_LOHI64:
+ // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
//
// (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
//
@@ -2980,10 +2979,10 @@ SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
SDValue RL = Op.getOperand(1);
SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
- // UMUL_LOHI64 returns the low result in the odd register and the high
- // result in the even register. SMUL_LOHI is defined to return the
- // low half first, so the results are in reverse order.
- lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
+ // SystemZISD::UMUL_LOHI returns the low result in the odd register and
+ // the high result in the even register. ISD::SMUL_LOHI is defined to
+ // return the low half first, so the results are in reverse order.
+ lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
LL, RL, Ops[1], Ops[0]);
SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
@@ -3004,10 +3003,10 @@ SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
else
- // UMUL_LOHI64 returns the low result in the odd register and the high
- // result in the even register. UMUL_LOHI is defined to return the
- // low half first, so the results are in reverse order.
- lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
+ // SystemZISD::UMUL_LOHI returns the low result in the odd register and
+ // the high result in the even register. ISD::UMUL_LOHI is defined to
+ // return the low half first, so the results are in reverse order.
+ lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
return DAG.getMergeValues(Ops, DL);
}
@@ -3018,24 +3017,19 @@ SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
SDValue Op1 = Op.getOperand(1);
EVT VT = Op.getValueType();
SDLoc DL(Op);
- unsigned Opcode;
- // We use DSGF for 32-bit division.
- if (is32Bit(VT)) {
+ // We use DSGF for 32-bit division. This means the first operand must
+ // always be 64-bit, and the second operand should be 32-bit whenever
+ // that is possible, to improve performance.
+ if (is32Bit(VT))
Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
- Opcode = SystemZISD::SDIVREM32;
- } else if (DAG.ComputeNumSignBits(Op1) > 32) {
+ else if (DAG.ComputeNumSignBits(Op1) > 32)
Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
- Opcode = SystemZISD::SDIVREM32;
- } else
- Opcode = SystemZISD::SDIVREM64;
- // DSG(F) takes a 64-bit dividend, so the even register in the GR128
- // input is "don't care". The instruction returns the remainder in
- // the even register and the quotient in the odd register.
+ // DSG(F) returns the remainder in the even register and the
+ // quotient in the odd register.
SDValue Ops[2];
- lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
- Op0, Op1, Ops[1], Ops[0]);
+ lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
return DAG.getMergeValues(Ops, DL);
}
@@ -3044,16 +3038,11 @@ SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
EVT VT = Op.getValueType();
SDLoc DL(Op);
- // DL(G) uses a double-width dividend, so we need to clear the even
- // register in the GR128 input. The instruction returns the remainder
- // in the even register and the quotient in the odd register.
+ // DL(G) returns the remainder in the even register and the
+ // quotient in the odd register.
SDValue Ops[2];
- if (is32Bit(VT))
- lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
- Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
- else
- lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
- Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
+ lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
+ Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
return DAG.getMergeValues(Ops, DL);
}
@@ -3193,13 +3182,13 @@ SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
SDLoc DL(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
- SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
+ SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
- FenceScope == CrossThread) {
+ FenceSSID == SyncScope::System) {
return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
Op.getOperand(0)),
0);
@@ -4669,11 +4658,9 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(SELECT_CCMASK);
OPCODE(ADJDYNALLOC);
OPCODE(POPCNT);
- OPCODE(UMUL_LOHI64);
- OPCODE(SDIVREM32);
- OPCODE(SDIVREM64);
- OPCODE(UDIVREM32);
- OPCODE(UDIVREM64);
+ OPCODE(UMUL_LOHI);
+ OPCODE(SDIVREM);
+ OPCODE(UDIVREM);
OPCODE(MVC);
OPCODE(MVC_LOOP);
OPCODE(NC);
@@ -5778,14 +5765,12 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
return DoneMBB;
}
-// Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true
+// Emit an extension from a GR64 to a GR128. ClearEven is true
// if the high register of the GR128 value must be cleared or false if
-// it's "don't care". SubReg is subreg_l32 when extending a GR32
-// and subreg_l64 when extending a GR64.
+// it's "don't care".
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
MachineBasicBlock *MBB,
- bool ClearEven,
- unsigned SubReg) const {
+ bool ClearEven) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
@@ -5808,7 +5793,7 @@ MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
In128 = NewIn128;
}
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
- .addReg(In128).addReg(Src).addImm(SubReg);
+ .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);
MI.eraseFromParent();
return MBB;
@@ -6172,12 +6157,10 @@ MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
case SystemZ::CondStoreF64Inv:
return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
- case SystemZ::AEXT128_64:
- return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
- case SystemZ::ZEXT128_32:
- return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
- case SystemZ::ZEXT128_64:
- return emitExt128(MI, MBB, true, SystemZ::subreg_l64);
+ case SystemZ::AEXT128:
+ return emitExt128(MI, MBB, false);
+ case SystemZ::ZEXT128:
+ return emitExt128(MI, MBB, true);
case SystemZ::ATOMIC_SWAPW:
return emitAtomicLoadBinary(MI, MBB, 0, 0);
diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h
index 5dcb19c0a35d..6c9c404816f0 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/lib/Target/SystemZ/SystemZISelLowering.h
@@ -86,14 +86,11 @@ enum NodeType : unsigned {
// Count number of bits set in operand 0 per byte.
POPCNT,
- // Wrappers around the ISD opcodes of the same name. The output and
- // first input operands are GR128s. The trailing numbers are the
- // widths of the second operand in bits.
- UMUL_LOHI64,
- SDIVREM32,
- SDIVREM64,
- UDIVREM32,
- UDIVREM64,
+ // Wrappers around the ISD opcodes of the same name. The output is GR128.
+ // Input operands may be GR64 or GR32, depending on the instruction.
+ UMUL_LOHI,
+ SDIVREM,
+ UDIVREM,
// Use a series of MVCs to copy bytes from one memory location to another.
// The operands are:
@@ -562,7 +559,7 @@ private:
unsigned StoreOpcode, unsigned STOCOpcode,
bool Invert) const;
MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB,
- bool ClearEven, unsigned SubReg) const;
+ bool ClearEven) const;
MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
MachineBasicBlock *BB,
unsigned BinOpcode, unsigned BitSize,
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td
index 98f66c29ae64..4569be7602e4 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -677,6 +677,22 @@ let Predicates = [FeatureLoadAndTrap] in {
def LLGTAT : UnaryRXY<"llgtat", 0xE39C, null_frag, GR64, 4>;
}
+// Extend GR64s to GR128s.
+let usesCustomInserter = 1 in
+ def ZEXT128 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>;
+
+//===----------------------------------------------------------------------===//
+// "Any" extensions
+//===----------------------------------------------------------------------===//
+
+// Use subregs to populate the "don't care" bits in a 32-bit to 64-bit anyext.
+def : Pat<(i64 (anyext GR32:$src)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32)>;
+
+// Extend GR64s to GR128s.
+let usesCustomInserter = 1 in
+ def AEXT128 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>;
+
//===----------------------------------------------------------------------===//
// Truncations
//===----------------------------------------------------------------------===//
@@ -1216,13 +1232,17 @@ def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load, 8>;
// Multiplication of a register, producing two results.
def MR : BinaryRR <"mr", 0x1C, null_frag, GR128, GR32>;
def MLR : BinaryRRE<"mlr", 0xB996, null_frag, GR128, GR32>;
-def MLGR : BinaryRRE<"mlgr", 0xB986, z_umul_lohi64, GR128, GR64>;
+def MLGR : BinaryRRE<"mlgr", 0xB986, null_frag, GR128, GR64>;
+def : Pat<(z_umul_lohi GR64:$src1, GR64:$src2),
+ (MLGR (AEXT128 GR64:$src1), GR64:$src2)>;
// Multiplication of memory, producing two results.
def M : BinaryRX <"m", 0x5C, null_frag, GR128, load, 4>;
def MFY : BinaryRXY<"mfy", 0xE35C, null_frag, GR128, load, 4>;
def ML : BinaryRXY<"ml", 0xE396, null_frag, GR128, load, 4>;
-def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load, 8>;
+def MLG : BinaryRXY<"mlg", 0xE386, null_frag, GR128, load, 8>;
+def : Pat<(z_umul_lohi GR64:$src1, (i64 (load bdxaddr20only:$src2))),
+ (MLG (AEXT128 GR64:$src1), bdxaddr20only:$src2)>;
//===----------------------------------------------------------------------===//
// Division and remainder
@@ -1230,19 +1250,38 @@ def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load, 8>;
let hasSideEffects = 1 in { // Do not speculatively execute.
// Division and remainder, from registers.
- def DR : BinaryRR <"dr", 0x1D, null_frag, GR128, GR32>;
- def DSGFR : BinaryRRE<"dsgfr", 0xB91D, z_sdivrem32, GR128, GR32>;
- def DSGR : BinaryRRE<"dsgr", 0xB90D, z_sdivrem64, GR128, GR64>;
- def DLR : BinaryRRE<"dlr", 0xB997, z_udivrem32, GR128, GR32>;
- def DLGR : BinaryRRE<"dlgr", 0xB987, z_udivrem64, GR128, GR64>;
+ def DR : BinaryRR <"dr", 0x1D, null_frag, GR128, GR32>;
+ def DSGFR : BinaryRRE<"dsgfr", 0xB91D, null_frag, GR128, GR32>;
+ def DSGR : BinaryRRE<"dsgr", 0xB90D, null_frag, GR128, GR64>;
+ def DLR : BinaryRRE<"dlr", 0xB997, null_frag, GR128, GR32>;
+ def DLGR : BinaryRRE<"dlgr", 0xB987, null_frag, GR128, GR64>;
// Division and remainder, from memory.
- def D : BinaryRX <"d", 0x5D, null_frag, GR128, load, 4>;
- def DSGF : BinaryRXY<"dsgf", 0xE31D, z_sdivrem32, GR128, load, 4>;
- def DSG : BinaryRXY<"dsg", 0xE30D, z_sdivrem64, GR128, load, 8>;
- def DL : BinaryRXY<"dl", 0xE397, z_udivrem32, GR128, load, 4>;
- def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>;
-}
+ def D : BinaryRX <"d", 0x5D, null_frag, GR128, load, 4>;
+ def DSGF : BinaryRXY<"dsgf", 0xE31D, null_frag, GR128, load, 4>;
+ def DSG : BinaryRXY<"dsg", 0xE30D, null_frag, GR128, load, 8>;
+ def DL : BinaryRXY<"dl", 0xE397, null_frag, GR128, load, 4>;
+ def DLG : BinaryRXY<"dlg", 0xE387, null_frag, GR128, load, 8>;
+}
+def : Pat<(z_sdivrem GR64:$src1, GR32:$src2),
+ (DSGFR (AEXT128 GR64:$src1), GR32:$src2)>;
+def : Pat<(z_sdivrem GR64:$src1, (i32 (load bdxaddr20only:$src2))),
+ (DSGF (AEXT128 GR64:$src1), bdxaddr20only:$src2)>;
+def : Pat<(z_sdivrem GR64:$src1, GR64:$src2),
+ (DSGR (AEXT128 GR64:$src1), GR64:$src2)>;
+def : Pat<(z_sdivrem GR64:$src1, (i64 (load bdxaddr20only:$src2))),
+ (DSG (AEXT128 GR64:$src1), bdxaddr20only:$src2)>;
+
+def : Pat<(z_udivrem GR32:$src1, GR32:$src2),
+ (DLR (ZEXT128 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src1,
+ subreg_l32)), GR32:$src2)>;
+def : Pat<(z_udivrem GR32:$src1, (i32 (load bdxaddr20only:$src2))),
+ (DL (ZEXT128 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src1,
+ subreg_l32)), bdxaddr20only:$src2)>;
+def : Pat<(z_udivrem GR64:$src1, GR64:$src2),
+ (DLGR (ZEXT128 GR64:$src1), GR64:$src2)>;
+def : Pat<(z_udivrem GR64:$src1, (i64 (load bdxaddr20only:$src2))),
+ (DLG (ZEXT128 GR64:$src1), bdxaddr20only:$src2)>;
//===----------------------------------------------------------------------===//
// Shifts
@@ -1894,17 +1933,6 @@ def : Pat<(ctlz GR64:$src),
let Predicates = [FeaturePopulationCount], Defs = [CC] in
def POPCNT : UnaryRRE<"popcnt", 0xB9E1, z_popcnt, GR64, GR64>;
-// Use subregs to populate the "don't care" bits in a 32-bit to 64-bit anyext.
-def : Pat<(i64 (anyext GR32:$src)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32)>;
-
-// Extend GR32s and GR64s to GR128s.
-let usesCustomInserter = 1 in {
- def AEXT128_64 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>;
- def ZEXT128_32 : Pseudo<(outs GR128:$dst), (ins GR32:$src), []>;
- def ZEXT128_64 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>;
-}
-
// Search a block of memory for a character.
let mayLoad = 1, Defs = [CC] in
defm SRST : StringRRE<"srst", 0xB25E, z_search_string>;
diff --git a/lib/Target/SystemZ/SystemZLDCleanup.cpp b/lib/Target/SystemZ/SystemZLDCleanup.cpp
index 3a0e01da42f0..d4cd89ce590f 100644
--- a/lib/Target/SystemZ/SystemZLDCleanup.cpp
+++ b/lib/Target/SystemZ/SystemZLDCleanup.cpp
@@ -127,7 +127,7 @@ MachineInstr *SystemZLDCleanup::ReplaceTLSCall(MachineInstr *I,
return Copy;
}
-// Create a virtal register in *TLSBaseAddrReg, and populate it by
+// Create a virtual register in *TLSBaseAddrReg, and populate it by
// inserting a copy instruction after I. Returns the new instruction.
MachineInstr *SystemZLDCleanup::SetRegister(MachineInstr *I,
unsigned *TLSBaseAddrReg) {
diff --git a/lib/Target/SystemZ/SystemZMachineScheduler.cpp b/lib/Target/SystemZ/SystemZMachineScheduler.cpp
index b6feaa49d858..8342463c1086 100644
--- a/lib/Target/SystemZ/SystemZMachineScheduler.cpp
+++ b/lib/Target/SystemZ/SystemZMachineScheduler.cpp
@@ -18,7 +18,7 @@
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
#ifndef NDEBUG
// Print the set of SUs
diff --git a/lib/Target/SystemZ/SystemZOperators.td b/lib/Target/SystemZ/SystemZOperators.td
index ab2392809f3b..9c6d5819f8a7 100644
--- a/lib/Target/SystemZ/SystemZOperators.td
+++ b/lib/Target/SystemZ/SystemZOperators.td
@@ -36,14 +36,10 @@ def SDT_ZWrapOffset : SDTypeProfile<1, 2,
SDTCisSameAs<0, 2>,
SDTCisPtrTy<0>]>;
def SDT_ZAdjDynAlloc : SDTypeProfile<1, 0, [SDTCisVT<0, i64>]>;
-def SDT_ZGR128Binary32 : SDTypeProfile<1, 2,
+def SDT_ZGR128Binary : SDTypeProfile<1, 2,
[SDTCisVT<0, untyped>,
- SDTCisVT<1, untyped>,
- SDTCisVT<2, i32>]>;
-def SDT_ZGR128Binary64 : SDTypeProfile<1, 2,
- [SDTCisVT<0, untyped>,
- SDTCisVT<1, untyped>,
- SDTCisVT<2, i64>]>;
+ SDTCisInt<1>,
+ SDTCisInt<2>]>;
def SDT_ZAtomicLoadBinaryW : SDTypeProfile<1, 5,
[SDTCisVT<0, i32>,
SDTCisPtrTy<1>,
@@ -185,11 +181,9 @@ def z_select_ccmask : SDNode<"SystemZISD::SELECT_CCMASK", SDT_ZSelectCCMask,
[SDNPInGlue]>;
def z_adjdynalloc : SDNode<"SystemZISD::ADJDYNALLOC", SDT_ZAdjDynAlloc>;
def z_popcnt : SDNode<"SystemZISD::POPCNT", SDTIntUnaryOp>;
-def z_umul_lohi64 : SDNode<"SystemZISD::UMUL_LOHI64", SDT_ZGR128Binary64>;
-def z_sdivrem32 : SDNode<"SystemZISD::SDIVREM32", SDT_ZGR128Binary32>;
-def z_sdivrem64 : SDNode<"SystemZISD::SDIVREM64", SDT_ZGR128Binary64>;
-def z_udivrem32 : SDNode<"SystemZISD::UDIVREM32", SDT_ZGR128Binary32>;
-def z_udivrem64 : SDNode<"SystemZISD::UDIVREM64", SDT_ZGR128Binary64>;
+def z_umul_lohi : SDNode<"SystemZISD::UMUL_LOHI", SDT_ZGR128Binary>;
+def z_sdivrem : SDNode<"SystemZISD::SDIVREM", SDT_ZGR128Binary>;
+def z_udivrem : SDNode<"SystemZISD::UDIVREM", SDT_ZGR128Binary>;
def z_membarrier : SDNode<"SystemZISD::MEMBARRIER", SDTNone,
[SDNPHasChain, SDNPSideEffect]>;
diff --git a/lib/Target/SystemZ/SystemZScheduleZ13.td b/lib/Target/SystemZ/SystemZScheduleZ13.td
index adc9f2976f87..72543c1eaee2 100644
--- a/lib/Target/SystemZ/SystemZScheduleZ13.td
+++ b/lib/Target/SystemZ/SystemZScheduleZ13.td
@@ -15,7 +15,7 @@
def Z13Model : SchedMachineModel {
let UnsupportedFeatures = Arch11UnsupportedFeatures.List;
-
+
let IssueWidth = 8;
let MicroOpBufferSize = 60; // Issue queues
let LoadLatency = 1; // Optimistic load latency.
@@ -159,7 +159,7 @@ def : InstRW<[FXb], (instregex "CondReturn$")>;
// Select instructions
//===----------------------------------------------------------------------===//
-// Select pseudo
+// Select pseudo
def : InstRW<[FXa], (instregex "Select(32|64|32Mux)$")>;
// CondStore pseudos
@@ -226,7 +226,7 @@ def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVST$")>;
def : InstRW<[FXa, Lat2], (instregex "LOCRMux$")>;
def : InstRW<[FXa, Lat2], (instregex "LOC(G|FH)?R(Asm.*)?$")>;
-def : InstRW<[FXa, Lat2], (instregex "LOC(G|H)?HI(Asm.*)?$")>;
+def : InstRW<[FXa, Lat2], (instregex "LOC(G|H)?HI(Mux|(Asm.*))?$")>;
def : InstRW<[FXa, LSU, Lat6], (instregex "LOC(G|FH|Mux)?(Asm.*)?$")>;
def : InstRW<[FXb, LSU, Lat5], (instregex "STOC(G|FH|Mux)?(Asm.*)?$")>;
@@ -282,7 +282,7 @@ def : InstRW<[LSU, LSU, LSU, LSU, LSU, Lat10, GroupAlone],
(instregex "LM(H|Y|G)?$")>;
// Load multiple disjoint
-def : InstRW<[FXb, Lat30, GroupAlone], (instregex "LMD$")>;
+def : InstRW<[LSU, Lat30, GroupAlone], (instregex "LMD$")>;
// Store multiple (estimated average of ceil(5/2) FXb ops)
def : InstRW<[LSU, LSU, FXb, FXb, FXb, Lat10,
@@ -446,13 +446,13 @@ def : InstRW<[FXa, Lat6], (instregex "MS(R|FI)$")>;
def : InstRW<[FXa, LSU, Lat12], (instregex "MSG$")>;
def : InstRW<[FXa, Lat8], (instregex "MSGR$")>;
def : InstRW<[FXa, Lat6], (instregex "MSGF(I|R)$")>;
-def : InstRW<[FXa, LSU, Lat15, GroupAlone], (instregex "MLG$")>;
-def : InstRW<[FXa, Lat9, GroupAlone], (instregex "MLGR$")>;
+def : InstRW<[FXa2, LSU, Lat15, GroupAlone], (instregex "MLG$")>;
+def : InstRW<[FXa2, Lat9, GroupAlone], (instregex "MLGR$")>;
def : InstRW<[FXa, Lat5], (instregex "MGHI$")>;
def : InstRW<[FXa, Lat5], (instregex "MHI$")>;
def : InstRW<[FXa, LSU, Lat9], (instregex "MH(Y)?$")>;
-def : InstRW<[FXa, Lat7, GroupAlone], (instregex "M(L)?R$")>;
-def : InstRW<[FXa, LSU, Lat7, GroupAlone], (instregex "M(FY|L)?$")>;
+def : InstRW<[FXa2, Lat7, GroupAlone], (instregex "M(L)?R$")>;
+def : InstRW<[FXa2, LSU, Lat7, GroupAlone], (instregex "M(FY|L)?$")>;
//===----------------------------------------------------------------------===//
// Division and remainder
@@ -460,8 +460,8 @@ def : InstRW<[FXa, LSU, Lat7, GroupAlone], (instregex "M(FY|L)?$")>;
def : InstRW<[FXa2, FXa2, Lat20, GroupAlone], (instregex "DR$")>;
def : InstRW<[FXa2, FXa2, LSU, Lat30, GroupAlone], (instregex "D$")>;
-def : InstRW<[FXa, Lat30, GroupAlone], (instregex "DSG(F)?R$")>;
-def : InstRW<[LSU, FXa, Lat30, GroupAlone], (instregex "DSG(F)?$")>;
+def : InstRW<[FXa2, Lat30, GroupAlone], (instregex "DSG(F)?R$")>;
+def : InstRW<[LSU, FXa2, Lat30, GroupAlone], (instregex "DSG(F)?$")>;
def : InstRW<[FXa2, FXa2, Lat20, GroupAlone], (instregex "DLR$")>;
def : InstRW<[FXa2, FXa2, Lat30, GroupAlone], (instregex "DLGR$")>;
def : InstRW<[FXa2, FXa2, LSU, Lat30, GroupAlone], (instregex "DL(G)?$")>;
@@ -474,7 +474,8 @@ def : InstRW<[FXa], (instregex "SLL(G|K)?$")>;
def : InstRW<[FXa], (instregex "SRL(G|K)?$")>;
def : InstRW<[FXa], (instregex "SRA(G|K)?$")>;
def : InstRW<[FXa], (instregex "SLA(G|K)?$")>;
-def : InstRW<[FXa, FXa, FXa, FXa, Lat8], (instregex "S(L|R)D(A|L)$")>;
+def : InstRW<[FXa, FXa, FXa, FXa, LSU, Lat8, GroupAlone],
+ (instregex "S(L|R)D(A|L)$")>;
// Rotate
def : InstRW<[FXa, LSU, Lat6], (instregex "RLL(G)?$")>;
@@ -537,7 +538,7 @@ def : InstRW<[FXb], (instregex "TMLH(64)?$")>;
def : InstRW<[FXb], (instregex "TMLL(64)?$")>;
// Compare logical characters under mask
-def : InstRW<[FXb, LSU, Lat5], (instregex "CLM(H|Y)?$")>;
+def : InstRW<[FXb, LSU, Lat6], (instregex "CLM(H|Y)?$")>;
//===----------------------------------------------------------------------===//
// Prefetch and execution hint
@@ -573,7 +574,7 @@ def : InstRW<[FXa, FXa, FXb, FXb, LSU, FXb, FXb, LSU, LSU, Lat20, GroupAlone],
(instregex "CDSG$")>;
// Compare and swap and store
-def : InstRW<[FXa, Lat30, GroupAlone], (instregex "CSST$")>;
+def : InstRW<[FXa, LSU, Lat30], (instregex "CSST$")>;
// Perform locked operation
def : InstRW<[LSU, Lat30, GroupAlone], (instregex "PLO$")>;
@@ -589,36 +590,45 @@ def : InstRW<[LSU, LSU, Lat5, GroupAlone], (instregex "LPD(G)?$")>;
// Translate and convert
//===----------------------------------------------------------------------===//
-def : InstRW<[FXa, Lat30, GroupAlone], (instregex "TR(T|TR)?(E|EOpt)?$")>;
-def : InstRW<[FXa, Lat30, GroupAlone], (instregex "TR(T|O)(T|O)(Opt)?$")>;
-def : InstRW<[FXa, Lat30, GroupAlone], (instregex "CU(12|14|21|24|41|42)(Opt)?$")>;
-def : InstRW<[FXa, Lat30, GroupAlone], (instregex "(CUUTF|CUTFU)(Opt)?$")>;
+def : InstRW<[LSU, Lat30, GroupAlone], (instregex "TR$")>;
+def : InstRW<[FXa, FXa, FXa, LSU, LSU, Lat30, GroupAlone], (instregex "TRT$")>;
+def : InstRW<[FXa, LSU, Lat30], (instregex "TRTR$")>;
+def : InstRW<[FXa, Lat30], (instregex "TR(TR)?(T)?(E|EOpt)?$")>;
+def : InstRW<[LSU, Lat30], (instregex "TR(T|O)(T|O)(Opt)?$")>;
+def : InstRW<[FXa, Lat30], (instregex "CU(12|14|21|24|41|42)(Opt)?$")>;
+def : InstRW<[FXa, Lat30], (instregex "(CUUTF|CUTFU)(Opt)?$")>;
//===----------------------------------------------------------------------===//
// Message-security assist
//===----------------------------------------------------------------------===//
-def : InstRW<[FXa, Lat30, GroupAlone], (instregex "KM(C|F|O|CTR)?$")>;
-def : InstRW<[FXa, Lat30, GroupAlone], (instregex "(KIMD|KLMD|KMAC|PCC|PPNO)$")>;
+def : InstRW<[FXa, Lat30], (instregex "KM(C|F|O|CTR)?$")>;
+def : InstRW<[FXa, Lat30], (instregex "(KIMD|KLMD|KMAC|PCC|PPNO)$")>;
//===----------------------------------------------------------------------===//
// Decimal arithmetic
//===----------------------------------------------------------------------===//
-def : InstRW<[FXb, VecDF, LSU, Lat30, GroupAlone], (instregex "CVB(Y|G)?$")>;
-def : InstRW<[FXb, VecDF, FXb, Lat30, GroupAlone], (instregex "CVD(Y|G)?$")>;
-def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MV(N|Z|O)$")>;
+def : InstRW<[FXb, VecDF, VecDF, LSU, LSU, Lat30, GroupAlone],
+ (instregex "CVBG$")>;
+def : InstRW<[FXb, VecDF, LSU, Lat30, GroupAlone], (instregex "CVB(Y)?$")>;
+def : InstRW<[FXb, FXb, FXb, VecDF2, VecDF2, LSU, Lat30, GroupAlone],
+ (instregex "CVDG$")>;
+def : InstRW<[FXb, VecDF, FXb, LSU, Lat30, GroupAlone], (instregex "CVD(Y)?$")>;
+def : InstRW<[LSU, Lat10, GroupAlone], (instregex "MVO$")>;
+def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MV(N|Z)$")>;
def : InstRW<[LSU, Lat30, GroupAlone], (instregex "(PACK|PKA|PKU)$")>;
-def : InstRW<[LSU, Lat30, GroupAlone], (instregex "UNPK(A|U)?$")>;
+def : InstRW<[LSU, Lat12, GroupAlone], (instregex "UNPK(A|U)$")>;
+def : InstRW<[FXb, LSU, LSU, Lat9, BeginGroup], (instregex "UNPK$")>;
-def : InstRW<[FXb, VecDFX, LSU, LSU, Lat9, GroupAlone],
+def : InstRW<[FXb, VecDFX, LSU, LSU, LSU, Lat9, GroupAlone],
(instregex "(A|S|ZA)P$")>;
-def : InstRW<[FXb, VecDFX2, LSU, LSU, Lat30, GroupAlone],
+def : InstRW<[FXb, VecDFX2, VecDFX2, LSU, LSU, LSU, Lat30, GroupAlone],
(instregex "(M|D)P$")>;
-def : InstRW<[FXb, FXb, VecDFX2, LSU, LSU, LSU, Lat15, GroupAlone],
+def : InstRW<[FXb, VecDFX, VecDFX, LSU, LSU, Lat15, GroupAlone],
(instregex "SRP$")>;
def : InstRW<[VecDFX, LSU, LSU, Lat5, GroupAlone], (instregex "CP$")>;
-def : InstRW<[VecDFX, LSU, Lat4, GroupAlone], (instregex "TP$")>;
+def : InstRW<[VecDFX, LSU, Lat4, BeginGroup], (instregex "TP$")>;
def : InstRW<[LSU, Lat30, GroupAlone], (instregex "ED(MK)?$")>;
//===----------------------------------------------------------------------===//
@@ -688,25 +698,25 @@ def : InstRW<[FXb], (instregex "PPA$")>;
//===----------------------------------------------------------------------===//
// Find leftmost one
-def : InstRW<[FXa, Lat6, GroupAlone], (instregex "FLOGR$")>;
+def : InstRW<[FXa, FXa, Lat6, GroupAlone], (instregex "FLOGR$")>;
// Population count
def : InstRW<[FXa, Lat3], (instregex "POPCNT$")>;
// Extend
-def : InstRW<[FXa], (instregex "AEXT128_64$")>;
-def : InstRW<[FXa], (instregex "ZEXT128_(32|64)$")>;
+def : InstRW<[FXa], (instregex "AEXT128$")>;
+def : InstRW<[FXa], (instregex "ZEXT128$")>;
// String instructions
def : InstRW<[FXa, LSU, Lat30], (instregex "SRST$")>;
-def : InstRW<[LSU, Lat30], (instregex "SRSTU$")>;
+def : InstRW<[FXa, Lat30], (instregex "SRSTU$")>;
def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CUSE$")>;
// Various complex instructions
-def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CFC$")>;
-def : InstRW<[LSU, Lat30, GroupAlone], (instregex "UPT$")>;
-def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CKSM$")>;
-def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CMPSC$")>;
+def : InstRW<[LSU, Lat30], (instregex "CFC$")>;
+def : InstRW<[FXb, LSU, Lat30], (instregex "UPT$")>;
+def : InstRW<[LSU, Lat30], (instregex "CKSM$")>;
+def : InstRW<[FXa, Lat30], (instregex "CMPSC$")>;
// Execute
def : InstRW<[FXb, GroupAlone], (instregex "EX(RL)?$")>;
@@ -833,7 +843,7 @@ def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "FIXBR(A)?$")>;
// Addition
def : InstRW<[VecBF, LSU, Lat12], (instregex "A(E|D)B$")>;
def : InstRW<[VecBF], (instregex "A(E|D)BR$")>;
-def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "AXBR$")>;
+def : InstRW<[VecDF2, VecDF2, Lat10, GroupAlone], (instregex "AXBR$")>;
// Subtraction
def : InstRW<[VecBF, LSU, Lat12], (instregex "S(E|D)B$")>;
@@ -848,9 +858,9 @@ def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MXDBR$")>;
def : InstRW<[VecDF2, VecDF2, Lat20, GroupAlone], (instregex "MXBR$")>;
// Multiply and add / subtract
-def : InstRW<[VecBF, LSU, Lat12, GroupAlone], (instregex "M(A|S)EB$")>;
+def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)EB$")>;
def : InstRW<[VecBF, GroupAlone], (instregex "M(A|S)EBR$")>;
-def : InstRW<[VecBF, LSU, Lat12, GroupAlone], (instregex "M(A|S)DB$")>;
+def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)DB$")>;
def : InstRW<[VecBF], (instregex "M(A|S)DBR$")>;
// Division
@@ -859,7 +869,7 @@ def : InstRW<[VecFPd], (instregex "D(E|D)BR$")>;
def : InstRW<[VecFPd, VecFPd, GroupAlone], (instregex "DXBR$")>;
// Divide to integer
-def : InstRW<[VecFPd, Lat30, GroupAlone], (instregex "DI(E|D)BR$")>;
+def : InstRW<[VecFPd, Lat30], (instregex "DI(E|D)BR$")>;
//===----------------------------------------------------------------------===//
// FP: Comparisons
@@ -882,8 +892,8 @@ def : InstRW<[FXa, LSU, Lat4, GroupAlone], (instregex "EFPC$")>;
def : InstRW<[FXb, LSU, Lat5, GroupAlone], (instregex "STFPC$")>;
def : InstRW<[LSU, Lat3, GroupAlone], (instregex "SFPC$")>;
def : InstRW<[LSU, LSU, Lat6, GroupAlone], (instregex "LFPC$")>;
-def : InstRW<[FXa, Lat30, GroupAlone], (instregex "SFASR$")>;
-def : InstRW<[FXa, LSU, Lat30, GroupAlone], (instregex "LFAS$")>;
+def : InstRW<[FXa, Lat30], (instregex "SFASR$")>;
+def : InstRW<[FXa, LSU, Lat30], (instregex "LFAS$")>;
def : InstRW<[FXb, Lat3, GroupAlone], (instregex "SRNM(B|T)?$")>;
@@ -904,7 +914,7 @@ def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "LTXR$")>;
// Load rounded
def : InstRW<[VecBF], (instregex "(LEDR|LRER)$")>;
def : InstRW<[VecBF], (instregex "LEXR$")>;
-def : InstRW<[VecDF2, VecDF2], (instregex "(LDXR|LRDR)$")>;
+def : InstRW<[VecDF2], (instregex "(LDXR|LRDR)$")>;
// Load lengthened
def : InstRW<[LSU], (instregex "LDE$")>;
@@ -955,7 +965,7 @@ def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "FIXR$")>;
// Addition
def : InstRW<[VecBF, LSU, Lat12], (instregex "A(E|D|U|W)$")>;
def : InstRW<[VecBF], (instregex "A(E|D|U|W)R$")>;
-def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "AXR$")>;
+def : InstRW<[VecDF2, VecDF2, Lat10, GroupAlone], (instregex "AXR$")>;
// Subtraction
def : InstRW<[VecBF, LSU, Lat12], (instregex "S(E|D|U|W)$")>;
@@ -968,16 +978,20 @@ def : InstRW<[VecBF], (instregex "M(D|DE|E|EE)R$")>;
def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MXD$")>;
def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MXDR$")>;
def : InstRW<[VecDF2, VecDF2, Lat20, GroupAlone], (instregex "MXR$")>;
-def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MY(H|L)?$")>;
-def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MY(H|L)?R$")>;
+def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MY$")>;
+def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "MY(H|L)$")>;
+def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MYR$")>;
+def : InstRW<[VecBF, GroupAlone], (instregex "MY(H|L)R$")>;
// Multiply and add / subtract
-def : InstRW<[VecBF, LSU, Lat12, GroupAlone], (instregex "M(A|S)E$")>;
+def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)E$")>;
def : InstRW<[VecBF, GroupAlone], (instregex "M(A|S)ER$")>;
-def : InstRW<[VecBF, LSU, Lat12, GroupAlone], (instregex "M(A|S)D$")>;
-def : InstRW<[VecBF], (instregex "M(A|S)DR$")>;
-def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MAY(H|L)?$")>;
-def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MAY(H|L)?R$")>;
+def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)D$")>;
+def : InstRW<[VecBF, GroupAlone], (instregex "M(A|S)DR$")>;
+def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "MAY(H|L)$")>;
+def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MAY$")>;
+def : InstRW<[VecBF, GroupAlone], (instregex "MAY(H|L)R$")>;
+def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MAYR$")>;
// Division
def : InstRW<[VecFPd, LSU], (instregex "D(E|D)$")>;
@@ -989,8 +1003,8 @@ def : InstRW<[VecFPd, VecFPd, GroupAlone], (instregex "DXR$")>;
//===----------------------------------------------------------------------===//
// Compare
-def : InstRW<[VecXsPm, LSU, Lat8], (instregex "C(E|D)$")>;
-def : InstRW<[VecXsPm, Lat4], (instregex "C(E|D)R$")>;
+def : InstRW<[VecBF, LSU, Lat12], (instregex "C(E|D)$")>;
+def : InstRW<[VecBF], (instregex "C(E|D)R$")>;
def : InstRW<[VecDF, VecDF, Lat20, GroupAlone], (instregex "CXR$")>;
@@ -1032,7 +1046,7 @@ def : InstRW<[FXb, VecDF, VecDF, Lat30, BeginGroup], (instregex "CL(F|G)XTR$")>;
def : InstRW<[FXb, VecDF, Lat9, BeginGroup], (instregex "CD(S|U)TR$")>;
def : InstRW<[FXb, FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "CX(S|U)TR$")>;
def : InstRW<[FXb, VecDF, Lat12, BeginGroup], (instregex "C(S|U)DTR$")>;
-def : InstRW<[FXb, FXb, VecDF2, VecDF2, Lat15, BeginGroup], (instregex "C(S|U)XTR$")>;
+def : InstRW<[FXb, FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "C(S|U)XTR$")>;
// Convert from / to zoned
def : InstRW<[LSU, VecDF, Lat11, BeginGroup], (instregex "CDZT$")>;
@@ -1047,7 +1061,7 @@ def : InstRW<[FXb, LSU, VecDF, Lat11, BeginGroup], (instregex "CPDT$")>;
def : InstRW<[FXb, LSU, VecDF, VecDF, Lat15, GroupAlone], (instregex "CPXT$")>;
// Perform floating-point operation
-def : InstRW<[LSU, Lat30, GroupAlone], (instregex "PFPO$")>;
+def : InstRW<[FXb, Lat30], (instregex "PFPO$")>;
//===----------------------------------------------------------------------===//
// DFP: Unary arithmetic
@@ -1071,7 +1085,7 @@ def : InstRW<[FXb, VecDF, VecDF, Lat15, BeginGroup], (instregex "ESXTR$")>;
// Addition
def : InstRW<[VecDF], (instregex "ADTR(A)?$")>;
-def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "AXTR(A)?$")>;
+def : InstRW<[VecDF2, VecDF2, Lat10, GroupAlone], (instregex "AXTR(A)?$")>;
// Subtraction
def : InstRW<[VecDF], (instregex "SDTR(A)?$")>;
@@ -1090,15 +1104,15 @@ def : InstRW<[VecDF], (instregex "QADTR$")>;
def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "QAXTR$")>;
// Reround
-def : InstRW<[FXb, VecDF, Lat11], (instregex "RRDTR$")>;
+def : InstRW<[FXb, VecDF, Lat11, BeginGroup], (instregex "RRDTR$")>;
def : InstRW<[FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "RRXTR$")>;
// Shift significand left/right
-def : InstRW<[LSU, VecDF, Lat11], (instregex "S(L|R)DT$")>;
+def : InstRW<[LSU, VecDF, Lat11, GroupAlone], (instregex "S(L|R)DT$")>;
def : InstRW<[LSU, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "S(L|R)XT$")>;
// Insert biased exponent
-def : InstRW<[FXb, VecDF, Lat11], (instregex "IEDTR$")>;
+def : InstRW<[FXb, VecDF, Lat11, BeginGroup], (instregex "IEDTR$")>;
def : InstRW<[FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "IEXTR$")>;
//===----------------------------------------------------------------------===//
@@ -1115,7 +1129,7 @@ def : InstRW<[VecDF], (instregex "CEXTR$")>;
// Test Data Class/Group
def : InstRW<[LSU, VecDF, Lat11], (instregex "TD(C|G)(E|D)T$")>;
-def : InstRW<[LSU, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "TD(C|G)XT$")>;
+def : InstRW<[LSU, VecDF, VecDF, Lat15, GroupAlone], (instregex "TD(C|G)XT$")>;
// --------------------------------- Vector --------------------------------- //
@@ -1271,32 +1285,43 @@ def : InstRW<[VecStr, Lat5], (instregex "VTM$")>;
// Vector: Floating-point arithmetic
//===----------------------------------------------------------------------===//
-def : InstRW<[VecBF2], (instregex "VCD(G|GB|LG|LGB)$")>;
-def : InstRW<[VecBF], (instregex "WCD(GB|LGB)$")>;
+// Conversion and rounding
+def : InstRW<[VecBF2], (instregex "VCD(L)?G$")>;
+def : InstRW<[VecBF2], (instregex "VCD(L)?GB$")>;
+def : InstRW<[VecBF], (instregex "WCD(L)?GB$")>;
def : InstRW<[VecBF2], (instregex "VC(L)?GD$")>;
-def : InstRW<[VecBF2], (instregex "VFADB$")>;
-def : InstRW<[VecBF], (instregex "WFADB$")>;
-def : InstRW<[VecBF2], (instregex "VCGDB$")>;
-def : InstRW<[VecBF], (instregex "WCGDB$")>;
-def : InstRW<[VecBF2], (instregex "VF(I|M|A|S)$")>;
-def : InstRW<[VecBF2], (instregex "VF(I|M|S)DB$")>;
-def : InstRW<[VecBF], (instregex "WF(I|M|S)DB$")>;
-def : InstRW<[VecBF2], (instregex "VCLGDB$")>;
-def : InstRW<[VecBF], (instregex "WCLGDB$")>;
-def : InstRW<[VecXsPm], (instregex "VFL(C|N|P)DB$")>;
-def : InstRW<[VecXsPm], (instregex "WFL(C|N|P)DB$")>;
-def : InstRW<[VecBF2], (instregex "VFM(A|S)$")>;
-def : InstRW<[VecBF2], (instregex "VFM(A|S)DB$")>;
-def : InstRW<[VecBF], (instregex "WFM(A|S)DB$")>;
-def : InstRW<[VecXsPm], (instregex "VFPSO$")>;
-def : InstRW<[VecXsPm], (instregex "(V|W)FPSODB$")>;
-def : InstRW<[VecXsPm, Lat4], (instregex "VFTCI(DB)?$")>;
-def : InstRW<[VecXsPm, Lat4], (instregex "WFTCIDB$")>;
+def : InstRW<[VecBF2], (instregex "VC(L)?GDB$")>;
+def : InstRW<[VecBF], (instregex "WC(L)?GDB$")>;
def : InstRW<[VecBF2], (instregex "VL(DE|ED)$")>;
def : InstRW<[VecBF2], (instregex "VL(DE|ED)B$")>;
def : InstRW<[VecBF], (instregex "WL(DE|ED)B$")>;
+def : InstRW<[VecBF2], (instregex "VFI$")>;
+def : InstRW<[VecBF2], (instregex "VFIDB$")>;
+def : InstRW<[VecBF], (instregex "WFIDB$")>;
+
+// Sign operations
+def : InstRW<[VecXsPm], (instregex "VFPSO$")>;
+def : InstRW<[VecXsPm], (instregex "(V|W)FPSODB$")>;
+def : InstRW<[VecXsPm], (instregex "(V|W)FL(C|N|P)DB$")>;
-// divide / square root
+// Test data class
+def : InstRW<[VecXsPm, Lat4], (instregex "VFTCI$")>;
+def : InstRW<[VecXsPm, Lat4], (instregex "(V|W)FTCIDB$")>;
+
+// Add / subtract
+def : InstRW<[VecBF2], (instregex "VF(A|S)$")>;
+def : InstRW<[VecBF2], (instregex "VF(A|S)DB$")>;
+def : InstRW<[VecBF], (instregex "WF(A|S)DB$")>;
+
+// Multiply / multiply-and-add/subtract
+def : InstRW<[VecBF2], (instregex "VFM$")>;
+def : InstRW<[VecBF2], (instregex "VFMDB$")>;
+def : InstRW<[VecBF], (instregex "WFMDB$")>;
+def : InstRW<[VecBF2], (instregex "VFM(A|S)$")>;
+def : InstRW<[VecBF2], (instregex "VFM(A|S)DB$")>;
+def : InstRW<[VecBF], (instregex "WFM(A|S)DB$")>;
+
+// Divide / square root
def : InstRW<[VecFPd], (instregex "VFD$")>;
def : InstRW<[VecFPd], (instregex "(V|W)FDDB$")>;
def : InstRW<[VecFPd], (instregex "VFSQ$")>;
@@ -1308,10 +1333,10 @@ def : InstRW<[VecFPd], (instregex "(V|W)FSQDB$")>;
def : InstRW<[VecXsPm], (instregex "VFC(E|H|HE)$")>;
def : InstRW<[VecXsPm], (instregex "VFC(E|H|HE)DB$")>;
-def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)$")>;
def : InstRW<[VecXsPm], (instregex "WFC(E|H|HE)DB$")>;
def : InstRW<[VecXsPm, Lat4], (instregex "VFC(E|H|HE)DBS$")>;
def : InstRW<[VecXsPm, Lat4], (instregex "WFC(E|H|HE)DBS$")>;
+def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)$")>;
def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)DB$")>;
//===----------------------------------------------------------------------===//
@@ -1351,12 +1376,12 @@ def : InstRW<[VecStr, Lat5], (instregex "VSTRCZ(B|F|H)S$")>;
def : InstRW<[FXb, Lat30], (instregex "EPSW$")>;
def : InstRW<[FXb, LSU, Lat30], (instregex "LPSW(E)?$")>;
-def : InstRW<[FXa, Lat3], (instregex "IPK$")>;
-def : InstRW<[LSU], (instregex "SPKA$")>;
-def : InstRW<[LSU], (instregex "SSM$")>;
-def : InstRW<[FXb], (instregex "ST(N|O)SM$")>;
+def : InstRW<[FXa, Lat3, GroupAlone], (instregex "IPK$")>;
+def : InstRW<[LSU, EndGroup], (instregex "SPKA$")>;
+def : InstRW<[LSU, EndGroup], (instregex "SSM$")>;
+def : InstRW<[FXb, LSU, GroupAlone], (instregex "ST(N|O)SM$")>;
def : InstRW<[FXa, Lat3], (instregex "IAC$")>;
-def : InstRW<[LSU], (instregex "SAC(F)?$")>;
+def : InstRW<[LSU, EndGroup], (instregex "SAC(F)?$")>;
//===----------------------------------------------------------------------===//
// System: Control Register Instructions
@@ -1411,14 +1436,14 @@ def : InstRW<[FXb, LSU, Lat30], (instregex "TPROT$")>;
def : InstRW<[FXa, FXa, FXb, LSU, Lat8, GroupAlone], (instregex "MVC(K|P|S)$")>;
def : InstRW<[FXa, LSU, Lat6, GroupAlone], (instregex "MVC(S|D)K$")>;
def : InstRW<[FXb, LSU, Lat30], (instregex "MVCOS$")>;
-def : InstRW<[FXb, LSU, Lat30], (instregex "MVPG$")>;
+def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVPG$")>;
//===----------------------------------------------------------------------===//
// System: Address-Space Instructions
//===----------------------------------------------------------------------===//
def : InstRW<[FXb, LSU, Lat30], (instregex "LASP$")>;
-def : InstRW<[LSU], (instregex "PALB$")>;
+def : InstRW<[LSU, GroupAlone], (instregex "PALB$")>;
def : InstRW<[FXb, LSU, Lat30], (instregex "PC$")>;
def : InstRW<[FXb, Lat30], (instregex "PR$")>;
def : InstRW<[FXb, Lat30], (instregex "PT(I)?$")>;
@@ -1430,7 +1455,7 @@ def : InstRW<[FXb, Lat20], (instregex "TAR$")>;
// System: Linkage-Stack Instructions
//===----------------------------------------------------------------------===//
-def : InstRW<[FXb, Lat30], (instregex "BAKR$")>;
+def : InstRW<[FXb, Lat30, EndGroup], (instregex "BAKR$")>;
def : InstRW<[FXb, Lat30], (instregex "EREG(G)?$")>;
def : InstRW<[FXb, Lat30], (instregex "(E|M)STA$")>;
@@ -1442,13 +1467,13 @@ def : InstRW<[FXb, Lat30], (instregex "PTFF$")>;
def : InstRW<[FXb, LSU, Lat20], (instregex "SCK$")>;
def : InstRW<[FXb, Lat30], (instregex "SCKPF$")>;
def : InstRW<[FXb, LSU, Lat20], (instregex "SCKC$")>;
-def : InstRW<[LSU, GroupAlone], (instregex "SPT$")>;
+def : InstRW<[LSU, LSU, GroupAlone], (instregex "SPT$")>;
def : InstRW<[LSU, LSU, LSU, FXa, FXa, FXb, Lat9, GroupAlone],
(instregex "STCK(F)?$")>;
def : InstRW<[LSU, LSU, LSU, LSU, FXa, FXa, FXb, FXb, Lat11, GroupAlone],
(instregex "STCKE$")>;
def : InstRW<[FXb, LSU, Lat9], (instregex "STCKC$")>;
-def : InstRW<[LSU, LSU, FXb, Lat3], (instregex "STPT$")>;
+def : InstRW<[LSU, LSU, FXb, Lat5, BeginGroup], (instregex "STPT$")>;
//===----------------------------------------------------------------------===//
// System: CPU-Related Instructions
@@ -1459,7 +1484,7 @@ def : InstRW<[FXb, LSU, Lat30], (instregex "STIDP$")>;
def : InstRW<[FXb, LSU, Lat30], (instregex "STSI$")>;
def : InstRW<[FXb, LSU, Lat30], (instregex "STFL(E)?$")>;
def : InstRW<[FXb, LSU, Lat30], (instregex "ECAG$")>;
-def : InstRW<[FXb, LSU, Lat30], (instregex "ECTG$")>;
+def : InstRW<[FXa, LSU, Lat30], (instregex "ECTG$")>;
def : InstRW<[FXb, Lat30], (instregex "PTF$")>;
def : InstRW<[FXb, Lat30], (instregex "PCKMO$")>;
@@ -1468,7 +1493,7 @@ def : InstRW<[FXb, Lat30], (instregex "PCKMO$")>;
//===----------------------------------------------------------------------===//
def : InstRW<[FXb, Lat30], (instregex "SVC$")>;
-def : InstRW<[FXb], (instregex "MC$")>;
+def : InstRW<[FXb, GroupAlone], (instregex "MC$")>;
def : InstRW<[FXb, Lat30], (instregex "DIAG$")>;
def : InstRW<[FXb], (instregex "TRAC(E|G)$")>;
def : InstRW<[FXb, Lat30], (instregex "TRAP(2|4)$")>;
@@ -1483,7 +1508,8 @@ def : InstRW<[FXb, LSU, Lat30], (instregex "SIE$")>;
def : InstRW<[FXb], (instregex "LPP$")>;
def : InstRW<[FXb, Lat30], (instregex "ECPGA$")>;
def : InstRW<[FXb, Lat30], (instregex "E(C|P)CTR$")>;
-def : InstRW<[FXb, LSU, Lat30], (instregex "L(C|P|S)CTL$")>;
+def : InstRW<[FXb, Lat30], (instregex "LCCTL$")>;
+def : InstRW<[FXb, LSU, Lat30], (instregex "L(P|S)CTL$")>;
def : InstRW<[FXb, LSU, Lat30], (instregex "Q(S|CTR)I$")>;
def : InstRW<[FXb, Lat30], (instregex "S(C|P)CTR$")>;
diff --git a/lib/Target/SystemZ/SystemZScheduleZ196.td b/lib/Target/SystemZ/SystemZScheduleZ196.td
index 128049a09086..e3e1999d8ad8 100644
--- a/lib/Target/SystemZ/SystemZScheduleZ196.td
+++ b/lib/Target/SystemZ/SystemZScheduleZ196.td
@@ -627,8 +627,8 @@ def : InstRW<[FXU, Lat7, GroupAlone], (instregex "FLOGR$")>;
def : InstRW<[FXU, Lat3], (instregex "POPCNT$")>;
// Extend
-def : InstRW<[FXU], (instregex "AEXT128_64$")>;
-def : InstRW<[FXU], (instregex "ZEXT128_(32|64)$")>;
+def : InstRW<[FXU], (instregex "AEXT128$")>;
+def : InstRW<[FXU], (instregex "ZEXT128$")>;
// String instructions
def : InstRW<[FXU, LSU, Lat30], (instregex "SRST$")>;
diff --git a/lib/Target/SystemZ/SystemZScheduleZEC12.td b/lib/Target/SystemZ/SystemZScheduleZEC12.td
index 76b378454631..59f37205f412 100644
--- a/lib/Target/SystemZ/SystemZScheduleZEC12.td
+++ b/lib/Target/SystemZ/SystemZScheduleZEC12.td
@@ -665,8 +665,8 @@ def : InstRW<[FXU, Lat7, GroupAlone], (instregex "FLOGR$")>;
def : InstRW<[FXU, Lat3], (instregex "POPCNT$")>;
// Extend
-def : InstRW<[FXU], (instregex "AEXT128_64$")>;
-def : InstRW<[FXU], (instregex "ZEXT128_(32|64)$")>;
+def : InstRW<[FXU], (instregex "AEXT128$")>;
+def : InstRW<[FXU], (instregex "ZEXT128$")>;
// String instructions
def : InstRW<[FXU, LSU, Lat30], (instregex "SRST$")>;
diff --git a/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
index ce5c57e0f519..9ac768b2189d 100644
--- a/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
+++ b/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
@@ -779,15 +779,14 @@ int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
// vlvgp will insert two grs into a vector register, so only count half the
// number of instructions.
- if (Opcode == Instruction::InsertElement &&
- Val->getScalarType()->isIntegerTy(64))
+ if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
return ((Index % 2 == 0) ? 1 : 0);
if (Opcode == Instruction::ExtractElement) {
int Cost = ((Val->getScalarSizeInBits() == 1) ? 2 /*+test-under-mask*/ : 1);
// Give a slight penalty for moving out of vector pipeline to FXU unit.
- if (Index == 0 && Val->getScalarType()->isIntegerTy())
+ if (Index == 0 && Val->isIntOrIntVectorTy())
Cost += 1;
return Cost;
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
index ad59f2f40587..00bf02469bdd 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
@@ -115,8 +115,8 @@ void WebAssemblyTargetAsmStreamer::emitStackPointer(uint32_t Index) {
void WebAssemblyTargetAsmStreamer::emitEndFunc() { OS << "\t.endfunc\n"; }
void WebAssemblyTargetAsmStreamer::emitIndirectFunctionType(
- StringRef name, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) {
- OS << "\t.functype\t" << name;
+ MCSymbol *Symbol, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) {
+ OS << "\t.functype\t" << Symbol->getName();
if (Results.empty())
OS << ", void";
else {
@@ -171,7 +171,7 @@ void WebAssemblyTargetELFStreamer::emitIndIdx(const MCExpr *Value) {
}
void WebAssemblyTargetELFStreamer::emitIndirectFunctionType(
- StringRef name, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) {
+ MCSymbol *Symbol, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) {
// Nothing to emit here. TODO: Re-design how linking works and re-evaluate
// whether it's necessary for .o files to declare indirect function types.
}
@@ -255,9 +255,25 @@ void WebAssemblyTargetWasmStreamer::emitIndIdx(const MCExpr *Value) {
}
void WebAssemblyTargetWasmStreamer::emitIndirectFunctionType(
- StringRef name, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) {
- // Nothing to emit here. TODO: Re-design how linking works and re-evaluate
- // whether it's necessary for .o files to declare indirect function types.
+ MCSymbol *Symbol, SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results) {
+ MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Symbol);
+ if (WasmSym->isFunction()) {
+ // Symbol already has its arguments and result set.
+ return;
+ }
+
+ SmallVector<wasm::ValType, 4> ValParams;
+ for (MVT Ty : Params)
+ ValParams.push_back(WebAssembly::toValType(Ty));
+
+ SmallVector<wasm::ValType, 1> ValResults;
+ for (MVT Ty : Results)
+ ValResults.push_back(WebAssembly::toValType(Ty));
+
+ WasmSym->setParams(std::move(ValParams));
+ WasmSym->setReturns(std::move(ValResults));
+ WasmSym->setIsFunction(true);
}
void WebAssemblyTargetWasmStreamer::emitGlobalImport(StringRef name) {
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
index 5ad147e5e596..102d7219a1e7 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
@@ -44,7 +44,7 @@ public:
/// .endfunc
virtual void emitEndFunc() = 0;
/// .functype
- virtual void emitIndirectFunctionType(StringRef name,
+ virtual void emitIndirectFunctionType(MCSymbol *Symbol,
SmallVectorImpl<MVT> &Params,
SmallVectorImpl<MVT> &Results) = 0;
/// .indidx
@@ -69,7 +69,7 @@ public:
void emitGlobal(ArrayRef<wasm::Global> Globals) override;
void emitStackPointer(uint32_t Index) override;
void emitEndFunc() override;
- void emitIndirectFunctionType(StringRef name,
+ void emitIndirectFunctionType(MCSymbol *Symbol,
SmallVectorImpl<MVT> &Params,
SmallVectorImpl<MVT> &Results) override;
void emitIndIdx(const MCExpr *Value) override;
@@ -87,7 +87,7 @@ public:
void emitGlobal(ArrayRef<wasm::Global> Globals) override;
void emitStackPointer(uint32_t Index) override;
void emitEndFunc() override;
- void emitIndirectFunctionType(StringRef name,
+ void emitIndirectFunctionType(MCSymbol *Symbol,
SmallVectorImpl<MVT> &Params,
SmallVectorImpl<MVT> &Results) override;
void emitIndIdx(const MCExpr *Value) override;
@@ -105,7 +105,7 @@ public:
void emitGlobal(ArrayRef<wasm::Global> Globals) override;
void emitStackPointer(uint32_t Index) override;
void emitEndFunc() override;
- void emitIndirectFunctionType(StringRef name,
+ void emitIndirectFunctionType(MCSymbol *Symbol,
SmallVectorImpl<MVT> &Params,
SmallVectorImpl<MVT> &Results) override;
void emitIndIdx(const MCExpr *Value) override;
diff --git a/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
index f51585a10ca1..211358ad66cd 100644
--- a/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
@@ -84,7 +84,7 @@ void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) {
SmallVector<MVT, 4> Results;
SmallVector<MVT, 4> Params;
ComputeSignatureVTs(F, TM, Params, Results);
- getTargetStreamer()->emitIndirectFunctionType(F.getName(), Params,
+ getTargetStreamer()->emitIndirectFunctionType(getSymbol(&F), Params,
Results);
}
}
@@ -214,11 +214,8 @@ void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *WebAssemblyAsmPrinter::lowerConstant(const Constant *CV) {
if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
if (GV->getValueType()->isFunctionTy()) {
- MCSymbol* Sym = getSymbol(GV);
- if (!isa<MCSymbolELF>(Sym))
- cast<MCSymbolWasm>(Sym)->setIsFunction(true);
return MCSymbolRefExpr::create(
- Sym, MCSymbolRefExpr::VK_WebAssembly_FUNCTION, OutContext);
+ getSymbol(GV), MCSymbolRefExpr::VK_WebAssembly_FUNCTION, OutContext);
}
return AsmPrinter::lowerConstant(CV);
}
diff --git a/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp b/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
index 1691808d05a0..700111743ee8 100644
--- a/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
@@ -132,7 +132,7 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
// no blocks not dominated by the loop header.
// - It's desirable to preserve the original block order when possible.
// We use two ready lists; Preferred and Ready. Preferred has recently
- // processed sucessors, to help preserve block sequences from the original
+ // processed successors, to help preserve block sequences from the original
// order. Ready has the remaining ready blocks.
PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
CompareBlockNumbers>
diff --git a/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
index ff186eb91503..8880539804ca 100644
--- a/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
@@ -112,8 +112,6 @@ MCOperand WebAssemblyMCInstLower::LowerSymbolOperand(MCSymbol *Sym,
MCSymbolRefExpr::VariantKind VK =
IsFunc ? MCSymbolRefExpr::VK_WebAssembly_FUNCTION
: MCSymbolRefExpr::VK_None;
- if (!isa<MCSymbolELF>(Sym))
- cast<MCSymbolWasm>(Sym)->setIsFunction(IsFunc);
const MCExpr *Expr = MCSymbolRefExpr::create(Sym, VK, Ctx);
diff --git a/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp b/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
index c02ef4a1c399..2599064334ee 100644
--- a/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
@@ -394,11 +394,22 @@ RuntimeLibcallSignatures[RTLIB::UNKNOWN_LIBCALL] = {
/* MEMMOVE */ iPTR_func_iPTR_iPTR_iPTR,
// ELEMENT-WISE ATOMIC MEMORY
-/* MEMCPY_ELEMENT_ATOMIC_1 */ iPTR_func_iPTR_iPTR_iPTR,
-/* MEMCPY_ELEMENT_ATOMIC_2 */ iPTR_func_iPTR_iPTR_iPTR,
-/* MEMCPY_ELEMENT_ATOMIC_4 */ iPTR_func_iPTR_iPTR_iPTR,
-/* MEMCPY_ELEMENT_ATOMIC_8 */ iPTR_func_iPTR_iPTR_iPTR,
-/* MEMCPY_ELEMENT_ATOMIC_16 */ iPTR_func_iPTR_iPTR_iPTR,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_1 */ unsupported,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_2 */ unsupported,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_4 */ unsupported,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_8 */ unsupported,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_16 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16 */ unsupported,
+
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_1 */ unsupported,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_2 */ unsupported,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_4 */ unsupported,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_8 */ unsupported,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_16 */ unsupported,
// EXCEPTION HANDLING
/* UNWIND_RESUME */ unsupported,
@@ -839,11 +850,21 @@ RuntimeLibcallNames[RTLIB::UNKNOWN_LIBCALL] = {
/* MEMCPY */ "memcpy",
/* MEMMOVE */ "memset",
/* MEMSET */ "memmove",
-/* MEMCPY_ELEMENT_ATOMIC_1 */ "MEMCPY_ELEMENT_ATOMIC_1",
-/* MEMCPY_ELEMENT_ATOMIC_2 */ "MEMCPY_ELEMENT_ATOMIC_2",
-/* MEMCPY_ELEMENT_ATOMIC_4 */ "MEMCPY_ELEMENT_ATOMIC_4",
-/* MEMCPY_ELEMENT_ATOMIC_8 */ "MEMCPY_ELEMENT_ATOMIC_8",
-/* MEMCPY_ELEMENT_ATOMIC_16 */ "MEMCPY_ELEMENT_ATOMIC_16",
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_1 */ nullptr,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_2 */ nullptr,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_4 */ nullptr,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_8 */ nullptr,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_16 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_1 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_2 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_4 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_8 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_16 */ nullptr,
/* UNWIND_RESUME */ "_Unwind_Resume",
/* SYNC_VAL_COMPARE_AND_SWAP_1 */ "__sync_val_compare_and_swap_1",
/* SYNC_VAL_COMPARE_AND_SWAP_2 */ "__sync_val_compare_and_swap_2",
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 825f23dc52d9..c1d216c8b7af 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -2453,8 +2453,8 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
break;
}
- // In MS inline asm curly braces mark the begining/end of a block, therefore
- // they should be interepreted as end of statement
+ // In MS inline asm curly braces mark the beginning/end of a block,
+ // therefore they should be interpreted as end of statement
CurlyAsEndOfStatement =
isParsingIntelSyntax() && isParsingInlineAsm() &&
(getLexer().is(AsmToken::LCurly) || getLexer().is(AsmToken::RCurly));
diff --git a/lib/Target/X86/InstPrinter/X86InstComments.cpp b/lib/Target/X86/InstPrinter/X86InstComments.cpp
index 5e809c34325e..f5f3a4cc83dc 100644
--- a/lib/Target/X86/InstPrinter/X86InstComments.cpp
+++ b/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -1038,7 +1038,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::EXTRQI:
if (MI->getOperand(2).isImm() &&
MI->getOperand(3).isImm())
- DecodeEXTRQIMask(MI->getOperand(2).getImm(),
+ DecodeEXTRQIMask(MVT::v16i8, MI->getOperand(2).getImm(),
MI->getOperand(3).getImm(),
ShuffleMask);
@@ -1049,7 +1049,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::INSERTQI:
if (MI->getOperand(3).isImm() &&
MI->getOperand(4).isImm())
- DecodeINSERTQIMask(MI->getOperand(3).getImm(),
+ DecodeINSERTQIMask(MVT::v16i8, MI->getOperand(3).getImm(),
MI->getOperand(4).getImm(),
ShuffleMask);
diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 914fb36f91a7..733eac7c0321 100644
--- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -110,7 +110,7 @@ public:
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const override {
+ uint64_t Value, bool IsResolved) const override {
unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());
assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index 1be5aec849fc..8a0fbfb45b22 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -452,15 +452,20 @@ void DecodeScalarMoveMask(MVT VT, bool IsLoad, SmallVectorImpl<int> &Mask) {
Mask.push_back(IsLoad ? static_cast<int>(SM_SentinelZero) : i);
}
-void DecodeEXTRQIMask(int Len, int Idx,
+void DecodeEXTRQIMask(MVT VT, int Len, int Idx,
SmallVectorImpl<int> &ShuffleMask) {
+ assert(VT.is128BitVector() && "Expected 128-bit vector");
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned EltSize = VT.getScalarSizeInBits();
+ unsigned HalfElts = NumElts / 2;
+
// Only the bottom 6 bits are valid for each immediate.
Len &= 0x3F;
Idx &= 0x3F;
// We can only decode this bit extraction instruction as a shuffle if both the
- // length and index work with whole bytes.
- if (0 != (Len % 8) || 0 != (Idx % 8))
+ // length and index work with whole elements.
+ if (0 != (Len % EltSize) || 0 != (Idx % EltSize))
return;
// A length of zero is equivalent to a bit length of 64.
@@ -469,33 +474,38 @@ void DecodeEXTRQIMask(int Len, int Idx,
// If the length + index exceeds the bottom 64 bits the result is undefined.
if ((Len + Idx) > 64) {
- ShuffleMask.append(16, SM_SentinelUndef);
+ ShuffleMask.append(NumElts, SM_SentinelUndef);
return;
}
- // Convert index and index to work with bytes.
- Len /= 8;
- Idx /= 8;
+ // Convert length and index to work with elements.
+ Len /= EltSize;
+ Idx /= EltSize;
- // EXTRQ: Extract Len bytes starting from Idx. Zero pad the remaining bytes
- // of the lower 64-bits. The upper 64-bits are undefined.
+ // EXTRQ: Extract Len elements starting from Idx. Zero pad the remaining
+ // elements of the lower 64-bits. The upper 64-bits are undefined.
for (int i = 0; i != Len; ++i)
ShuffleMask.push_back(i + Idx);
- for (int i = Len; i != 8; ++i)
+ for (int i = Len; i != (int)HalfElts; ++i)
ShuffleMask.push_back(SM_SentinelZero);
- for (int i = 8; i != 16; ++i)
+ for (int i = HalfElts; i != (int)NumElts; ++i)
ShuffleMask.push_back(SM_SentinelUndef);
}
-void DecodeINSERTQIMask(int Len, int Idx,
+void DecodeINSERTQIMask(MVT VT, int Len, int Idx,
SmallVectorImpl<int> &ShuffleMask) {
+ assert(VT.is128BitVector() && "Expected 128-bit vector");
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned EltSize = VT.getScalarSizeInBits();
+ unsigned HalfElts = NumElts / 2;
+
// Only the bottom 6 bits are valid for each immediate.
Len &= 0x3F;
Idx &= 0x3F;
// We can only decode this bit insertion instruction as a shuffle if both the
- // length and index work with whole bytes.
- if (0 != (Len % 8) || 0 != (Idx % 8))
+ // length and index work with whole elements.
+ if (0 != (Len % EltSize) || 0 != (Idx % EltSize))
return;
// A length of zero is equivalent to a bit length of 64.
@@ -504,24 +514,24 @@ void DecodeINSERTQIMask(int Len, int Idx,
// If the length + index exceeds the bottom 64 bits the result is undefined.
if ((Len + Idx) > 64) {
- ShuffleMask.append(16, SM_SentinelUndef);
+ ShuffleMask.append(NumElts, SM_SentinelUndef);
return;
}
- // Convert index and index to work with bytes.
- Len /= 8;
- Idx /= 8;
+ // Convert length and index to work with elements.
+ Len /= EltSize;
+ Idx /= EltSize;
- // INSERTQ: Extract lowest Len bytes from lower half of second source and
- // insert over first source starting at Idx byte. The upper 64-bits are
+ // INSERTQ: Extract lowest Len elements from lower half of second source and
+ // insert over first source starting at Idx element. The upper 64-bits are
// undefined.
for (int i = 0; i != Idx; ++i)
ShuffleMask.push_back(i);
for (int i = 0; i != Len; ++i)
- ShuffleMask.push_back(i + 16);
- for (int i = Idx + Len; i != 8; ++i)
+ ShuffleMask.push_back(i + NumElts);
+ for (int i = Idx + Len; i != (int)HalfElts; ++i)
ShuffleMask.push_back(i);
- for (int i = 8; i != 16; ++i)
+ for (int i = HalfElts; i != (int)NumElts; ++i)
ShuffleMask.push_back(SM_SentinelUndef);
}
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.h b/lib/Target/X86/Utils/X86ShuffleDecode.h
index 17619d09d059..251c9f7558ec 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -134,12 +134,12 @@ void DecodeZeroMoveLowMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
void DecodeScalarMoveMask(MVT VT, bool IsLoad,
SmallVectorImpl<int> &ShuffleMask);
-/// Decode a SSE4A EXTRQ instruction as a v16i8 shuffle mask.
-void DecodeEXTRQIMask(int Len, int Idx,
+/// Decode a SSE4A EXTRQ instruction as a shuffle mask.
+void DecodeEXTRQIMask(MVT VT, int Len, int Idx,
SmallVectorImpl<int> &ShuffleMask);
-/// Decode a SSE4A INSERTQ instruction as a v16i8 shuffle mask.
-void DecodeINSERTQIMask(int Len, int Idx,
+/// Decode a SSE4A INSERTQ instruction as a shuffle mask.
+void DecodeINSERTQIMask(MVT VT, int Len, int Idx,
SmallVectorImpl<int> &ShuffleMask);
/// Decode a VPERMILPD/VPERMILPS variable mask from a raw array of constants.
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index 7437ebacfac3..4ca57fe9fb00 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -451,6 +451,7 @@ class GoldmontProc<string Name> : ProcessorModel<Name, SLMModel, [
FeatureLAHFSAHF,
FeatureMPX,
FeatureSHA,
+ FeatureRDRAND,
FeatureRDSEED,
FeatureXSAVE,
FeatureXSAVEOPT,
diff --git a/lib/Target/X86/X86CallLowering.cpp b/lib/Target/X86/X86CallLowering.cpp
index 161bfa7b5474..99aeec67c326 100644
--- a/lib/Target/X86/X86CallLowering.cpp
+++ b/lib/Target/X86/X86CallLowering.cpp
@@ -19,6 +19,7 @@
#include "X86InstrInfo.h"
#include "X86TargetMachine.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
@@ -35,7 +36,7 @@ using namespace llvm;
X86CallLowering::X86CallLowering(const X86TargetLowering &TLI)
: CallLowering(&TLI) {}
-void X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
+bool X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
SmallVectorImpl<ArgInfo> &SplitArgs,
const DataLayout &DL,
MachineRegisterInfo &MRI,
@@ -43,14 +44,24 @@ void X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
const X86TargetLowering &TLI = *getTLI<X86TargetLowering>();
LLVMContext &Context = OrigArg.Ty->getContext();
- EVT VT = TLI.getValueType(DL, OrigArg.Ty);
+
+ SmallVector<EVT, 4> SplitVTs;
+ SmallVector<uint64_t, 4> Offsets;
+ ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
+
+ if (SplitVTs.size() != 1) {
+ // TODO: support struct/array split
+ return false;
+ }
+
+ EVT VT = SplitVTs[0];
unsigned NumParts = TLI.getNumRegisters(Context, VT);
if (NumParts == 1) {
// replace the original type ( pointer -> GPR ).
SplitArgs.emplace_back(OrigArg.Reg, VT.getTypeForEVT(Context),
OrigArg.Flags, OrigArg.IsFixed);
- return;
+ return true;
}
SmallVector<unsigned, 8> SplitRegs;
@@ -67,6 +78,7 @@ void X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
}
PerformArgSplit(SplitRegs);
+ return true;
}
namespace {
@@ -113,9 +125,11 @@ bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F);
SmallVector<ArgInfo, 8> SplitArgs;
- splitToValueTypes(
- OrigArg, SplitArgs, DL, MRI,
- [&](ArrayRef<unsigned> Regs) { MIRBuilder.buildUnmerge(Regs, VReg); });
+ if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
+ [&](ArrayRef<unsigned> Regs) {
+ MIRBuilder.buildUnmerge(Regs, VReg);
+ }))
+ return false;
FuncReturnHandler Handler(MIRBuilder, MRI, MIB, RetCC_X86);
if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
@@ -181,12 +195,23 @@ bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 8> SplitArgs;
unsigned Idx = 0;
for (auto &Arg : F.args()) {
+
+ // TODO: handle cases that are not simple.
+ if (Arg.hasAttribute(Attribute::ByVal) ||
+ Arg.hasAttribute(Attribute::InReg) ||
+ Arg.hasAttribute(Attribute::StructRet) ||
+ Arg.hasAttribute(Attribute::SwiftSelf) ||
+ Arg.hasAttribute(Attribute::SwiftError) ||
+ Arg.hasAttribute(Attribute::Nest))
+ return false;
+
ArgInfo OrigArg(VRegs[Idx], Arg.getType());
- setArgFlags(OrigArg, Idx + 1, DL, F);
- splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
- [&](ArrayRef<unsigned> Regs) {
- MIRBuilder.buildMerge(VRegs[Idx], Regs);
- });
+ setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);
+ if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
+ [&](ArrayRef<unsigned> Regs) {
+ MIRBuilder.buildMerge(VRegs[Idx], Regs);
+ }))
+ return false;
Idx++;
}
diff --git a/lib/Target/X86/X86CallLowering.h b/lib/Target/X86/X86CallLowering.h
index 8a8afb568298..6a5dabf33a0a 100644
--- a/lib/Target/X86/X86CallLowering.h
+++ b/lib/Target/X86/X86CallLowering.h
@@ -39,7 +39,7 @@ private:
/// A function of this type is used to perform value split action.
typedef std::function<void(ArrayRef<unsigned>)> SplitArgTy;
- void splitToValueTypes(const ArgInfo &OrigArgInfo,
+ bool splitToValueTypes(const ArgInfo &OrigArgInfo,
SmallVectorImpl<ArgInfo> &SplitArgs,
const DataLayout &DL, MachineRegisterInfo &MRI,
SplitArgTy SplitArg) const;
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index 7d146d050a5c..6decb550ad5f 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -651,7 +651,15 @@ def CC_X86_64_GHC : CallingConv<[
// Pass in STG registers: F1, F2, F3, F4, D1, D2
CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
CCIfSubtarget<"hasSSE1()",
- CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
+ CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>,
+ // AVX
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+ CCIfSubtarget<"hasAVX()",
+ CCAssignToReg<[YMM1, YMM2, YMM3, YMM4, YMM5, YMM6]>>>,
+ // AVX-512
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
+ CCIfSubtarget<"hasAVX512()",
+ CCAssignToReg<[ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6]>>>
]>;
def CC_X86_64_HiPE : CallingConv<[
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 621505aaded9..ee9e78146305 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -3039,6 +3039,9 @@ bool X86FastISel::fastLowerArguments() {
if (!Subtarget->is64Bit())
return false;
+ if (Subtarget->useSoftFloat())
+ return false;
+
// Only handle simple cases. i.e. Up to 6 i32/i64 scalar arguments.
unsigned GPRCnt = 0;
unsigned FPRCnt = 0;
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index e3aa227702be..f294e819090b 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -972,7 +972,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
X86FI->setCalleeSavedFrameSize(
X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
- bool UseRedZone = false;
bool UseStackProbe = !STI.getTargetLowering()->getStackProbeSymbolName(MF).empty();
// The default stack probe size is 4096 if the function has no stackprobesize
@@ -1011,7 +1010,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
MFI.setStackSize(StackSize);
- UseRedZone = true;
}
// Insert stack pointer adjustment for later moving of return addr. Only
@@ -1189,7 +1187,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
- assert(!UseRedZone && "The Red Zone is not accounted for in stack probes");
+ assert(!X86FI->getUsesRedZone() &&
+ "The Red Zone is not accounted for in stack probes");
// Check whether EAX is livein for this block.
bool isEAXAlive = isEAXLiveIn(MBB);
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index b89914f8893e..65486cf7f529 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -4217,6 +4217,8 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::PSHUFLW:
case X86ISD::SHUFP:
case X86ISD::INSERTPS:
+ case X86ISD::EXTRQI:
+ case X86ISD::INSERTQI:
case X86ISD::PALIGNR:
case X86ISD::VSHLDQ:
case X86ISD::VSRLDQ:
@@ -5554,6 +5556,24 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
+ case X86ISD::EXTRQI:
+ if (isa<ConstantSDNode>(N->getOperand(1)) &&
+ isa<ConstantSDNode>(N->getOperand(2))) {
+ int BitLen = N->getConstantOperandVal(1);
+ int BitIdx = N->getConstantOperandVal(2);
+ DecodeEXTRQIMask(VT, BitLen, BitIdx, Mask);
+ IsUnary = true;
+ }
+ break;
+ case X86ISD::INSERTQI:
+ if (isa<ConstantSDNode>(N->getOperand(2)) &&
+ isa<ConstantSDNode>(N->getOperand(3))) {
+ int BitLen = N->getConstantOperandVal(2);
+ int BitIdx = N->getConstantOperandVal(3);
+ DecodeINSERTQIMask(VT, BitLen, BitIdx, Mask);
+ IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
+ }
+ break;
case X86ISD::UNPCKH:
DecodeUNPCKHMask(VT, Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
@@ -9317,11 +9337,11 @@ static SDValue lowerVectorShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
return DAG.getBitcast(VT, V);
}
-/// \brief Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
-static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
- SDValue V2, ArrayRef<int> Mask,
- const APInt &Zeroable,
- SelectionDAG &DAG) {
+// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
+// Remainder of lower half result is zero and upper half is all undef.
+static bool matchVectorShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
+ ArrayRef<int> Mask, uint64_t &BitLen,
+ uint64_t &BitIdx, const APInt &Zeroable) {
int Size = Mask.size();
int HalfSize = Size / 2;
assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
@@ -9329,120 +9349,133 @@ static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
// Upper half must be undefined.
if (!isUndefInRange(Mask, HalfSize, HalfSize))
- return SDValue();
+ return false;
- // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
- // Remainder of lower half result is zero and upper half is all undef.
- auto LowerAsEXTRQ = [&]() {
- // Determine the extraction length from the part of the
- // lower half that isn't zeroable.
- int Len = HalfSize;
- for (; Len > 0; --Len)
- if (!Zeroable[Len - 1])
- break;
- assert(Len > 0 && "Zeroable shuffle mask");
+ // Determine the extraction length from the part of the
+ // lower half that isn't zeroable.
+ int Len = HalfSize;
+ for (; Len > 0; --Len)
+ if (!Zeroable[Len - 1])
+ break;
+ assert(Len > 0 && "Zeroable shuffle mask");
- // Attempt to match first Len sequential elements from the lower half.
- SDValue Src;
- int Idx = -1;
- for (int i = 0; i != Len; ++i) {
- int M = Mask[i];
- if (M < 0)
- continue;
- SDValue &V = (M < Size ? V1 : V2);
- M = M % Size;
+ // Attempt to match first Len sequential elements from the lower half.
+ SDValue Src;
+ int Idx = -1;
+ for (int i = 0; i != Len; ++i) {
+ int M = Mask[i];
+ if (M == SM_SentinelUndef)
+ continue;
+ SDValue &V = (M < Size ? V1 : V2);
+ M = M % Size;
- // The extracted elements must start at a valid index and all mask
- // elements must be in the lower half.
- if (i > M || M >= HalfSize)
- return SDValue();
+ // The extracted elements must start at a valid index and all mask
+ // elements must be in the lower half.
+ if (i > M || M >= HalfSize)
+ return false;
- if (Idx < 0 || (Src == V && Idx == (M - i))) {
- Src = V;
- Idx = M - i;
- continue;
- }
- return SDValue();
+ if (Idx < 0 || (Src == V && Idx == (M - i))) {
+ Src = V;
+ Idx = M - i;
+ continue;
}
+ return false;
+ }
- if (Idx < 0)
- return SDValue();
+ if (!Src || Idx < 0)
+ return false;
- assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
- int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
- int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
- return DAG.getNode(X86ISD::EXTRQI, DL, VT, Src,
- DAG.getConstant(BitLen, DL, MVT::i8),
- DAG.getConstant(BitIdx, DL, MVT::i8));
- };
+ assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
+ BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
+ BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
+ V1 = Src;
+ return true;
+}
+
+// INSERTQ: Extract lowest Len elements from lower half of second source and
+// insert over first source, starting at Idx.
+// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
+static bool matchVectorShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
+ ArrayRef<int> Mask, uint64_t &BitLen,
+ uint64_t &BitIdx) {
+ int Size = Mask.size();
+ int HalfSize = Size / 2;
+ assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
+
+ // Upper half must be undefined.
+ if (!isUndefInRange(Mask, HalfSize, HalfSize))
+ return false;
+
+ for (int Idx = 0; Idx != HalfSize; ++Idx) {
+ SDValue Base;
+
+ // Attempt to match first source from mask before insertion point.
+ if (isUndefInRange(Mask, 0, Idx)) {
+ /* EMPTY */
+ } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
+ Base = V1;
+ } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
+ Base = V2;
+ } else {
+ continue;
+ }
- if (SDValue ExtrQ = LowerAsEXTRQ())
- return ExtrQ;
+ // Extend the extraction length looking to match both the insertion of
+ // the second source and the remaining elements of the first.
+ for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
+ SDValue Insert;
+ int Len = Hi - Idx;
- // INSERTQ: Extract lowest Len elements from lower half of second source and
- // insert over first source, starting at Idx.
- // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
- auto LowerAsInsertQ = [&]() {
- for (int Idx = 0; Idx != HalfSize; ++Idx) {
- SDValue Base;
+ // Match insertion.
+ if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
+ Insert = V1;
+ } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
+ Insert = V2;
+ } else {
+ continue;
+ }
- // Attempt to match first source from mask before insertion point.
- if (isUndefInRange(Mask, 0, Idx)) {
+ // Match the remaining elements of the lower half.
+ if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
/* EMPTY */
- } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
+ } else if ((!Base || (Base == V1)) &&
+ isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
Base = V1;
- } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
+ } else if ((!Base || (Base == V2)) &&
+ isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
+ Size + Hi)) {
Base = V2;
} else {
continue;
}
- // Extend the extraction length looking to match both the insertion of
- // the second source and the remaining elements of the first.
- for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
- SDValue Insert;
- int Len = Hi - Idx;
-
- // Match insertion.
- if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
- Insert = V1;
- } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
- Insert = V2;
- } else {
- continue;
- }
-
- // Match the remaining elements of the lower half.
- if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
- /* EMPTY */
- } else if ((!Base || (Base == V1)) &&
- isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
- Base = V1;
- } else if ((!Base || (Base == V2)) &&
- isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
- Size + Hi)) {
- Base = V2;
- } else {
- continue;
- }
-
- // We may not have a base (first source) - this can safely be undefined.
- if (!Base)
- Base = DAG.getUNDEF(VT);
-
- int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
- int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
- return DAG.getNode(X86ISD::INSERTQI, DL, VT, Base, Insert,
- DAG.getConstant(BitLen, DL, MVT::i8),
- DAG.getConstant(BitIdx, DL, MVT::i8));
- }
+ BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
+ BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
+ V1 = Base;
+ V2 = Insert;
+ return true;
}
+ }
- return SDValue();
- };
+ return false;
+}
+
+/// \brief Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
+static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
+ SDValue V2, ArrayRef<int> Mask,
+ const APInt &Zeroable,
+ SelectionDAG &DAG) {
+ uint64_t BitLen, BitIdx;
+ if (matchVectorShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
+ return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
+ DAG.getConstant(BitLen, DL, MVT::i8),
+ DAG.getConstant(BitIdx, DL, MVT::i8));
- if (SDValue InsertQ = LowerAsInsertQ())
- return InsertQ;
+ if (matchVectorShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
+ return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
+ V2 ? V2 : DAG.getUNDEF(VT),
+ DAG.getConstant(BitLen, DL, MVT::i8),
+ DAG.getConstant(BitIdx, DL, MVT::i8));
return SDValue();
}
@@ -22817,7 +22850,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
auto Builder = IRBuilder<>(AI);
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- auto SynchScope = AI->getSynchScope();
+ auto SSID = AI->getSyncScopeID();
// We must restrict the ordering to avoid generating loads with Release or
// ReleaseAcquire orderings.
auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
@@ -22839,7 +22872,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// otherwise, we might be able to be more aggressive on relaxed idempotent
// rmw. In practice, they do not look useful, so we don't try to be
// especially clever.
- if (SynchScope == SingleThread)
+ if (SSID == SyncScope::SingleThread)
// FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
// the IR level, so we must wrap it in an intrinsic.
return nullptr;
@@ -22858,7 +22891,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// Finally we can emit the atomic load.
LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
AI->getType()->getPrimitiveSizeInBits());
- Loaded->setAtomic(Order, SynchScope);
+ Loaded->setAtomic(Order, SSID);
AI->replaceAllUsesWith(Loaded);
AI->eraseFromParent();
return Loaded;
@@ -22869,13 +22902,13 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
SDLoc dl(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
- SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
+ SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
- FenceScope == CrossThread) {
+ FenceSSID == SyncScope::System) {
if (Subtarget.hasMFence())
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
@@ -23203,6 +23236,20 @@ static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
SDLoc DL(Op.getNode());
SDValue Op0 = Op.getOperand(0);
+ // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
+ if (Subtarget.hasVPOPCNTDQ()) {
+ if (VT == MVT::v8i16) {
+ Op = DAG.getNode(X86ISD::VZEXT, DL, MVT::v8i64, Op0);
+ Op = DAG.getNode(ISD::CTPOP, DL, MVT::v8i64, Op);
+ return DAG.getNode(X86ISD::VTRUNC, DL, VT, Op);
+ }
+ if (VT == MVT::v16i8 || VT == MVT::v16i16) {
+ Op = DAG.getNode(X86ISD::VZEXT, DL, MVT::v16i32, Op0);
+ Op = DAG.getNode(ISD::CTPOP, DL, MVT::v16i32, Op);
+ return DAG.getNode(X86ISD::VTRUNC, DL, VT, Op);
+ }
+ }
+
if (!Subtarget.hasSSSE3()) {
// We can't use the fast LUT approach, so fall back on vectorized bitmath.
assert(VT.is128BitVector() && "Only 128-bit vectors supported in SSE!");
@@ -27101,6 +27148,7 @@ static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
// permute instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
+ const APInt &Zeroable,
bool AllowFloatDomain,
bool AllowIntDomain,
const X86Subtarget &Subtarget,
@@ -27111,38 +27159,67 @@ static bool matchUnaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
- bool ContainsZeros = false;
- APInt Zeroable(NumMaskElts, false);
- for (unsigned i = 0; i != NumMaskElts; ++i) {
- int M = Mask[i];
- if (isUndefOrZero(M))
- Zeroable.setBit(i);
- ContainsZeros |= (M == SM_SentinelZero);
- }
+ bool ContainsZeros =
+ llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
- // Attempt to match against byte/bit shifts.
- // FIXME: Add 512-bit support.
- if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
- (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
- int ShiftAmt = matchVectorShuffleAsShift(ShuffleVT, Shuffle,
- MaskScalarSizeInBits, Mask,
- 0, Zeroable, Subtarget);
- if (0 < ShiftAmt) {
- PermuteImm = (unsigned)ShiftAmt;
+ // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
+ if (!ContainsZeros && MaskScalarSizeInBits == 64) {
+ // Check for lane crossing permutes.
+ if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
+ // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
+ if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
+ Shuffle = X86ISD::VPERMI;
+ ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
+ PermuteImm = getV4X86ShuffleImm(Mask);
+ return true;
+ }
+ if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
+ SmallVector<int, 4> RepeatedMask;
+ if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
+ Shuffle = X86ISD::VPERMI;
+ ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
+ PermuteImm = getV4X86ShuffleImm(RepeatedMask);
+ return true;
+ }
+ }
+ } else if (AllowFloatDomain && Subtarget.hasAVX()) {
+ // VPERMILPD can permute with a non-repeating shuffle.
+ Shuffle = X86ISD::VPERMILPI;
+ ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
+ PermuteImm = 0;
+ for (int i = 0, e = Mask.size(); i != e; ++i) {
+ int M = Mask[i];
+ if (M == SM_SentinelUndef)
+ continue;
+ assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
+ PermuteImm |= (M & 1) << i;
+ }
return true;
}
}
- // Ensure we don't contain any zero elements.
- if (ContainsZeros)
- return false;
-
- assert(llvm::all_of(Mask, [&](int M) {
- return SM_SentinelUndef <= M && M < (int)NumMaskElts;
- }) && "Expected unary shuffle");
+ // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
+ // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
+ // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
+ if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
+ !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
+ SmallVector<int, 4> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
+ // Narrow the repeated mask to create 32-bit element permutes.
+ SmallVector<int, 4> WordMask = RepeatedMask;
+ if (MaskScalarSizeInBits == 64)
+ scaleShuffleMask(2, RepeatedMask, WordMask);
+
+ Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
+ ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
+ ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
+ PermuteImm = getV4X86ShuffleImm(WordMask);
+ return true;
+ }
+ }
- // Handle PSHUFLW/PSHUFHW repeated patterns.
- if (MaskScalarSizeInBits == 16) {
+ // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
+ if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
ArrayRef<int> LoMask(Mask.data() + 0, 4);
@@ -27170,78 +27247,23 @@ static bool matchUnaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
return true;
}
-
- return false;
}
- return false;
- }
-
- // We only support permutation of 32/64 bit elements after this.
- if (MaskScalarSizeInBits != 32 && MaskScalarSizeInBits != 64)
- return false;
-
- // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
- // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
- if ((AllowFloatDomain && !AllowIntDomain) && !Subtarget.hasAVX())
- return false;
-
- // Pre-AVX2 we must use float shuffles on 256-bit vectors.
- if (MaskVT.is256BitVector() && !Subtarget.hasAVX2()) {
- AllowFloatDomain = true;
- AllowIntDomain = false;
}
- // Check for lane crossing permutes.
- if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
- // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
- if (Subtarget.hasAVX2() && MaskVT.is256BitVector() && Mask.size() == 4) {
- Shuffle = X86ISD::VPERMI;
- ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
- PermuteImm = getV4X86ShuffleImm(Mask);
+ // Attempt to match against byte/bit shifts.
+ // FIXME: Add 512-bit support.
+ if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
+ (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
+ int ShiftAmt = matchVectorShuffleAsShift(ShuffleVT, Shuffle,
+ MaskScalarSizeInBits, Mask,
+ 0, Zeroable, Subtarget);
+ if (0 < ShiftAmt) {
+ PermuteImm = (unsigned)ShiftAmt;
return true;
}
- if (Subtarget.hasAVX512() && MaskVT.is512BitVector() && Mask.size() == 8) {
- SmallVector<int, 4> RepeatedMask;
- if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
- Shuffle = X86ISD::VPERMI;
- ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
- PermuteImm = getV4X86ShuffleImm(RepeatedMask);
- return true;
- }
- }
- return false;
}
- // VPERMILPD can permute with a non-repeating shuffle.
- if (AllowFloatDomain && MaskScalarSizeInBits == 64) {
- Shuffle = X86ISD::VPERMILPI;
- ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
- PermuteImm = 0;
- for (int i = 0, e = Mask.size(); i != e; ++i) {
- int M = Mask[i];
- if (M == SM_SentinelUndef)
- continue;
- assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
- PermuteImm |= (M & 1) << i;
- }
- return true;
- }
-
- // We need a repeating shuffle mask for VPERMILPS/PSHUFD.
- SmallVector<int, 4> RepeatedMask;
- if (!is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask))
- return false;
-
- // Narrow the repeated mask for 32-bit element permutes.
- SmallVector<int, 4> WordMask = RepeatedMask;
- if (MaskScalarSizeInBits == 64)
- scaleShuffleMask(2, RepeatedMask, WordMask);
-
- Shuffle = (AllowFloatDomain ? X86ISD::VPERMILPI : X86ISD::PSHUFD);
- ShuffleVT = (AllowFloatDomain ? MVT::f32 : MVT::i32);
- ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
- PermuteImm = getV4X86ShuffleImm(WordMask);
- return true;
+ return false;
}
// Attempt to match a combined unary shuffle mask against supported binary
@@ -27303,6 +27325,7 @@ static bool matchBinaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
}
static bool matchBinaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
+ const APInt &Zeroable,
bool AllowFloatDomain,
bool AllowIntDomain,
SDValue &V1, SDValue &V2, SDLoc &DL,
@@ -27388,11 +27411,6 @@ static bool matchBinaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
// Attempt to combine to INSERTPS.
if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
MaskVT.is128BitVector()) {
- APInt Zeroable(4, 0);
- for (unsigned i = 0; i != NumMaskElts; ++i)
- if (Mask[i] < 0)
- Zeroable.setBit(i);
-
if (Zeroable.getBoolValue() &&
matchVectorShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
Shuffle = X86ISD::INSERTPS;
@@ -27578,7 +27596,14 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
// Which shuffle domains are permitted?
// Permit domain crossing at higher combine depths.
bool AllowFloatDomain = FloatDomain || (Depth > 3);
- bool AllowIntDomain = !FloatDomain || (Depth > 3);
+ bool AllowIntDomain = (!FloatDomain || (Depth > 3)) &&
+ (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
+
+ // Determine zeroable mask elements.
+ APInt Zeroable(NumMaskElts, 0);
+ for (unsigned i = 0; i != NumMaskElts; ++i)
+ if (isUndefOrZero(Mask[i]))
+ Zeroable.setBit(i);
if (UnaryShuffle) {
// If we are shuffling a X86ISD::VZEXT_LOAD then we can use the load
@@ -27612,7 +27637,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
return true;
}
- if (matchUnaryPermuteVectorShuffle(MaskVT, Mask, AllowFloatDomain,
+ if (matchUnaryPermuteVectorShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
AllowIntDomain, Subtarget, Shuffle,
ShuffleVT, PermuteImm)) {
if (Depth == 1 && Root.getOpcode() == Shuffle)
@@ -27648,7 +27673,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
return true;
}
- if (matchBinaryPermuteVectorShuffle(MaskVT, Mask, AllowFloatDomain,
+ if (matchBinaryPermuteVectorShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
AllowIntDomain, V1, V2, DL, DAG,
Subtarget, Shuffle, ShuffleVT,
PermuteImm)) {
@@ -27668,6 +27693,45 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
return true;
}
+ // Typically from here on, we need an integer version of MaskVT.
+ MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
+ IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
+
+ // Annoyingly, SSE4A instructions don't map into the above match helpers.
+ if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
+ uint64_t BitLen, BitIdx;
+ if (matchVectorShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
+ Zeroable)) {
+ if (Depth == 1 && Root.getOpcode() == X86ISD::EXTRQI)
+ return false; // Nothing to do!
+ V1 = DAG.getBitcast(IntMaskVT, V1);
+ DCI.AddToWorklist(V1.getNode());
+ Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
+ DAG.getConstant(BitLen, DL, MVT::i8),
+ DAG.getConstant(BitIdx, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ if (matchVectorShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
+ if (Depth == 1 && Root.getOpcode() == X86ISD::INSERTQI)
+ return false; // Nothing to do!
+ V1 = DAG.getBitcast(IntMaskVT, V1);
+ DCI.AddToWorklist(V1.getNode());
+ V2 = DAG.getBitcast(IntMaskVT, V2);
+ DCI.AddToWorklist(V2.getNode());
+ Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
+ DAG.getConstant(BitLen, DL, MVT::i8),
+ DAG.getConstant(BitIdx, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+ }
+
// Don't try to re-form single instruction chains under any circumstances now
// that we've done encoding canonicalization for them.
if (Depth < 2)
@@ -27688,9 +27752,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
(Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
(Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
(Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
- MVT VPermMaskSVT = MVT::getIntegerVT(MaskEltSizeInBits);
- MVT VPermMaskVT = MVT::getVectorVT(VPermMaskSVT, NumMaskElts);
- SDValue VPermMask = getConstVector(Mask, VPermMaskVT, DAG, DL, true);
+ SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
DCI.AddToWorklist(VPermMask.getNode());
Res = DAG.getBitcast(MaskVT, V1);
DCI.AddToWorklist(Res.getNode());
@@ -27719,9 +27781,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
if (Mask[i] == SM_SentinelZero)
Mask[i] = NumMaskElts + i;
- MVT VPermMaskSVT = MVT::getIntegerVT(MaskEltSizeInBits);
- MVT VPermMaskVT = MVT::getVectorVT(VPermMaskSVT, NumMaskElts);
- SDValue VPermMask = getConstVector(Mask, VPermMaskVT, DAG, DL, true);
+ SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
DCI.AddToWorklist(VPermMask.getNode());
Res = DAG.getBitcast(MaskVT, V1);
DCI.AddToWorklist(Res.getNode());
@@ -27746,9 +27806,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
(Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
(Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
(Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
- MVT VPermMaskSVT = MVT::getIntegerVT(MaskEltSizeInBits);
- MVT VPermMaskVT = MVT::getVectorVT(VPermMaskSVT, NumMaskElts);
- SDValue VPermMask = getConstVector(Mask, VPermMaskVT, DAG, DL, true);
+ SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
DCI.AddToWorklist(VPermMask.getNode());
V1 = DAG.getBitcast(MaskVT, V1);
DCI.AddToWorklist(V1.getNode());
@@ -27807,8 +27865,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
VPermIdx.push_back(Idx);
}
- MVT VPermMaskVT = MVT::getVectorVT(MVT::i32, NumMaskElts);
- SDValue VPermMask = DAG.getBuildVector(VPermMaskVT, DL, VPermIdx);
+ SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
DCI.AddToWorklist(VPermMask.getNode());
Res = DAG.getBitcast(MaskVT, V1);
DCI.AddToWorklist(Res.getNode());
@@ -27831,8 +27888,6 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
unsigned NumLanes = MaskVT.getSizeInBits() / 128;
unsigned NumEltsPerLane = NumMaskElts / NumLanes;
SmallVector<int, 8> VPerm2Idx;
- MVT MaskIdxSVT = MVT::getIntegerVT(MaskVT.getScalarSizeInBits());
- MVT MaskIdxVT = MVT::getVectorVT(MaskIdxSVT, NumMaskElts);
unsigned M2ZImm = 0;
for (int M : Mask) {
if (M == SM_SentinelUndef) {
@@ -27852,7 +27907,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
DCI.AddToWorklist(V1.getNode());
V2 = DAG.getBitcast(MaskVT, V2);
DCI.AddToWorklist(V2.getNode());
- SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, MaskIdxVT, DAG, DL, true);
+ SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
DCI.AddToWorklist(VPerm2MaskOp.getNode());
Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
DAG.getConstant(M2ZImm, DL, MVT::i8));
@@ -29163,9 +29218,9 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast,
// v8i16 and v16i16.
// For these two cases, we can shuffle the upper element bytes to a
// consecutive sequence at the start of the vector and treat the results as
- // v16i8 or v32i8, and for v61i8 this is the prefferable solution. However,
+ // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
// for v16i16 this is not the case, because the shuffle is expensive, so we
- // avoid sign-exteding to this type entirely.
+ // avoid sign-extending to this type entirely.
// For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
// (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
MVT SExtVT;
@@ -29207,7 +29262,7 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast,
SExtVT = MVT::v16i8;
// For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
// it is not profitable to sign-extend to 256-bit because this will
- // require an extra cross-lane shuffle which is more exprensive than
+ // require an extra cross-lane shuffle which is more expensive than
// truncating the result of the compare to 128-bits.
break;
case MVT::v32i1:
@@ -29580,8 +29635,8 @@ static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
// (extends the sign bit which is zero).
// So it is correct to skip the sign/zero extend instruction.
if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
- Root.getOpcode() == ISD::ZERO_EXTEND ||
- Root.getOpcode() == ISD::ANY_EXTEND))
+ Root.getOpcode() == ISD::ZERO_EXTEND ||
+ Root.getOpcode() == ISD::ANY_EXTEND))
Root = Root.getOperand(0);
// If there was a match, we want Root to be a select that is the root of an
@@ -34950,6 +35005,40 @@ static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
+ // If X is -1 or 0, then we have an opportunity to avoid constants required in
+ // the general case below.
+ auto *ConstantX = dyn_cast<ConstantSDNode>(X);
+ if (ConstantX) {
+ if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
+ (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
+ // This is a complicated way to get -1 or 0 from the carry flag:
+ // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
+ // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
+ return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
+ DAG.getConstant(X86::COND_B, DL, MVT::i8),
+ Y.getOperand(1));
+ }
+
+ if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
+ (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
+ SDValue EFLAGS = Y->getOperand(1);
+ if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
+ EFLAGS.getValueType().isInteger() &&
+ !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
+ // Swap the operands of a SUB, and we have the same pattern as above.
+ // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
+ // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
+ SDValue NewSub = DAG.getNode(
+ X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
+ EFLAGS.getOperand(1), EFLAGS.getOperand(0));
+ SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
+ return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
+ DAG.getConstant(X86::COND_B, DL, MVT::i8),
+ NewEFLAGS);
+ }
+ }
+ }
+
if (CC == X86::COND_B) {
// X + SETB Z --> X + (mask SBB Z, Z)
// X - SETB Z --> X - (mask SBB Z, Z)
@@ -34996,7 +35085,7 @@ static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
// If X is -1 or 0, then we have an opportunity to avoid constants required in
// the general case below.
- if (auto *ConstantX = dyn_cast<ConstantSDNode>(X)) {
+ if (ConstantX) {
// 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
// fake operands:
// 0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
@@ -35549,6 +35638,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
case X86ISD::SHUFP: // Handle all target specific shuffles
case X86ISD::INSERTPS:
+ case X86ISD::EXTRQI:
+ case X86ISD::INSERTQI:
case X86ISD::PALIGNR:
case X86ISD::VSHLDQ:
case X86ISD::VSRLDQ:
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index e1ade92979dc..dbbc2bbba6a4 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -767,6 +767,19 @@ namespace llvm {
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+ // Return true if it is profitable to combine a BUILD_VECTOR to a TRUNCATE
+ // for given operand and result types.
+ // Example of such a combine:
+ // v4i32 build_vector((extract_elt V, 0),
+ // (extract_elt V, 2),
+ // (extract_elt V, 4),
+ // (extract_elt V, 6))
+ // -->
+ // v4i32 truncate (bitcast V to v4i64)
+ bool isDesirableToCombineBuildVectorToTruncate() const override {
+ return true;
+ }
+
/// Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index f3094b781c49..34d4816a2518 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -10488,7 +10488,7 @@ namespace {
return Copy;
}
- // Create a virtal register in *TLSBaseAddrReg, and populate it by
+ // Create a virtual register in *TLSBaseAddrReg, and populate it by
// inserting a copy instruction after I. Returns the new instruction.
MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
MachineFunction *MF = I.getParent()->getParent();
diff --git a/lib/Target/X86/X86InstructionSelector.cpp b/lib/Target/X86/X86InstructionSelector.cpp
index e34a90e975b8..859d3288db89 100644
--- a/lib/Target/X86/X86InstructionSelector.cpp
+++ b/lib/Target/X86/X86InstructionSelector.cpp
@@ -32,6 +32,8 @@
#define DEBUG_TYPE "X86-isel"
+#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
+
using namespace llvm;
#ifndef LLVM_BUILD_GLOBAL_ISEL
@@ -56,7 +58,7 @@ private:
/// the patterns that don't require complex C++.
bool selectImpl(MachineInstr &I) const;
- // TODO: remove after suported by Tablegen-erated instruction selection.
+ // TODO: remove once supported by Tablegen-erated instruction selection.
unsigned getLoadStoreOp(LLT &Ty, const RegisterBank &RB, unsigned Opc,
uint64_t Alignment) const;
@@ -64,6 +66,8 @@ private:
MachineFunction &MF) const;
bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
+ bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
+ MachineFunction &MF) const;
bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI,
@@ -75,6 +79,8 @@ private:
bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
+ bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
+ MachineFunction &MF) const;
bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
@@ -262,6 +268,8 @@ bool X86InstructionSelector::select(MachineInstr &I) const {
return true;
if (selectFrameIndexOrGep(I, MRI, MF))
return true;
+ if (selectGlobalValue(I, MRI, MF))
+ return true;
if (selectConstant(I, MRI, MF))
return true;
if (selectTrunc(I, MRI, MF))
@@ -272,6 +280,8 @@ bool X86InstructionSelector::select(MachineInstr &I) const {
return true;
if (selectUadde(I, MRI, MF))
return true;
+ if (selectUnmergeValues(I, MRI, MF))
+ return true;
if (selectMergeValues(I, MRI, MF))
return true;
if (selectExtract(I, MRI, MF))
@@ -423,6 +433,15 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
+static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
+ if (Ty == LLT::pointer(0, 64))
+ return X86::LEA64r;
+ else if (Ty == LLT::pointer(0, 32))
+ return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
+ else
+ llvm_unreachable("Can't get LEA opcode. Unsupported type.");
+}
+
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
@@ -435,14 +454,7 @@ bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
LLT Ty = MRI.getType(DefReg);
// Use LEA to calculate frame index and GEP
- unsigned NewOpc;
- if (Ty == LLT::pointer(0, 64))
- NewOpc = X86::LEA64r;
- else if (Ty == LLT::pointer(0, 32))
- NewOpc = STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
- else
- llvm_unreachable("Can't select G_FRAME_INDEX/G_GEP, unsupported type.");
-
+ unsigned NewOpc = getLeaOP(Ty, STI);
I.setDesc(TII.get(NewOpc));
MachineInstrBuilder MIB(MF, I);
@@ -458,6 +470,54 @@ bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
+bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
+ MachineRegisterInfo &MRI,
+ MachineFunction &MF) const {
+ unsigned Opc = I.getOpcode();
+
+ if (Opc != TargetOpcode::G_GLOBAL_VALUE)
+ return false;
+
+ auto GV = I.getOperand(1).getGlobal();
+ if (GV->isThreadLocal()) {
+ return false; // TODO: we don't support TLS yet.
+ }
+
+ // Can't handle alternate code models yet.
+ if (TM.getCodeModel() != CodeModel::Small)
+ return 0;
+
+ X86AddressMode AM;
+ AM.GV = GV;
+ AM.GVOpFlags = STI.classifyGlobalReference(GV);
+
+ // TODO: The ABI requires an extra load. Not supported yet.
+ if (isGlobalStubReference(AM.GVOpFlags))
+ return false;
+
+ // TODO: This reference is relative to the PIC base. Not supported yet.
+ if (isGlobalRelativeToPICBase(AM.GVOpFlags))
+ return false;
+
+ if (STI.isPICStyleRIPRel()) {
+ // Use rip-relative addressing.
+ assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
+ AM.Base.Reg = X86::RIP;
+ }
+
+ const unsigned DefReg = I.getOperand(0).getReg();
+ LLT Ty = MRI.getType(DefReg);
+ unsigned NewOpc = getLeaOP(Ty, STI);
+
+ I.setDesc(TII.get(NewOpc));
+ MachineInstrBuilder MIB(MF, I);
+
+ I.RemoveOperand(1);
+ addFullAddress(MIB, AM);
+
+ return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+}
+
bool X86InstructionSelector::selectConstant(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
@@ -467,7 +527,8 @@ bool X86InstructionSelector::selectConstant(MachineInstr &I,
const unsigned DefReg = I.getOperand(0).getReg();
LLT Ty = MRI.getType(DefReg);
- assert(Ty.isScalar() && "invalid element type.");
+ if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
+ return false;
uint64_t Val = 0;
if (I.getOperand(1).isCImm()) {
@@ -576,37 +637,40 @@ bool X86InstructionSelector::selectZext(MachineInstr &I,
const LLT DstTy = MRI.getType(DstReg);
const LLT SrcTy = MRI.getType(SrcReg);
- if (SrcTy == LLT::scalar(1)) {
-
- unsigned AndOpc;
- if (DstTy == LLT::scalar(32))
- AndOpc = X86::AND32ri8;
- else if (DstTy == LLT::scalar(64))
- AndOpc = X86::AND64ri8;
- else
- return false;
+ if (SrcTy != LLT::scalar(1))
+ return false;
- unsigned DefReg =
- MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
+ unsigned AndOpc;
+ if (DstTy == LLT::scalar(8))
+ AndOpc = X86::AND8ri;
+ else if (DstTy == LLT::scalar(16))
+ AndOpc = X86::AND16ri8;
+ else if (DstTy == LLT::scalar(32))
+ AndOpc = X86::AND32ri8;
+ else if (DstTy == LLT::scalar(64))
+ AndOpc = X86::AND64ri8;
+ else
+ return false;
+ unsigned DefReg = SrcReg;
+ if (DstTy != LLT::scalar(8)) {
+ DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
.addImm(0)
.addReg(SrcReg)
.addImm(X86::sub_8bit);
+ }
- MachineInstr &AndInst =
- *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
- .addReg(DefReg)
- .addImm(1);
-
- constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);
+ MachineInstr &AndInst =
+ *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
+ .addReg(DefReg)
+ .addImm(1);
- I.eraseFromParent();
- return true;
- }
+ constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);
- return false;
+ I.eraseFromParent();
+ return true;
}
bool X86InstructionSelector::selectCmp(MachineInstr &I,
@@ -918,6 +982,33 @@ bool X86InstructionSelector::selectInsert(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
+bool X86InstructionSelector::selectUnmergeValues(MachineInstr &I,
+ MachineRegisterInfo &MRI,
+ MachineFunction &MF) const {
+ if (I.getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+ return false;
+
+ // Split to extracts.
+ unsigned NumDefs = I.getNumOperands() - 1;
+ unsigned SrcReg = I.getOperand(NumDefs).getReg();
+ unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
+
+ for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
+
+ MachineInstr &ExtrInst =
+ *BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
+ .addReg(SrcReg)
+ .addImm(Idx * DefSize);
+
+ if (!select(ExtrInst))
+ return false;
+ }
+
+ I.eraseFromParent();
+ return true;
+}
+
bool X86InstructionSelector::selectMergeValues(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
diff --git a/lib/Target/X86/X86LegalizerInfo.cpp b/lib/Target/X86/X86LegalizerInfo.cpp
index a5fa3340c3f1..744ba21011af 100644
--- a/lib/Target/X86/X86LegalizerInfo.cpp
+++ b/lib/Target/X86/X86LegalizerInfo.cpp
@@ -69,12 +69,14 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
for (auto Ty : {s8, s16, s32, p0})
setAction({MemOp, Ty}, Legal);
+ setAction({MemOp, s1}, WidenScalar);
// And everything's fine in addrspace 0.
setAction({MemOp, 1, p0}, Legal);
}
// Pointer-handling
setAction({G_FRAME_INDEX, p0}, Legal);
+ setAction({G_GLOBAL_VALUE, p0}, Legal);
setAction({G_GEP, p0}, Legal);
setAction({G_GEP, 1, s32}, Legal);
@@ -90,8 +92,10 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
setAction({TargetOpcode::G_CONSTANT, s64}, NarrowScalar);
// Extensions
- setAction({G_ZEXT, s32}, Legal);
- setAction({G_SEXT, s32}, Legal);
+ for (auto Ty : {s8, s16, s32}) {
+ setAction({G_ZEXT, Ty}, Legal);
+ setAction({G_SEXT, Ty}, Legal);
+ }
for (auto Ty : {s1, s8, s16}) {
setAction({G_ZEXT, 1, Ty}, Legal);
@@ -125,12 +129,14 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
for (auto Ty : {s8, s16, s32, s64, p0})
setAction({MemOp, Ty}, Legal);
+ setAction({MemOp, s1}, WidenScalar);
// And everything's fine in addrspace 0.
setAction({MemOp, 1, p0}, Legal);
}
// Pointer-handling
setAction({G_FRAME_INDEX, p0}, Legal);
+ setAction({G_GLOBAL_VALUE, p0}, Legal);
setAction({G_GEP, p0}, Legal);
setAction({G_GEP, 1, s32}, Legal);
@@ -146,7 +152,7 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
setAction({TargetOpcode::G_CONSTANT, s1}, WidenScalar);
// Extensions
- for (auto Ty : {s32, s64}) {
+ for (auto Ty : {s8, s16, s32, s64}) {
setAction({G_ZEXT, Ty}, Legal);
setAction({G_SEXT, Ty}, Legal);
}
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index 33bc8e11a572..fd2837b79103 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -1042,7 +1042,7 @@ void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
X86MCInstLower &MCIL) {
- assert(Subtarget->is64Bit() && "XRay custom events only suports X86-64");
+  assert(Subtarget->is64Bit() && "XRay custom events only support X86-64");
// We want to emit the following pattern, which follows the x86 calling
// convention to prepare for the trampoline call to be patched in.
@@ -1332,6 +1332,32 @@ static std::string getShuffleComment(const MachineInstr *MI,
return Comment;
}
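+// Print one constant-pool element for an assembly comment: undef as "u",
+// integers in decimal (multi-word values as a (w0,w1,...) tuple), floats via
+// APFloat, and anything unrecognized as "?".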
+static void printConstant(const Constant *COp, raw_ostream &CS) {
+ if (isa<UndefValue>(COp)) {
+ CS << "u";
+ } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
+ if (CI->getBitWidth() <= 64) {
+ CS << CI->getZExtValue();
+ } else {
+ // print multi-word constant as (w0,w1)
+ const auto &Val = CI->getValue();
+ CS << "(";
+ for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
+ if (i > 0)
+ CS << ",";
+ CS << Val.getRawData()[i];
+ }
+ CS << ")";
+ }
+ } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
+ SmallString<32> Str;
+ CF->getValueAPF().toString(Str);
+ CS << Str;
+ } else {
+ CS << "?";
+ }
+}
+
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
X86MCInstLower MCInstLowering(*MF, *this);
const X86RegisterInfo *RI = MF->getSubtarget<X86Subtarget>().getRegisterInfo();
@@ -1766,59 +1792,73 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
// For loads from a constant pool to a vector register, print the constant
// loaded.
CASE_ALL_MOV_RM()
+ case X86::VBROADCASTF128:
+ case X86::VBROADCASTI128:
+ case X86::VBROADCASTF32X4Z256rm:
+ case X86::VBROADCASTF32X4rm:
+ case X86::VBROADCASTF32X8rm:
+ case X86::VBROADCASTF64X2Z128rm:
+ case X86::VBROADCASTF64X2rm:
+ case X86::VBROADCASTF64X4rm:
+ case X86::VBROADCASTI32X4Z256rm:
+ case X86::VBROADCASTI32X4rm:
+ case X86::VBROADCASTI32X8rm:
+ case X86::VBROADCASTI64X2Z128rm:
+ case X86::VBROADCASTI64X2rm:
+ case X86::VBROADCASTI64X4rm:
if (!OutStreamer->isVerboseAsm())
break;
if (MI->getNumOperands() <= 4)
break;
if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
+ int NumLanes = 1;
+ // Override NumLanes for the broadcast instructions.
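+      // A broadcast loads a single narrower constant-pool entry and repeats it
+      // across the destination lanes, so its elements are printed NumLanes
+      // times below.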
+ switch (MI->getOpcode()) {
+ case X86::VBROADCASTF128: NumLanes = 2; break;
+ case X86::VBROADCASTI128: NumLanes = 2; break;
+ case X86::VBROADCASTF32X4Z256rm: NumLanes = 2; break;
+ case X86::VBROADCASTF32X4rm: NumLanes = 4; break;
+ case X86::VBROADCASTF32X8rm: NumLanes = 2; break;
+ case X86::VBROADCASTF64X2Z128rm: NumLanes = 2; break;
+ case X86::VBROADCASTF64X2rm: NumLanes = 4; break;
+ case X86::VBROADCASTF64X4rm: NumLanes = 2; break;
+ case X86::VBROADCASTI32X4Z256rm: NumLanes = 2; break;
+ case X86::VBROADCASTI32X4rm: NumLanes = 4; break;
+ case X86::VBROADCASTI32X8rm: NumLanes = 2; break;
+ case X86::VBROADCASTI64X2Z128rm: NumLanes = 2; break;
+ case X86::VBROADCASTI64X2rm: NumLanes = 4; break;
+ case X86::VBROADCASTI64X4rm: NumLanes = 2; break;
+ }
+
std::string Comment;
raw_string_ostream CS(Comment);
const MachineOperand &DstOp = MI->getOperand(0);
CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
CS << "[";
- for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; ++i) {
- if (i != 0)
- CS << ",";
- if (CDS->getElementType()->isIntegerTy())
- CS << CDS->getElementAsInteger(i);
- else if (CDS->getElementType()->isFloatTy())
- CS << CDS->getElementAsFloat(i);
- else if (CDS->getElementType()->isDoubleTy())
- CS << CDS->getElementAsDouble(i);
- else
- CS << "?";
+ for (int l = 0; l != NumLanes; ++l) {
+ for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; ++i) {
+ if (i != 0 || l != 0)
+ CS << ",";
+ if (CDS->getElementType()->isIntegerTy())
+ CS << CDS->getElementAsInteger(i);
+ else if (CDS->getElementType()->isFloatTy())
+ CS << CDS->getElementAsFloat(i);
+ else if (CDS->getElementType()->isDoubleTy())
+ CS << CDS->getElementAsDouble(i);
+ else
+ CS << "?";
+ }
}
CS << "]";
OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo);
} else if (auto *CV = dyn_cast<ConstantVector>(C)) {
CS << "<";
- for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; ++i) {
- if (i != 0)
- CS << ",";
- Constant *COp = CV->getOperand(i);
- if (isa<UndefValue>(COp)) {
- CS << "u";
- } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
- if (CI->getBitWidth() <= 64) {
- CS << CI->getZExtValue();
- } else {
- // print multi-word constant as (w0,w1)
- const auto &Val = CI->getValue();
- CS << "(";
- for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
- if (i > 0)
- CS << ",";
- CS << Val.getRawData()[i];
- }
- CS << ")";
- }
- } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
- SmallString<32> Str;
- CF->getValueAPF().toString(Str);
- CS << Str;
- } else {
- CS << "?";
+ for (int l = 0; l != NumLanes; ++l) {
+ for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; ++i) {
+ if (i != 0 || l != 0)
+ CS << ",";
+ printConstant(CV->getOperand(i), CS);
}
}
CS << ">";
@@ -1826,6 +1866,85 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
}
break;
+ case X86::VBROADCASTSSrm:
+ case X86::VBROADCASTSSYrm:
+ case X86::VBROADCASTSSZ128m:
+ case X86::VBROADCASTSSZ256m:
+ case X86::VBROADCASTSSZm:
+ case X86::VBROADCASTSDYrm:
+ case X86::VBROADCASTSDZ256m:
+ case X86::VBROADCASTSDZm:
+ case X86::VPBROADCASTBrm:
+ case X86::VPBROADCASTBYrm:
+ case X86::VPBROADCASTBZ128m:
+ case X86::VPBROADCASTBZ256m:
+ case X86::VPBROADCASTBZm:
+ case X86::VPBROADCASTDrm:
+ case X86::VPBROADCASTDYrm:
+ case X86::VPBROADCASTDZ128m:
+ case X86::VPBROADCASTDZ256m:
+ case X86::VPBROADCASTDZm:
+ case X86::VPBROADCASTQrm:
+ case X86::VPBROADCASTQYrm:
+ case X86::VPBROADCASTQZ128m:
+ case X86::VPBROADCASTQZ256m:
+ case X86::VPBROADCASTQZm:
+ case X86::VPBROADCASTWrm:
+ case X86::VPBROADCASTWYrm:
+ case X86::VPBROADCASTWZ128m:
+ case X86::VPBROADCASTWZ256m:
+ case X86::VPBROADCASTWZm:
+ if (!OutStreamer->isVerboseAsm())
+ break;
+ if (MI->getNumOperands() <= 4)
+ break;
+ if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
+ int NumElts;
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::VBROADCASTSSrm: NumElts = 4; break;
+ case X86::VBROADCASTSSYrm: NumElts = 8; break;
+ case X86::VBROADCASTSSZ128m: NumElts = 4; break;
+ case X86::VBROADCASTSSZ256m: NumElts = 8; break;
+ case X86::VBROADCASTSSZm: NumElts = 16; break;
+ case X86::VBROADCASTSDYrm: NumElts = 4; break;
+ case X86::VBROADCASTSDZ256m: NumElts = 4; break;
+ case X86::VBROADCASTSDZm: NumElts = 8; break;
+ case X86::VPBROADCASTBrm: NumElts = 16; break;
+ case X86::VPBROADCASTBYrm: NumElts = 32; break;
+ case X86::VPBROADCASTBZ128m: NumElts = 16; break;
+ case X86::VPBROADCASTBZ256m: NumElts = 32; break;
+ case X86::VPBROADCASTBZm: NumElts = 64; break;
+ case X86::VPBROADCASTDrm: NumElts = 4; break;
+ case X86::VPBROADCASTDYrm: NumElts = 8; break;
+ case X86::VPBROADCASTDZ128m: NumElts = 4; break;
+ case X86::VPBROADCASTDZ256m: NumElts = 8; break;
+ case X86::VPBROADCASTDZm: NumElts = 16; break;
+ case X86::VPBROADCASTQrm: NumElts = 2; break;
+ case X86::VPBROADCASTQYrm: NumElts = 4; break;
+ case X86::VPBROADCASTQZ128m: NumElts = 2; break;
+ case X86::VPBROADCASTQZ256m: NumElts = 4; break;
+ case X86::VPBROADCASTQZm: NumElts = 8; break;
+ case X86::VPBROADCASTWrm: NumElts = 8; break;
+ case X86::VPBROADCASTWYrm: NumElts = 16; break;
+ case X86::VPBROADCASTWZ128m: NumElts = 8; break;
+ case X86::VPBROADCASTWZ256m: NumElts = 16; break;
+ case X86::VPBROADCASTWZm: NumElts = 32; break;
+ }
+
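+      // The broadcast source is a single scalar constant; it is printed
+      // NumElts times to mirror the destination vector contents.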
+ std::string Comment;
+ raw_string_ostream CS(Comment);
+ const MachineOperand &DstOp = MI->getOperand(0);
+ CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
+ CS << "[";
+ for (int i = 0; i != NumElts; ++i) {
+ if (i != 0)
+ CS << ",";
+ printConstant(C, CS);
+ }
+ CS << "]";
+ OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo);
+ }
}
MCInst TmpInst;
diff --git a/lib/Target/X86/X86SchedSandyBridge.td b/lib/Target/X86/X86SchedSandyBridge.td
index b8ec5883152c..6d85ca6cad64 100644
--- a/lib/Target/X86/X86SchedSandyBridge.td
+++ b/lib/Target/X86/X86SchedSandyBridge.td
@@ -24,8 +24,8 @@ def SandyBridgeModel : SchedMachineModel {
// Based on the LSD (loop-stream detector) queue size.
let LoopMicroOpBufferSize = 28;
- // FIXME: SSE4 and AVX are unimplemented. This flag is set to allow
- // the scheduler to assign a default model to unrecognized opcodes.
+ // This flag is set to allow the scheduler to assign
+ // a default model to unrecognized opcodes.
let CompleteModel = 0;
}
@@ -48,6 +48,7 @@ def SBPort23 : ProcResource<2>;
def SBPort4 : ProcResource<1>;
// Many micro-ops are capable of issuing on multiple ports.
+def SBPort01 : ProcResGroup<[SBPort0, SBPort1]>;
def SBPort05 : ProcResGroup<[SBPort0, SBPort5]>;
def SBPort15 : ProcResGroup<[SBPort1, SBPort5]>;
def SBPort015 : ProcResGroup<[SBPort0, SBPort1, SBPort5]>;
@@ -115,10 +116,10 @@ def : WriteRes<WriteIDivLd, [SBPort23, SBPort0, SBDivider]> {
// Scalar and vector floating point.
defm : SBWriteResPair<WriteFAdd, SBPort1, 3>;
defm : SBWriteResPair<WriteFMul, SBPort0, 5>;
-defm : SBWriteResPair<WriteFDiv, SBPort0, 12>; // 10-14 cycles.
+defm : SBWriteResPair<WriteFDiv, SBPort0, 24>;
defm : SBWriteResPair<WriteFRcp, SBPort0, 5>;
defm : SBWriteResPair<WriteFRsqrt, SBPort0, 5>;
-defm : SBWriteResPair<WriteFSqrt, SBPort0, 15>;
+defm : SBWriteResPair<WriteFSqrt, SBPort0, 14>;
defm : SBWriteResPair<WriteCvtF2I, SBPort1, 3>;
defm : SBWriteResPair<WriteCvtI2F, SBPort1, 4>;
defm : SBWriteResPair<WriteCvtF2F, SBPort1, 3>;
@@ -134,11 +135,11 @@ def : WriteRes<WriteFVarBlendLd, [SBPort0, SBPort5, SBPort23]> {
}
// Vector integer operations.
-defm : SBWriteResPair<WriteVecShift, SBPort05, 1>;
-defm : SBWriteResPair<WriteVecLogic, SBPort015, 1>;
-defm : SBWriteResPair<WriteVecALU, SBPort15, 1>;
+defm : SBWriteResPair<WriteVecShift, SBPort5, 1>;
+defm : SBWriteResPair<WriteVecLogic, SBPort5, 1>;
+defm : SBWriteResPair<WriteVecALU, SBPort1, 3>;
defm : SBWriteResPair<WriteVecIMul, SBPort0, 5>;
-defm : SBWriteResPair<WriteShuffle, SBPort15, 1>;
+defm : SBWriteResPair<WriteShuffle, SBPort5, 1>;
defm : SBWriteResPair<WriteBlend, SBPort15, 1>;
def : WriteRes<WriteVarBlend, [SBPort1, SBPort5]> {
let Latency = 2;
@@ -148,13 +149,15 @@ def : WriteRes<WriteVarBlendLd, [SBPort1, SBPort5, SBPort23]> {
let Latency = 6;
let ResourceCycles = [1, 1, 1];
}
-def : WriteRes<WriteMPSAD, [SBPort0, SBPort1, SBPort5]> {
- let Latency = 6;
- let ResourceCycles = [1, 1, 1];
+def : WriteRes<WriteMPSAD, [SBPort0,SBPort15]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,2];
}
-def : WriteRes<WriteMPSADLd, [SBPort0, SBPort1, SBPort5, SBPort23]> {
- let Latency = 6;
- let ResourceCycles = [1, 1, 1, 1];
+def : WriteRes<WriteMPSADLd, [SBPort0,SBPort23,SBPort15]> {
+ let Latency = 11;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,2];
}
////////////////////////////////////////////////////////////////////////////////
@@ -204,13 +207,15 @@ def : WriteRes<WritePCmpEStrMLd, [SBPort015, SBPort23]> {
}
// Packed Compare Implicit Length Strings, Return Index
-def : WriteRes<WritePCmpIStrI, [SBPort015]> {
- let Latency = 3;
+def : WriteRes<WritePCmpIStrI, [SBPort0]> {
+ let Latency = 11;
+ let NumMicroOps = 3;
let ResourceCycles = [3];
}
-def : WriteRes<WritePCmpIStrILd, [SBPort015, SBPort23]> {
- let Latency = 3;
- let ResourceCycles = [3, 1];
+def : WriteRes<WritePCmpIStrILd, [SBPort0,SBPort23]> {
+ let Latency = 17;
+ let NumMicroOps = 4;
+ let ResourceCycles = [3,1];
}
// Packed Compare Explicit Length Strings, Return Index
@@ -224,22 +229,26 @@ def : WriteRes<WritePCmpEStrILd, [SBPort015, SBPort23]> {
}
// AES Instructions.
-def : WriteRes<WriteAESDecEnc, [SBPort015]> {
- let Latency = 8;
- let ResourceCycles = [2];
+def : WriteRes<WriteAESDecEnc, [SBPort5,SBPort015]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
}
-def : WriteRes<WriteAESDecEncLd, [SBPort015, SBPort23]> {
- let Latency = 8;
- let ResourceCycles = [2, 1];
+def : WriteRes<WriteAESDecEncLd, [SBPort5,SBPort23,SBPort015]> {
+ let Latency = 13;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
}
-def : WriteRes<WriteAESIMC, [SBPort015]> {
- let Latency = 8;
+def : WriteRes<WriteAESIMC, [SBPort5]> {
+ let Latency = 12;
+ let NumMicroOps = 2;
let ResourceCycles = [2];
}
-def : WriteRes<WriteAESIMCLd, [SBPort015, SBPort23]> {
- let Latency = 8;
- let ResourceCycles = [2, 1];
+def : WriteRes<WriteAESIMCLd, [SBPort5,SBPort23]> {
+ let Latency = 18;
+ let NumMicroOps = 3;
+ let ResourceCycles = [2,1];
}
def : WriteRes<WriteAESKeyGen, [SBPort015]> {
@@ -272,4 +281,2407 @@ def : WriteRes<WriteNop, []>;
defm : SBWriteResPair<WriteFShuffle256, SBPort0, 1>;
defm : SBWriteResPair<WriteShuffle256, SBPort0, 1>;
defm : SBWriteResPair<WriteVarVecShift, SBPort0, 1>;
+
+// Remaining SNB instrs.
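+// Each SBWriteResGroup below describes one class of instructions: Latency is
+// the result latency in cycles, NumMicroOps the micro-op count, and
+// ResourceCycles the cycles consumed on each listed port. The InstRW entries
+// then attach instructions to a group by opcode regex.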
+
+def SBWriteResGroup0 : SchedWriteRes<[SBPort0]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup0], (instregex "CVTSS2SDrr")>;
+def: InstRW<[SBWriteResGroup0], (instregex "PSLLDri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "PSLLQri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "PSLLWri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "PSRADri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "PSRAWri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "PSRLDri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "PSRLQri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "PSRLWri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VCVTSS2SDrr")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VPMOVMSKBrr")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VPSLLDri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VPSLLQri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VPSLLWri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VPSRADri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VPSRAWri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VPSRLDri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VPSRLQri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VPSRLWri")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VTESTPDYrr")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VTESTPDrr")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VTESTPSYrr")>;
+def: InstRW<[SBWriteResGroup0], (instregex "VTESTPSrr")>;
+
+def SBWriteResGroup1 : SchedWriteRes<[SBPort1]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup1], (instregex "COMP_FST0r")>;
+def: InstRW<[SBWriteResGroup1], (instregex "COM_FST0r")>;
+def: InstRW<[SBWriteResGroup1], (instregex "UCOM_FPr")>;
+def: InstRW<[SBWriteResGroup1], (instregex "UCOM_Fr")>;
+
+def SBWriteResGroup2 : SchedWriteRes<[SBPort5]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup2], (instregex "ANDNPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "ANDNPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "ANDPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "ANDPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "FDECSTP")>;
+def: InstRW<[SBWriteResGroup2], (instregex "FFREE")>;
+def: InstRW<[SBWriteResGroup2], (instregex "FINCSTP")>;
+def: InstRW<[SBWriteResGroup2], (instregex "FNOP")>;
+def: InstRW<[SBWriteResGroup2], (instregex "INSERTPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "JMP64r")>;
+def: InstRW<[SBWriteResGroup2], (instregex "LD_Frr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOV64toPQIrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVAPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVAPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVDDUPrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVDI2PDIrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVHLPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVLHPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVSDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVSHDUPrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVSLDUPrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVSSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVUPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "MOVUPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "ORPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "ORPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "RETQ")>;
+def: InstRW<[SBWriteResGroup2], (instregex "SHUFPDrri")>;
+def: InstRW<[SBWriteResGroup2], (instregex "SHUFPSrri")>;
+def: InstRW<[SBWriteResGroup2], (instregex "ST_FPrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "ST_Frr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "UNPCKHPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "UNPCKHPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "UNPCKLPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "UNPCKLPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VANDNPDYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VANDNPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VANDNPSYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VANDNPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VANDPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VANDPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VANDPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VEXTRACTF128rr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VINSERTF128rr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VINSERTPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOV64toPQIrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOV64toPQIrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVAPDYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVAPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVAPSYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVAPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVDDUPYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVDDUPrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVHLPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVHLPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVSDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVSHDUPYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVSHDUPrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVSLDUPYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVSLDUPrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVSSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVUPDYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVUPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVUPSYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VMOVUPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VORPDYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VORPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VORPSYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VORPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VPERMILPDri")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VPERMILPDrm")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VPERMILPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VPERMILPSri")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VPERMILPSrm")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VPERMILPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VPERMILPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VSHUFPDYrri")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VSHUFPDrri")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VSHUFPSYrri")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VSHUFPSrri")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VUNPCKHPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VUNPCKHPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VUNPCKLPDYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VUNPCKLPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VUNPCKLPSYrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VUNPCKLPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VXORPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "VXORPSrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "XORPDrr")>;
+def: InstRW<[SBWriteResGroup2], (instregex "XORPSrr")>;
+
+def SBWriteResGroup3 : SchedWriteRes<[SBPort01]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup3], (instregex "LEA64_32r")>;
+
+def SBWriteResGroup4 : SchedWriteRes<[SBPort0]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup4], (instregex "BLENDPDrri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "BLENDPSrri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "BT32ri8")>;
+def: InstRW<[SBWriteResGroup4], (instregex "BT32rr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "BTC32ri8")>;
+def: InstRW<[SBWriteResGroup4], (instregex "BTC32rr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "BTR32ri8")>;
+def: InstRW<[SBWriteResGroup4], (instregex "BTR32rr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "BTS32ri8")>;
+def: InstRW<[SBWriteResGroup4], (instregex "BTS32rr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "CDQ")>;
+def: InstRW<[SBWriteResGroup4], (instregex "CQO")>;
+def: InstRW<[SBWriteResGroup4], (instregex "LAHF")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SAHF")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SAR32ri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SAR8ri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETAEr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETBr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETEr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETGEr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETGr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETLEr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETLr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETNEr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETNOr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETNPr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETNSr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETOr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETPr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SETSr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SHL32ri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SHL64r1")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SHL8r1")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SHL8ri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SHR32ri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "SHR8ri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "VBLENDPDYrri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "VBLENDPDrri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "VBLENDPSYrri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "VBLENDPSrri")>;
+def: InstRW<[SBWriteResGroup4], (instregex "VMOVDQAYrr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "VMOVDQArr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "VMOVDQUYrr")>;
+def: InstRW<[SBWriteResGroup4], (instregex "VMOVDQUrr")>;
+
+def SBWriteResGroup5 : SchedWriteRes<[SBPort15]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup5], (instregex "KORTESTBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "MMX_PABSBrr64")>;
+def: InstRW<[SBWriteResGroup5], (instregex "MMX_PABSDrr64")>;
+def: InstRW<[SBWriteResGroup5], (instregex "MMX_PABSWrr64")>;
+def: InstRW<[SBWriteResGroup5], (instregex "MMX_PADDQirr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "MMX_PALIGNR64irr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "MMX_PSHUFBrr64")>;
+def: InstRW<[SBWriteResGroup5], (instregex "MMX_PSIGNBrr64")>;
+def: InstRW<[SBWriteResGroup5], (instregex "MMX_PSIGNDrr64")>;
+def: InstRW<[SBWriteResGroup5], (instregex "MMX_PSIGNWrr64")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PABSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PABSDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PABSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PACKSSDWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PACKSSWBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PACKUSDWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PACKUSWBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PADDBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PADDDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PADDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PADDSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PADDSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PADDUSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PADDUSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PADDWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PALIGNRrri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PAVGBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PAVGWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PBLENDWrri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PCMPEQBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PCMPEQDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PCMPEQQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PCMPEQWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PCMPGTBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PCMPGTDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PCMPGTWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMAXSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMAXSDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMAXSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMAXUBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMAXUDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMAXUWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMINSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMINSDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMINSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMINUBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMINUDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMINUWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVSXBDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVSXBQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVSXBWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVSXDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVSXWDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVSXWQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVZXBDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVZXBQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVZXBWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVZXDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVZXWDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PMOVZXWQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSHUFBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSHUFDri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSHUFHWri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSHUFLWri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSIGNBrr128")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSIGNDrr128")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSIGNWrr128")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSLLDQri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSRLDQri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSUBBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSUBDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSUBQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSUBSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSUBSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSUBUSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSUBUSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PSUBWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PUNPCKHBWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PUNPCKHDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PUNPCKHQDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PUNPCKHWDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PUNPCKLBWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PUNPCKLDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PUNPCKLQDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "PUNPCKLWDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VMASKMOVPSYrm")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPABSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPABSDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPABSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPACKSSDWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPACKSSWBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPACKUSDWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPACKUSWBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPADDBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPADDDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPADDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPADDUSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPADDUSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPALIGNRrri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPAVGBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPAVGWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPBLENDWrri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPCMPEQBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPCMPEQDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPCMPEQWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPCMPGTBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPCMPGTDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPCMPGTWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMAXSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMAXSDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMAXSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMAXUBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMAXUDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMAXUWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMINSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMINSDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMINSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMINUBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMINUDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMINUWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVSXBDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVSXBQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVSXBWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVSXDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVSXWDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVSXWQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVZXBDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVZXBQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVZXBWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVZXDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVZXWDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPMOVZXWQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSHUFBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSHUFDri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSHUFLWri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSIGNBrr128")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSIGNDrr128")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSIGNWrr128")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSLLDQri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSRLDQri")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSUBBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSUBDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSUBQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSUBSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSUBSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSUBUSBrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSUBUSWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPSUBWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPUNPCKHBWrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPUNPCKHDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPUNPCKHWDrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPUNPCKLDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPUNPCKLQDQrr")>;
+def: InstRW<[SBWriteResGroup5], (instregex "VPUNPCKLWDrr")>;
+
+def SBWriteResGroup6 : SchedWriteRes<[SBPort015]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup6], (instregex "ADD32ri8")>;
+def: InstRW<[SBWriteResGroup6], (instregex "ADD32rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "ADD8ri")>;
+def: InstRW<[SBWriteResGroup6], (instregex "ADD8rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "AND32ri")>;
+def: InstRW<[SBWriteResGroup6], (instregex "AND64ri8")>;
+def: InstRW<[SBWriteResGroup6], (instregex "AND64rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "AND8ri")>;
+def: InstRW<[SBWriteResGroup6], (instregex "AND8rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "CBW")>;
+def: InstRW<[SBWriteResGroup6], (instregex "CMC")>;
+def: InstRW<[SBWriteResGroup6], (instregex "CMP16ri8")>;
+def: InstRW<[SBWriteResGroup6], (instregex "CMP32i32")>;
+def: InstRW<[SBWriteResGroup6], (instregex "CMP64rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "CMP8ri")>;
+def: InstRW<[SBWriteResGroup6], (instregex "CMP8rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "CWDE")>;
+def: InstRW<[SBWriteResGroup6], (instregex "DEC64r")>;
+def: InstRW<[SBWriteResGroup6], (instregex "DEC8r")>;
+def: InstRW<[SBWriteResGroup6], (instregex "INC64r")>;
+def: InstRW<[SBWriteResGroup6], (instregex "INC8r")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MMX_MOVD64from64rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MMX_MOVQ2DQrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOV32rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOV8ri")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOV8rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOVDQArr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOVDQUrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOVPQI2QIrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOVSX32rr16")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOVSX32rr8")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOVZX32rr16")>;
+def: InstRW<[SBWriteResGroup6], (instregex "MOVZX32rr8")>;
+def: InstRW<[SBWriteResGroup6], (instregex "NEG64r")>;
+def: InstRW<[SBWriteResGroup6], (instregex "NEG8r")>;
+def: InstRW<[SBWriteResGroup6], (instregex "NOT64r")>;
+def: InstRW<[SBWriteResGroup6], (instregex "NOT8r")>;
+def: InstRW<[SBWriteResGroup6], (instregex "OR64ri8")>;
+def: InstRW<[SBWriteResGroup6], (instregex "OR64rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "OR8ri")>;
+def: InstRW<[SBWriteResGroup6], (instregex "OR8rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "PANDNrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "PANDrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "PORrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "PXORrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "STC")>;
+def: InstRW<[SBWriteResGroup6], (instregex "SUB64ri8")>;
+def: InstRW<[SBWriteResGroup6], (instregex "SUB64rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "SUB8ri")>;
+def: InstRW<[SBWriteResGroup6], (instregex "SUB8rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "TEST64rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "TEST8ri")>;
+def: InstRW<[SBWriteResGroup6], (instregex "TEST8rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "VMOVPQI2QIrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "VMOVZPQILo2PQIrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "VPANDNrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "VPANDrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "VPORrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "VPXORrr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "XOR32rr")>;
+def: InstRW<[SBWriteResGroup6], (instregex "XOR64ri8")>;
+def: InstRW<[SBWriteResGroup6], (instregex "XOR8ri")>;
+def: InstRW<[SBWriteResGroup6], (instregex "XOR8rr")>;
+
+def SBWriteResGroup7 : SchedWriteRes<[SBPort0]> {
+ let Latency = 2;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup7], (instregex "MOVMSKPDrr")>;
+def: InstRW<[SBWriteResGroup7], (instregex "MOVMSKPSrr")>;
+def: InstRW<[SBWriteResGroup7], (instregex "MOVPDI2DIrr")>;
+def: InstRW<[SBWriteResGroup7], (instregex "MOVPQIto64rr")>;
+def: InstRW<[SBWriteResGroup7], (instregex "PMOVMSKBrr")>;
+def: InstRW<[SBWriteResGroup7], (instregex "VMOVMSKPDYrr")>;
+def: InstRW<[SBWriteResGroup7], (instregex "VMOVMSKPDrr")>;
+def: InstRW<[SBWriteResGroup7], (instregex "VMOVMSKPSrr")>;
+def: InstRW<[SBWriteResGroup7], (instregex "VMOVPDI2DIrr")>;
+def: InstRW<[SBWriteResGroup7], (instregex "VMOVPQIto64rr")>;
+
+def SBWriteResGroup9 : SchedWriteRes<[SBPort0]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [2];
+}
+def: InstRW<[SBWriteResGroup9], (instregex "BLENDVPDrr0")>;
+def: InstRW<[SBWriteResGroup9], (instregex "BLENDVPSrr0")>;
+def: InstRW<[SBWriteResGroup9], (instregex "ROL32ri")>;
+def: InstRW<[SBWriteResGroup9], (instregex "ROL8ri")>;
+def: InstRW<[SBWriteResGroup9], (instregex "ROR32ri")>;
+def: InstRW<[SBWriteResGroup9], (instregex "ROR8ri")>;
+def: InstRW<[SBWriteResGroup9], (instregex "SETAr")>;
+def: InstRW<[SBWriteResGroup9], (instregex "SETBEr")>;
+def: InstRW<[SBWriteResGroup9], (instregex "VBLENDVPDYrr")>;
+def: InstRW<[SBWriteResGroup9], (instregex "VBLENDVPDrr")>;
+def: InstRW<[SBWriteResGroup9], (instregex "VBLENDVPSYrr")>;
+def: InstRW<[SBWriteResGroup9], (instregex "VBLENDVPSrr")>;
+
+def SBWriteResGroup10 : SchedWriteRes<[SBPort15]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [2];
+}
+def: InstRW<[SBWriteResGroup10], (instregex "VPBLENDVBrr")>;
+
+def SBWriteResGroup11 : SchedWriteRes<[SBPort015]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [2];
+}
+def: InstRW<[SBWriteResGroup11], (instregex "SCASB")>;
+def: InstRW<[SBWriteResGroup11], (instregex "SCASL")>;
+def: InstRW<[SBWriteResGroup11], (instregex "SCASQ")>;
+def: InstRW<[SBWriteResGroup11], (instregex "SCASW")>;
+
+def SBWriteResGroup12 : SchedWriteRes<[SBPort0,SBPort1]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup12], (instregex "COMISDrr")>;
+def: InstRW<[SBWriteResGroup12], (instregex "COMISSrr")>;
+def: InstRW<[SBWriteResGroup12], (instregex "UCOMISDrr")>;
+def: InstRW<[SBWriteResGroup12], (instregex "UCOMISSrr")>;
+def: InstRW<[SBWriteResGroup12], (instregex "VCOMISDrr")>;
+def: InstRW<[SBWriteResGroup12], (instregex "VCOMISSrr")>;
+def: InstRW<[SBWriteResGroup12], (instregex "VUCOMISDrr")>;
+def: InstRW<[SBWriteResGroup12], (instregex "VUCOMISSrr")>;
+
+def SBWriteResGroup13 : SchedWriteRes<[SBPort0,SBPort5]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup13], (instregex "CVTPS2PDrr")>;
+def: InstRW<[SBWriteResGroup13], (instregex "PTESTrr")>;
+def: InstRW<[SBWriteResGroup13], (instregex "VCVTPS2PDYrr")>;
+def: InstRW<[SBWriteResGroup13], (instregex "VCVTPS2PDrr")>;
+def: InstRW<[SBWriteResGroup13], (instregex "VPTESTYrr")>;
+def: InstRW<[SBWriteResGroup13], (instregex "VPTESTrr")>;
+
+def SBWriteResGroup14 : SchedWriteRes<[SBPort0,SBPort15]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup14], (instregex "PSLLDrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "PSLLQrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "PSLLWrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "PSRADrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "PSRAWrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "PSRLDrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "PSRLQrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "PSRLWrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "VPSRADrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "VPSRAWrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "VPSRLDrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "VPSRLQrr")>;
+def: InstRW<[SBWriteResGroup14], (instregex "VPSRLWrr")>;
+
+def SBWriteResGroup15 : SchedWriteRes<[SBPort0,SBPort015]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup15], (instregex "FNSTSW16r")>;
+
+def SBWriteResGroup16 : SchedWriteRes<[SBPort1,SBPort0]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup16], (instregex "BSWAP32r")>;
+
+def SBWriteResGroup17 : SchedWriteRes<[SBPort5,SBPort15]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup17], (instregex "PINSRBrr")>;
+def: InstRW<[SBWriteResGroup17], (instregex "PINSRDrr")>;
+def: InstRW<[SBWriteResGroup17], (instregex "PINSRQrr")>;
+def: InstRW<[SBWriteResGroup17], (instregex "PINSRWrri")>;
+def: InstRW<[SBWriteResGroup17], (instregex "VPINSRBrr")>;
+def: InstRW<[SBWriteResGroup17], (instregex "VPINSRDrr")>;
+def: InstRW<[SBWriteResGroup17], (instregex "VPINSRQrr")>;
+def: InstRW<[SBWriteResGroup17], (instregex "VPINSRWrri")>;
+
+def SBWriteResGroup18 : SchedWriteRes<[SBPort5,SBPort015]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup18], (instregex "MMX_MOVDQ2Qrr")>;
+
+def SBWriteResGroup19 : SchedWriteRes<[SBPort0,SBPort015]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup19], (instregex "ADC64ri8")>;
+def: InstRW<[SBWriteResGroup19], (instregex "ADC64rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "ADC8ri")>;
+def: InstRW<[SBWriteResGroup19], (instregex "ADC8rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVAE32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVB32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVE32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVG32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVGE32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVL32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVLE32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVNE32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVNO32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVNP32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVNS32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVO32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVP32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "CMOVS32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "SBB32rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "SBB64ri8")>;
+def: InstRW<[SBWriteResGroup19], (instregex "SBB8ri")>;
+def: InstRW<[SBWriteResGroup19], (instregex "SBB8rr")>;
+def: InstRW<[SBWriteResGroup19], (instregex "SHLD32rri8")>;
+def: InstRW<[SBWriteResGroup19], (instregex "SHRD32rri8")>;
+
+def SBWriteResGroup20 : SchedWriteRes<[SBPort0]> {
+ let Latency = 3;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup20], (instregex "MMX_PMADDUBSWrr64")>;
+def: InstRW<[SBWriteResGroup20], (instregex "MMX_PMULHRSWrr64")>;
+def: InstRW<[SBWriteResGroup20], (instregex "MMX_PMULUDQirr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PMADDUBSWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PMADDWDrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PMULDQrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PMULHRSWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PMULHUWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PMULHWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PMULLDrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PMULLWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PMULUDQrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "PSADBWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "VMOVMSKPSYrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "VPMADDUBSWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "VPMADDWDrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "VPMULDQrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "VPMULHRSWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "VPMULHWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "VPMULLDrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "VPMULLWrr")>;
+def: InstRW<[SBWriteResGroup20], (instregex "VPSADBWrr")>;
+
+def SBWriteResGroup21 : SchedWriteRes<[SBPort1]> {
+ let Latency = 3;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup21], (instregex "ADDPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ADDPSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ADDSDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ADDSSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ADDSUBPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ADDSUBPSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ADD_FPrST0")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ADD_FST0r")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ADD_FrST0")>;
+def: InstRW<[SBWriteResGroup21], (instregex "BSF32rr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "BSR32rr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "CMPPDrri")>;
+def: InstRW<[SBWriteResGroup21], (instregex "CMPPSrri")>;
+def: InstRW<[SBWriteResGroup21], (instregex "CMPSDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "CMPSSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "CRC32r32r32")>;
+def: InstRW<[SBWriteResGroup21], (instregex "CRC32r32r8")>;
+def: InstRW<[SBWriteResGroup21], (instregex "CVTDQ2PSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "CVTPS2DQrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "CVTTPS2DQrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MAXPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MAXPSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MAXSDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MAXSSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MINPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MINPSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MINSDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MINSSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MMX_CVTPI2PSirr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MMX_CVTPS2PIirr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MMX_CVTTPS2PIirr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "MUL8r")>;
+def: InstRW<[SBWriteResGroup21], (instregex "POPCNT32rr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ROUNDPDr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ROUNDPSr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ROUNDSDr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "ROUNDSSr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUBPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUBPSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUBR_FPrST0")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUBR_FST0r")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUBR_FrST0")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUBSDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUBSSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUB_FPrST0")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUB_FST0r")>;
+def: InstRW<[SBWriteResGroup21], (instregex "SUB_FrST0")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDPDYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDPSYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDPSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDSDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDSSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDSUBPDYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDSUBPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDSUBPSYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VADDSUBPSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VBROADCASTF128")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCMPPDYrri")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCMPPDrri")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCMPPSYrri")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCMPPSrri")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCMPSDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCMPSSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCVTDQ2PSYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCVTDQ2PSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCVTPS2DQYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCVTPS2DQrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VCVTTPS2DQrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMAXPDYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMAXPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMAXPSYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMAXPSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMAXSDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMAXSSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMINPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMINPSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMINSDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VMINSSrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VROUNDPDr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VROUNDPSr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VROUNDSDr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VSUBPDYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VSUBPDrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VSUBPSYrr")>;
+def: InstRW<[SBWriteResGroup21], (instregex "VSUBPSrr")>;
+
+def SBWriteResGroup22 : SchedWriteRes<[SBPort0,SBPort5]> {
+ let Latency = 3;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup22], (instregex "EXTRACTPSrr")>;
+def: InstRW<[SBWriteResGroup22], (instregex "VEXTRACTPSrr")>;
+
+def SBWriteResGroup23 : SchedWriteRes<[SBPort0,SBPort15]> {
+ let Latency = 3;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup23], (instregex "PEXTRBrr")>;
+def: InstRW<[SBWriteResGroup23], (instregex "PEXTRDrr")>;
+def: InstRW<[SBWriteResGroup23], (instregex "PEXTRQrr")>;
+def: InstRW<[SBWriteResGroup23], (instregex "PEXTRWri")>;
+def: InstRW<[SBWriteResGroup23], (instregex "VPEXTRBrr")>;
+def: InstRW<[SBWriteResGroup23], (instregex "VPEXTRDrr")>;
+def: InstRW<[SBWriteResGroup23], (instregex "VPEXTRQrr")>;
+def: InstRW<[SBWriteResGroup23], (instregex "VPEXTRWri")>;
+def: InstRW<[SBWriteResGroup23], (instregex "SHL64rCL")>;
+def: InstRW<[SBWriteResGroup23], (instregex "SHL8rCL")>;
+
+def SBWriteResGroup24 : SchedWriteRes<[SBPort15]> {
+ let Latency = 3;
+ let NumMicroOps = 3;
+ let ResourceCycles = [3];
+}
+def: InstRW<[SBWriteResGroup24], (instregex "MMX_PHADDSWrr64")>;
+def: InstRW<[SBWriteResGroup24], (instregex "MMX_PHADDWrr64")>;
+def: InstRW<[SBWriteResGroup24], (instregex "MMX_PHADDrr64")>;
+def: InstRW<[SBWriteResGroup24], (instregex "MMX_PHSUBDrr64")>;
+def: InstRW<[SBWriteResGroup24], (instregex "MMX_PHSUBSWrr64")>;
+def: InstRW<[SBWriteResGroup24], (instregex "MMX_PHSUBWrr64")>;
+def: InstRW<[SBWriteResGroup24], (instregex "PHADDDrr")>;
+def: InstRW<[SBWriteResGroup24], (instregex "PHADDSWrr128")>;
+def: InstRW<[SBWriteResGroup24], (instregex "PHADDWrr")>;
+def: InstRW<[SBWriteResGroup24], (instregex "PHSUBDrr")>;
+def: InstRW<[SBWriteResGroup24], (instregex "PHSUBSWrr128")>;
+def: InstRW<[SBWriteResGroup24], (instregex "PHSUBWrr")>;
+def: InstRW<[SBWriteResGroup24], (instregex "VPHADDDrr")>;
+def: InstRW<[SBWriteResGroup24], (instregex "VPHADDSWrr128")>;
+def: InstRW<[SBWriteResGroup24], (instregex "VPHADDWrr")>;
+def: InstRW<[SBWriteResGroup24], (instregex "VPHSUBDrr")>;
+def: InstRW<[SBWriteResGroup24], (instregex "VPHSUBSWrr128")>;
+def: InstRW<[SBWriteResGroup24], (instregex "VPHSUBWrr")>;
+
+def SBWriteResGroup25 : SchedWriteRes<[SBPort015]> {
+ let Latency = 3;
+ let NumMicroOps = 3;
+ let ResourceCycles = [3];
+}
+def: InstRW<[SBWriteResGroup25], (instregex "LEAVE64")>;
+def: InstRW<[SBWriteResGroup25], (instregex "XADD32rr")>;
+def: InstRW<[SBWriteResGroup25], (instregex "XADD8rr")>;
+
+def SBWriteResGroup26 : SchedWriteRes<[SBPort0,SBPort015]> {
+ let Latency = 3;
+ let NumMicroOps = 3;
+ let ResourceCycles = [2,1];
+}
+def: InstRW<[SBWriteResGroup26], (instregex "CMOVA32rr")>;
+def: InstRW<[SBWriteResGroup26], (instregex "CMOVBE32rr")>;
+
+def SBWriteResGroup27 : SchedWriteRes<[SBPort0,SBPort1]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup27], (instregex "MUL64r")>;
+
+def SBWriteResGroup28 : SchedWriteRes<[SBPort1,SBPort5]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup28], (instregex "CVTDQ2PDrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "CVTPD2DQrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "CVTPD2PSrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "CVTSD2SSrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "CVTSI2SD64rr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "CVTSI2SDrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "CVTTPD2DQrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "MMX_CVTPD2PIirr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "MMX_CVTPI2PDirr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "MMX_CVTTPD2PIirr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTDQ2PDYrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTDQ2PDrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTPD2DQYrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTPD2DQrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTPD2PSYrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTPD2PSrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTSI2SD64rr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTSI2SDrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTTPD2DQYrr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTTPD2DQrr")>;
+
+def SBWriteResGroup29 : SchedWriteRes<[SBPort1,SBPort015]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup29], (instregex "MOV64sr")>;
+def: InstRW<[SBWriteResGroup29], (instregex "PAUSE")>;
+
+def SBWriteResGroup30 : SchedWriteRes<[SBPort0]> {
+ let Latency = 5;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup30], (instregex "MULPDrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "MULPSrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "MULSDrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "MULSSrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "MUL_FPrST0")>;
+def: InstRW<[SBWriteResGroup30], (instregex "MUL_FST0r")>;
+def: InstRW<[SBWriteResGroup30], (instregex "MUL_FrST0")>;
+def: InstRW<[SBWriteResGroup30], (instregex "PCMPGTQrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "PHMINPOSUWrr128")>;
+def: InstRW<[SBWriteResGroup30], (instregex "RCPPSr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "RCPSSr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "RSQRTPSr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "RSQRTSSr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VMULPDYrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VMULPDrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VMULPSYrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VMULPSrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VMULSDrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VMULSSrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VPCMPGTQrr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VPHMINPOSUWrr128")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VRSQRTPSr")>;
+def: InstRW<[SBWriteResGroup30], (instregex "VRSQRTSSr")>;
+
+def SBWriteResGroup31 : SchedWriteRes<[SBPort23]> {
+ let Latency = 5;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup31], (instregex "MOV32rm")>;
+def: InstRW<[SBWriteResGroup31], (instregex "MOV8rm")>;
+def: InstRW<[SBWriteResGroup31], (instregex "MOVSX32rm16")>;
+def: InstRW<[SBWriteResGroup31], (instregex "MOVSX32rm8")>;
+def: InstRW<[SBWriteResGroup31], (instregex "MOVZX32rm16")>;
+def: InstRW<[SBWriteResGroup31], (instregex "MOVZX32rm8")>;
+def: InstRW<[SBWriteResGroup31], (instregex "PREFETCH")>;
+
+def SBWriteResGroup32 : SchedWriteRes<[SBPort0,SBPort1]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup32], (instregex "CVTSD2SI64rr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "CVTSD2SIrr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "CVTSS2SI64rr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "CVTSS2SIrr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "CVTTSD2SI64rr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "CVTTSD2SIrr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "CVTTSS2SI64rr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "CVTTSS2SIrr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "VCVTSD2SI64rr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "VCVTSS2SI64rr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "VCVTSS2SIrr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "VCVTTSD2SI64rr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "VCVTTSD2SIrr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "VCVTTSS2SI64rr")>;
+def: InstRW<[SBWriteResGroup32], (instregex "VCVTTSS2SIrr")>;
+
+def SBWriteResGroup33 : SchedWriteRes<[SBPort4,SBPort23]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup33], (instregex "MOV64mr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOV8mr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVAPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVAPSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVDQAmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVDQUmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVHPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVHPSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVLPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVLPSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVNTDQmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVNTI_64mr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVNTImr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVNTPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVNTPSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVPDI2DImr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVPQI2QImr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVPQIto64mr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVSSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVUPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "MOVUPSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "PUSH64i8")>;
+def: InstRW<[SBWriteResGroup33], (instregex "PUSH64r")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VEXTRACTF128mr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVAPDYmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVAPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVAPSYmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVAPSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVDQAYmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVDQAmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVDQUYmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVDQUmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVHPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVHPSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVLPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVLPSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVNTDQYmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVNTDQmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVNTPDYmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVNTPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVNTPSYmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVNTPSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVPDI2DImr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVPQI2QImr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVPQIto64mr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVSDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVSSmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVUPDYmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVUPDmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVUPSYmr")>;
+def: InstRW<[SBWriteResGroup33], (instregex "VMOVUPSmr")>;
+
+def SBWriteResGroup34 : SchedWriteRes<[SBPort0,SBPort15]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,2];
+}
+def: InstRW<[SBWriteResGroup34], (instregex "MPSADBWrri")>;
+def: InstRW<[SBWriteResGroup34], (instregex "VMPSADBWrri")>;
+
+def SBWriteResGroup35 : SchedWriteRes<[SBPort1,SBPort5]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,2];
+}
+def: InstRW<[SBWriteResGroup35], (instregex "CLI")>;
+def: InstRW<[SBWriteResGroup35], (instregex "CVTSI2SS64rr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "CVTSI2SSrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "HADDPDrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "HADDPSrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "HSUBPDrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "HSUBPSrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VCVTSI2SS64rr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VCVTSI2SSrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VHADDPDrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VHADDPSYrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VHADDPSrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VHSUBPDYrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VHSUBPDrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VHSUBPSYrr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VHSUBPSrr")>;
+
+def SBWriteResGroup36 : SchedWriteRes<[SBPort4,SBPort5,SBPort23]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup36], (instregex "CALL64r")>;
+def: InstRW<[SBWriteResGroup36], (instregex "EXTRACTPSmr")>;
+def: InstRW<[SBWriteResGroup36], (instregex "VEXTRACTPSmr")>;
+
+def SBWriteResGroup37 : SchedWriteRes<[SBPort4,SBPort01,SBPort23]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup37], (instregex "VMASKMOVPDYrm")>;
+def: InstRW<[SBWriteResGroup37], (instregex "VMASKMOVPDmr")>;
+def: InstRW<[SBWriteResGroup37], (instregex "VMASKMOVPSmr")>;
+
+def SBWriteResGroup38 : SchedWriteRes<[SBPort4,SBPort23,SBPort0]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup38], (instregex "SETAEm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETBm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETEm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETGEm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETGm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETLEm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETLm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETNEm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETNOm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETNPm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETNSm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETOm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETPm")>;
+def: InstRW<[SBWriteResGroup38], (instregex "SETSm")>;
+
+def SBWriteResGroup39 : SchedWriteRes<[SBPort4,SBPort23,SBPort15]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup39], (instregex "PEXTRBmr")>;
+def: InstRW<[SBWriteResGroup39], (instregex "VPEXTRBmr")>;
+def: InstRW<[SBWriteResGroup39], (instregex "VPEXTRDmr")>;
+def: InstRW<[SBWriteResGroup39], (instregex "VPEXTRWmr")>;
+
+def SBWriteResGroup40 : SchedWriteRes<[SBPort4,SBPort23,SBPort015]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup40], (instregex "MOV8mi")>;
+def: InstRW<[SBWriteResGroup40], (instregex "STOSB")>;
+def: InstRW<[SBWriteResGroup40], (instregex "STOSL")>;
+def: InstRW<[SBWriteResGroup40], (instregex "STOSQ")>;
+def: InstRW<[SBWriteResGroup40], (instregex "STOSW")>;
+
+def SBWriteResGroup41 : SchedWriteRes<[SBPort5,SBPort015]> {
+ let Latency = 5;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,3];
+}
+def: InstRW<[SBWriteResGroup41], (instregex "FNINIT")>;
+
+def SBWriteResGroup42 : SchedWriteRes<[SBPort0,SBPort015]> {
+ let Latency = 5;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,3];
+}
+def: InstRW<[SBWriteResGroup42], (instregex "CMPXCHG32rr")>;
+def: InstRW<[SBWriteResGroup42], (instregex "CMPXCHG8rr")>;
+
+def SBWriteResGroup43 : SchedWriteRes<[SBPort4,SBPort23,SBPort0]> {
+ let Latency = 5;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,2];
+}
+def: InstRW<[SBWriteResGroup43], (instregex "SETAm")>;
+def: InstRW<[SBWriteResGroup43], (instregex "SETBEm")>;
+
+def SBWriteResGroup44 : SchedWriteRes<[SBPort0,SBPort4,SBPort5,SBPort23]> {
+ let Latency = 5;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,1,1];
+}
+def: InstRW<[SBWriteResGroup44], (instregex "LDMXCSR")>;
+def: InstRW<[SBWriteResGroup44], (instregex "STMXCSR")>;
+def: InstRW<[SBWriteResGroup44], (instregex "VLDMXCSR")>;
+def: InstRW<[SBWriteResGroup44], (instregex "VSTMXCSR")>;
+
+def SBWriteResGroup45 : SchedWriteRes<[SBPort0,SBPort4,SBPort23,SBPort15]> {
+ let Latency = 5;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,1,1];
+}
+def: InstRW<[SBWriteResGroup45], (instregex "PEXTRDmr")>;
+def: InstRW<[SBWriteResGroup45], (instregex "PEXTRQmr")>;
+def: InstRW<[SBWriteResGroup45], (instregex "VPEXTRQmr")>;
+def: InstRW<[SBWriteResGroup45], (instregex "PUSHF16")>;
+def: InstRW<[SBWriteResGroup45], (instregex "PUSHF64")>;
+
+def SBWriteResGroup46 : SchedWriteRes<[SBPort4,SBPort5,SBPort01,SBPort23]> {
+ let Latency = 5;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,1,1];
+}
+def: InstRW<[SBWriteResGroup46], (instregex "CLFLUSH")>;
+
+def SBWriteResGroup47 : SchedWriteRes<[SBPort4,SBPort5,SBPort01,SBPort23]> {
+ let Latency = 5;
+ let NumMicroOps = 5;
+ let ResourceCycles = [1,2,1,1];
+}
+def: InstRW<[SBWriteResGroup47], (instregex "FXRSTOR")>;
+
+def SBWriteResGroup48 : SchedWriteRes<[SBPort23]> {
+ let Latency = 6;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup48], (instregex "LDDQUrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MMX_MOVD64from64rm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOV64toPQIrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVAPDrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVAPSrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVDDUPrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVDI2PDIrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVDQArm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVDQUrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVNTDQArm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVSHDUPrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVSLDUPrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVSSrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVUPDrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "MOVUPSrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "POP64r")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VBROADCASTSSrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VLDDQUYrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VLDDQUrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOV64toPQIrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVAPDrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVAPSrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVDDUPrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVDI2PDIrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVDQArm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVDQUrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVNTDQArm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVQI2PQIrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVSDrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVSHDUPrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVSLDUPrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVSSrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVUPDrm")>;
+def: InstRW<[SBWriteResGroup48], (instregex "VMOVUPSrm")>;
+
+def SBWriteResGroup49 : SchedWriteRes<[SBPort5,SBPort23]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup49], (instregex "JMP64m")>;
+def: InstRW<[SBWriteResGroup49], (instregex "MOV64sm")>;
+
+def SBWriteResGroup50 : SchedWriteRes<[SBPort23,SBPort0]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup50], (instregex "BT64mi8")>;
+
+def SBWriteResGroup51 : SchedWriteRes<[SBPort23,SBPort15]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup51], (instregex "MMX_PABSBrm64")>;
+def: InstRW<[SBWriteResGroup51], (instregex "MMX_PABSDrm64")>;
+def: InstRW<[SBWriteResGroup51], (instregex "MMX_PABSWrm64")>;
+def: InstRW<[SBWriteResGroup51], (instregex "MMX_PALIGNR64irm")>;
+def: InstRW<[SBWriteResGroup51], (instregex "MMX_PSHUFBrm64")>;
+def: InstRW<[SBWriteResGroup51], (instregex "MMX_PSIGNBrm64")>;
+def: InstRW<[SBWriteResGroup51], (instregex "MMX_PSIGNDrm64")>;
+def: InstRW<[SBWriteResGroup51], (instregex "MMX_PSIGNWrm64")>;
+
+def SBWriteResGroup52 : SchedWriteRes<[SBPort23,SBPort015]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup52], (instregex "ADD64rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "ADD8rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "AND64rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "AND8rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "CMP64mi8")>;
+def: InstRW<[SBWriteResGroup52], (instregex "CMP64mr")>;
+def: InstRW<[SBWriteResGroup52], (instregex "CMP64rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "CMP8mi")>;
+def: InstRW<[SBWriteResGroup52], (instregex "CMP8mr")>;
+def: InstRW<[SBWriteResGroup52], (instregex "CMP8rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "LODSL")>;
+def: InstRW<[SBWriteResGroup52], (instregex "LODSQ")>;
+def: InstRW<[SBWriteResGroup52], (instregex "OR64rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "OR8rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "SUB64rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "SUB8rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "XOR64rm")>;
+def: InstRW<[SBWriteResGroup52], (instregex "XOR8rm")>;
+
+def SBWriteResGroup53 : SchedWriteRes<[SBPort4,SBPort23]> {
+ let Latency = 6;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,2];
+}
+def: InstRW<[SBWriteResGroup53], (instregex "POP64rmm")>;
+def: InstRW<[SBWriteResGroup53], (instregex "PUSH64rmm")>;
+def: InstRW<[SBWriteResGroup53], (instregex "ST_F32m")>;
+def: InstRW<[SBWriteResGroup53], (instregex "ST_F64m")>;
+def: InstRW<[SBWriteResGroup53], (instregex "ST_FP32m")>;
+def: InstRW<[SBWriteResGroup53], (instregex "ST_FP64m")>;
+def: InstRW<[SBWriteResGroup53], (instregex "ST_FP80m")>;
+
+def SBWriteResGroup54 : SchedWriteRes<[SBPort23]> {
+ let Latency = 7;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup54], (instregex "VBROADCASTSDYrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VBROADCASTSSrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VMOVAPDYrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VMOVAPSYrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VMOVDDUPYrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VMOVDQAYrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VMOVDQUYrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VMOVSHDUPYrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VMOVSLDUPYrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VMOVUPDYrm")>;
+def: InstRW<[SBWriteResGroup54], (instregex "VMOVUPSYrm")>;
+
+def SBWriteResGroup55 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup55], (instregex "CVTPS2PDrm")>;
+def: InstRW<[SBWriteResGroup55], (instregex "CVTSS2SDrm")>;
+def: InstRW<[SBWriteResGroup55], (instregex "VCVTPS2PDYrm")>;
+def: InstRW<[SBWriteResGroup55], (instregex "VCVTPS2PDrm")>;
+def: InstRW<[SBWriteResGroup55], (instregex "VCVTSS2SDrm")>;
+def: InstRW<[SBWriteResGroup55], (instregex "VTESTPDrm")>;
+def: InstRW<[SBWriteResGroup55], (instregex "VTESTPSrm")>;
+
+def SBWriteResGroup56 : SchedWriteRes<[SBPort5,SBPort23]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup56], (instregex "ANDNPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "ANDNPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "ANDPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "ANDPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "INSERTPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "MOVHPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "MOVHPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "MOVLPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "MOVLPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "ORPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "ORPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "SHUFPDrmi")>;
+def: InstRW<[SBWriteResGroup56], (instregex "SHUFPSrmi")>;
+def: InstRW<[SBWriteResGroup56], (instregex "UNPCKHPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "UNPCKHPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "UNPCKLPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "UNPCKLPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VANDNPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VANDNPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VANDPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VANDPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VBROADCASTF128")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VINSERTPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VMOVHPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VMOVHPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VMOVLPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VMOVLPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VORPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VORPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VPERMILPDmi")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VPERMILPDri")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VPERMILPSmi")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VPERMILPSri")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VSHUFPDrmi")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VSHUFPSrmi")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VUNPCKHPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VUNPCKHPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VUNPCKLPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VUNPCKLPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VXORPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "VXORPSrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "XORPDrm")>;
+def: InstRW<[SBWriteResGroup56], (instregex "XORPSrm")>;
+
+def SBWriteResGroup57 : SchedWriteRes<[SBPort5,SBPort015]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup57], (instregex "AESDECLASTrr")>;
+def: InstRW<[SBWriteResGroup57], (instregex "AESDECrr")>;
+def: InstRW<[SBWriteResGroup57], (instregex "AESENCLASTrr")>;
+def: InstRW<[SBWriteResGroup57], (instregex "AESENCrr")>;
+def: InstRW<[SBWriteResGroup57], (instregex "KANDQrr")>;
+def: InstRW<[SBWriteResGroup57], (instregex "VAESDECLASTrr")>;
+def: InstRW<[SBWriteResGroup57], (instregex "VAESDECrr")>;
+def: InstRW<[SBWriteResGroup57], (instregex "VAESENCrr")>;
+
+def SBWriteResGroup58 : SchedWriteRes<[SBPort23,SBPort0]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup58], (instregex "BLENDPDrmi")>;
+def: InstRW<[SBWriteResGroup58], (instregex "BLENDPSrmi")>;
+def: InstRW<[SBWriteResGroup58], (instregex "VBLENDPDrmi")>;
+def: InstRW<[SBWriteResGroup58], (instregex "VBLENDPSrmi")>;
+def: InstRW<[SBWriteResGroup58], (instregex "VINSERTF128rm")>;
+
+def SBWriteResGroup59 : SchedWriteRes<[SBPort23,SBPort15]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup59], (instregex "MMX_PADDQirm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PABSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PABSDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PABSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PACKSSDWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PACKSSWBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PACKUSDWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PACKUSWBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PADDBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PADDDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PADDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PADDSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PADDSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PADDUSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PADDUSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PADDWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PALIGNRrmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PAVGBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PAVGWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PBLENDWrmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PCMPEQBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PCMPEQDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PCMPEQQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PCMPEQWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PCMPGTBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PCMPGTDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PCMPGTWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PINSRBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PINSRDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PINSRQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PINSRWrmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMAXSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMAXSDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMAXSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMAXUBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMAXUDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMAXUWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMINSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMINSDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMINSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMINUBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMINUDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMINUWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVSXBDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVSXBQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVSXBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVSXDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVSXWDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVSXWQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVZXBDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVZXBQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVZXBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVZXDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVZXWDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PMOVZXWQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSHUFBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSHUFDmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSHUFHWmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSHUFLWmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSIGNBrm128")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSIGNDrm128")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSIGNWrm128")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSUBBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSUBDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSUBQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSUBSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSUBSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSUBUSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSUBUSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PSUBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PUNPCKHBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PUNPCKHDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PUNPCKHQDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PUNPCKHWDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PUNPCKLBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PUNPCKLDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PUNPCKLQDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "PUNPCKLWDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPABSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPABSDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPABSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPACKSSDWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPACKSSWBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPACKUSDWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPACKUSWBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPADDBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPADDDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPADDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPADDSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPADDSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPADDUSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPADDUSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPADDWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPALIGNRrmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPAVGBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPAVGWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPBLENDWrmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPCMPEQBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPCMPEQDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPCMPEQQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPCMPEQWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPCMPGTBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPCMPGTDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPCMPGTWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPINSRBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPINSRDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPINSRQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPINSRWrmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMAXSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMAXSDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMAXSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMAXUBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMAXUDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMAXUWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMINSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMINSDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMINSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMINUBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMINUDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMINUWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVSXBDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVSXBQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVSXBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVSXDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVSXWDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVSXWQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVZXBDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVZXBQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVZXBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVZXDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVZXWDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPMOVZXWQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSHUFBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSHUFDmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSHUFHWmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSHUFLWmi")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSIGNBrm128")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSIGNDrm128")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSIGNWrm128")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSUBBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSUBDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSUBQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSUBSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSUBSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSUBUSBrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSUBUSWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPSUBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPUNPCKHBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPUNPCKHDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPUNPCKHQDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPUNPCKHWDrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPUNPCKLBWrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPUNPCKLDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPUNPCKLQDQrm")>;
+def: InstRW<[SBWriteResGroup59], (instregex "VPUNPCKLWDrm")>;
+
+def SBWriteResGroup60 : SchedWriteRes<[SBPort23,SBPort015]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup60], (instregex "PANDNrm")>;
+def: InstRW<[SBWriteResGroup60], (instregex "PANDrm")>;
+def: InstRW<[SBWriteResGroup60], (instregex "PORrm")>;
+def: InstRW<[SBWriteResGroup60], (instregex "PXORrm")>;
+def: InstRW<[SBWriteResGroup60], (instregex "VPANDNrm")>;
+def: InstRW<[SBWriteResGroup60], (instregex "VPANDrm")>;
+def: InstRW<[SBWriteResGroup60], (instregex "VPORrm")>;
+def: InstRW<[SBWriteResGroup60], (instregex "VPXORrm")>;
+
+def SBWriteResGroup61 : SchedWriteRes<[SBPort0]> {
+ let Latency = 7;
+ let NumMicroOps = 3;
+ let ResourceCycles = [3];
+}
+def: InstRW<[SBWriteResGroup61], (instregex "VRCPPSr")>;
+def: InstRW<[SBWriteResGroup61], (instregex "VRSQRTPSYr")>;
+
+def SBWriteResGroup62 : SchedWriteRes<[SBPort5,SBPort23]> {
+ let Latency = 7;
+ let NumMicroOps = 3;
+ let ResourceCycles = [2,1];
+}
+def: InstRW<[SBWriteResGroup62], (instregex "VERRm")>;
+def: InstRW<[SBWriteResGroup62], (instregex "VERWm")>;
+
+def SBWriteResGroup63 : SchedWriteRes<[SBPort23,SBPort015]> {
+ let Latency = 7;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,2];
+}
+def: InstRW<[SBWriteResGroup63], (instregex "LODSB")>;
+def: InstRW<[SBWriteResGroup63], (instregex "LODSW")>;
+
+def SBWriteResGroup64 : SchedWriteRes<[SBPort5,SBPort01,SBPort23]> {
+ let Latency = 7;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup64], (instregex "FARJMP64")>;
+
+def SBWriteResGroup65 : SchedWriteRes<[SBPort23,SBPort0,SBPort015]> {
+ let Latency = 7;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup65], (instregex "ADC64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "ADC8rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVAE64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVB64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVE64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVG64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVGE64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVL64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVLE64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVNE64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVNO64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVNP64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVNS64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVO64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVP64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "CMOVS64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "SBB64rm")>;
+def: InstRW<[SBWriteResGroup65], (instregex "SBB8rm")>;
+
+def SBWriteResGroup66 : SchedWriteRes<[SBPort0,SBPort4,SBPort23]> {
+ let Latency = 7;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,2];
+}
+def: InstRW<[SBWriteResGroup66], (instregex "FNSTSWm")>;
+
+def SBWriteResGroup67 : SchedWriteRes<[SBPort1,SBPort5,SBPort015]> {
+ let Latency = 7;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,2,1];
+}
+def: InstRW<[SBWriteResGroup67], (instregex "SLDT32r")>;
+def: InstRW<[SBWriteResGroup67], (instregex "STR32r")>;
+
+def SBWriteResGroup68 : SchedWriteRes<[SBPort4,SBPort5,SBPort23]> {
+ let Latency = 7;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,2];
+}
+def: InstRW<[SBWriteResGroup68], (instregex "CALL64m")>;
+def: InstRW<[SBWriteResGroup68], (instregex "FNSTCW16m")>;
+
+def SBWriteResGroup69 : SchedWriteRes<[SBPort4,SBPort23,SBPort0]> {
+ let Latency = 7;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,2,1];
+}
+def: InstRW<[SBWriteResGroup69], (instregex "BTC64mi8")>;
+def: InstRW<[SBWriteResGroup69], (instregex "BTR64mi8")>;
+def: InstRW<[SBWriteResGroup69], (instregex "BTS64mi8")>;
+def: InstRW<[SBWriteResGroup69], (instregex "SAR64mi")>;
+def: InstRW<[SBWriteResGroup69], (instregex "SAR8mi")>;
+def: InstRW<[SBWriteResGroup69], (instregex "SHL64m1")>;
+def: InstRW<[SBWriteResGroup69], (instregex "SHL64mi")>;
+def: InstRW<[SBWriteResGroup69], (instregex "SHL8m1")>;
+def: InstRW<[SBWriteResGroup69], (instregex "SHL8mi")>;
+def: InstRW<[SBWriteResGroup69], (instregex "SHR64mi")>;
+def: InstRW<[SBWriteResGroup69], (instregex "SHR8mi")>;
+
+def SBWriteResGroup70 : SchedWriteRes<[SBPort4,SBPort23,SBPort015]> {
+ let Latency = 7;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,2,1];
+}
+def: InstRW<[SBWriteResGroup70], (instregex "ADD64mi8")>;
+def: InstRW<[SBWriteResGroup70], (instregex "ADD64mr")>;
+def: InstRW<[SBWriteResGroup70], (instregex "ADD8mi")>;
+def: InstRW<[SBWriteResGroup70], (instregex "ADD8mr")>;
+def: InstRW<[SBWriteResGroup70], (instregex "AND64mi8")>;
+def: InstRW<[SBWriteResGroup70], (instregex "AND64mr")>;
+def: InstRW<[SBWriteResGroup70], (instregex "AND8mi")>;
+def: InstRW<[SBWriteResGroup70], (instregex "AND8mr")>;
+def: InstRW<[SBWriteResGroup70], (instregex "DEC64m")>;
+def: InstRW<[SBWriteResGroup70], (instregex "DEC8m")>;
+def: InstRW<[SBWriteResGroup70], (instregex "INC64m")>;
+def: InstRW<[SBWriteResGroup70], (instregex "INC8m")>;
+def: InstRW<[SBWriteResGroup70], (instregex "NEG64m")>;
+def: InstRW<[SBWriteResGroup70], (instregex "NEG8m")>;
+def: InstRW<[SBWriteResGroup70], (instregex "NOT64m")>;
+def: InstRW<[SBWriteResGroup70], (instregex "NOT8m")>;
+def: InstRW<[SBWriteResGroup70], (instregex "OR64mi8")>;
+def: InstRW<[SBWriteResGroup70], (instregex "OR64mr")>;
+def: InstRW<[SBWriteResGroup70], (instregex "OR8mi")>;
+def: InstRW<[SBWriteResGroup70], (instregex "OR8mr")>;
+def: InstRW<[SBWriteResGroup70], (instregex "SUB64mi8")>;
+def: InstRW<[SBWriteResGroup70], (instregex "SUB64mr")>;
+def: InstRW<[SBWriteResGroup70], (instregex "SUB8mi")>;
+def: InstRW<[SBWriteResGroup70], (instregex "SUB8mr")>;
+def: InstRW<[SBWriteResGroup70], (instregex "TEST64rm")>;
+def: InstRW<[SBWriteResGroup70], (instregex "TEST8mi")>;
+def: InstRW<[SBWriteResGroup70], (instregex "TEST8rm")>;
+def: InstRW<[SBWriteResGroup70], (instregex "XOR64mi8")>;
+def: InstRW<[SBWriteResGroup70], (instregex "XOR64mr")>;
+def: InstRW<[SBWriteResGroup70], (instregex "XOR8mi")>;
+def: InstRW<[SBWriteResGroup70], (instregex "XOR8mr")>;
+
+def SBWriteResGroup71 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup71], (instregex "MMX_PMADDUBSWrm64")>;
+def: InstRW<[SBWriteResGroup71], (instregex "MMX_PMULHRSWrm64")>;
+def: InstRW<[SBWriteResGroup71], (instregex "VTESTPDYrm")>;
+def: InstRW<[SBWriteResGroup71], (instregex "VTESTPSYrm")>;
+
+def SBWriteResGroup72 : SchedWriteRes<[SBPort1,SBPort23]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup72], (instregex "BSF64rm")>;
+def: InstRW<[SBWriteResGroup72], (instregex "BSR64rm")>;
+def: InstRW<[SBWriteResGroup72], (instregex "CRC32r32m16")>;
+def: InstRW<[SBWriteResGroup72], (instregex "CRC32r32m8")>;
+def: InstRW<[SBWriteResGroup72], (instregex "FCOM32m")>;
+def: InstRW<[SBWriteResGroup72], (instregex "FCOM64m")>;
+def: InstRW<[SBWriteResGroup72], (instregex "FCOMP32m")>;
+def: InstRW<[SBWriteResGroup72], (instregex "FCOMP64m")>;
+def: InstRW<[SBWriteResGroup72], (instregex "MUL8m")>;
+
+def SBWriteResGroup73 : SchedWriteRes<[SBPort5,SBPort23]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup73], (instregex "VANDNPDYrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VANDNPSYrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VANDPDrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VANDPSrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VORPDYrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VORPSYrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VPERM2F128rm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VPERMILPDYri")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VPERMILPDmi")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VPERMILPSYri")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VPERMILPSmi")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VSHUFPDYrmi")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VSHUFPSYrmi")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VUNPCKHPDrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VUNPCKHPSrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VUNPCKLPDYrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VUNPCKLPSYrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VXORPDrm")>;
+def: InstRW<[SBWriteResGroup73], (instregex "VXORPSrm")>;
+
+def SBWriteResGroup74 : SchedWriteRes<[SBPort23,SBPort0]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup74], (instregex "VBLENDPDYrmi")>;
+def: InstRW<[SBWriteResGroup74], (instregex "VBLENDPSYrmi")>;
+
+def SBWriteResGroup75 : SchedWriteRes<[SBPort23,SBPort0]> {
+ let Latency = 8;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,2];
+}
+def: InstRW<[SBWriteResGroup75], (instregex "BLENDVPDrm0")>;
+def: InstRW<[SBWriteResGroup75], (instregex "BLENDVPSrm0")>;
+def: InstRW<[SBWriteResGroup75], (instregex "VBLENDVPDrm")>;
+def: InstRW<[SBWriteResGroup75], (instregex "VBLENDVPSrm")>;
+def: InstRW<[SBWriteResGroup75], (instregex "VMASKMOVPDrm")>;
+def: InstRW<[SBWriteResGroup75], (instregex "VMASKMOVPSrm")>;
+
+def SBWriteResGroup76 : SchedWriteRes<[SBPort23,SBPort15]> {
+ let Latency = 8;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,2];
+}
+def: InstRW<[SBWriteResGroup76], (instregex "PBLENDVBrr0")>;
+def: InstRW<[SBWriteResGroup76], (instregex "VPBLENDVBrm")>;
+
+def SBWriteResGroup77 : SchedWriteRes<[SBPort0,SBPort1,SBPort23]> {
+ let Latency = 8;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup77], (instregex "COMISDrm")>;
+def: InstRW<[SBWriteResGroup77], (instregex "COMISSrm")>;
+def: InstRW<[SBWriteResGroup77], (instregex "UCOMISDrm")>;
+def: InstRW<[SBWriteResGroup77], (instregex "UCOMISSrm")>;
+def: InstRW<[SBWriteResGroup77], (instregex "VCOMISDrm")>;
+def: InstRW<[SBWriteResGroup77], (instregex "VCOMISSrm")>;
+def: InstRW<[SBWriteResGroup77], (instregex "VUCOMISDrm")>;
+def: InstRW<[SBWriteResGroup77], (instregex "VUCOMISSrm")>;
+
+def SBWriteResGroup78 : SchedWriteRes<[SBPort0,SBPort5,SBPort23]> {
+ let Latency = 8;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup78], (instregex "PTESTrm")>;
+def: InstRW<[SBWriteResGroup78], (instregex "VPTESTrm")>;
+
+def SBWriteResGroup79 : SchedWriteRes<[SBPort0,SBPort23,SBPort15]> {
+ let Latency = 8;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup79], (instregex "PSLLDrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "PSLLQrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "PSLLWrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "PSRADrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "PSRAWrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "PSRLDrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "PSRLQrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "PSRLWrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "VPSLLDri")>;
+def: InstRW<[SBWriteResGroup79], (instregex "VPSLLQri")>;
+def: InstRW<[SBWriteResGroup79], (instregex "VPSLLWri")>;
+def: InstRW<[SBWriteResGroup79], (instregex "VPSRADrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "VPSRAWrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "VPSRLDrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "VPSRLQrm")>;
+def: InstRW<[SBWriteResGroup79], (instregex "VPSRLWrm")>;
+
+def SBWriteResGroup80 : SchedWriteRes<[SBPort23,SBPort15]> {
+ let Latency = 8;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,3];
+}
+def: InstRW<[SBWriteResGroup80], (instregex "MMX_PHADDSWrm64")>;
+def: InstRW<[SBWriteResGroup80], (instregex "MMX_PHADDWrm64")>;
+def: InstRW<[SBWriteResGroup80], (instregex "MMX_PHADDrm64")>;
+def: InstRW<[SBWriteResGroup80], (instregex "MMX_PHSUBDrm64")>;
+def: InstRW<[SBWriteResGroup80], (instregex "MMX_PHSUBSWrm64")>;
+def: InstRW<[SBWriteResGroup80], (instregex "MMX_PHSUBWrm64")>;
+
+def SBWriteResGroup81 : SchedWriteRes<[SBPort23,SBPort015]> {
+ let Latency = 8;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,3];
+}
+def: InstRW<[SBWriteResGroup81], (instregex "CMPXCHG64rm")>;
+def: InstRW<[SBWriteResGroup81], (instregex "CMPXCHG8rm")>;
+
+def SBWriteResGroup82 : SchedWriteRes<[SBPort23,SBPort0,SBPort015]> {
+ let Latency = 8;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,2,1];
+}
+def: InstRW<[SBWriteResGroup82], (instregex "CMOVA64rm")>;
+def: InstRW<[SBWriteResGroup82], (instregex "CMOVBE64rm")>;
+
+def SBWriteResGroup83 : SchedWriteRes<[SBPort23,SBPort015]> {
+ let Latency = 8;
+ let NumMicroOps = 5;
+ let ResourceCycles = [2,3];
+}
+def: InstRW<[SBWriteResGroup83], (instregex "CMPSB")>;
+def: InstRW<[SBWriteResGroup83], (instregex "CMPSL")>;
+def: InstRW<[SBWriteResGroup83], (instregex "CMPSQ")>;
+def: InstRW<[SBWriteResGroup83], (instregex "CMPSW")>;
+
+def SBWriteResGroup84 : SchedWriteRes<[SBPort4,SBPort5,SBPort23]> {
+ let Latency = 8;
+ let NumMicroOps = 5;
+ let ResourceCycles = [1,2,2];
+}
+def: InstRW<[SBWriteResGroup84], (instregex "FLDCW16m")>;
+
+def SBWriteResGroup85 : SchedWriteRes<[SBPort4,SBPort23,SBPort0]> {
+ let Latency = 8;
+ let NumMicroOps = 5;
+ let ResourceCycles = [1,2,2];
+}
+def: InstRW<[SBWriteResGroup85], (instregex "ROL64mi")>;
+def: InstRW<[SBWriteResGroup85], (instregex "ROL8mi")>;
+def: InstRW<[SBWriteResGroup85], (instregex "ROR64mi")>;
+def: InstRW<[SBWriteResGroup85], (instregex "ROR8mi")>;
+
+def SBWriteResGroup86 : SchedWriteRes<[SBPort4,SBPort23,SBPort015]> {
+ let Latency = 8;
+ let NumMicroOps = 5;
+ let ResourceCycles = [1,2,2];
+}
+def: InstRW<[SBWriteResGroup86], (instregex "MOVSB")>;
+def: InstRW<[SBWriteResGroup86], (instregex "MOVSL")>;
+def: InstRW<[SBWriteResGroup86], (instregex "MOVSQ")>;
+def: InstRW<[SBWriteResGroup86], (instregex "MOVSW")>;
+def: InstRW<[SBWriteResGroup86], (instregex "XADD64rm")>;
+def: InstRW<[SBWriteResGroup86], (instregex "XADD8rm")>;
+
+def SBWriteResGroup87 : SchedWriteRes<[SBPort4,SBPort5,SBPort01,SBPort23]> {
+ let Latency = 8;
+ let NumMicroOps = 5;
+ let ResourceCycles = [1,1,1,2];
+}
+def: InstRW<[SBWriteResGroup87], (instregex "FARCALL64")>;
+
+def SBWriteResGroup88 : SchedWriteRes<[SBPort4,SBPort23,SBPort0,SBPort015]> {
+ let Latency = 8;
+ let NumMicroOps = 5;
+ let ResourceCycles = [1,2,1,1];
+}
+def: InstRW<[SBWriteResGroup88], (instregex "SHLD64mri8")>;
+def: InstRW<[SBWriteResGroup88], (instregex "SHRD64mri8")>;
+
+def SBWriteResGroup89 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 9;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup89], (instregex "MMX_PMULUDQirm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PMADDUBSWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PMADDWDrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PMULDQrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PMULHRSWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PMULHUWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PMULHWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PMULLDrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PMULLWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PMULUDQrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "PSADBWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPMADDUBSWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPMADDWDrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPMULDQrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPMULHRSWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPMULHUWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPMULHWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPMULLDrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPMULLWrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPMULUDQrm")>;
+def: InstRW<[SBWriteResGroup89], (instregex "VPSADBWrm")>;
+
+def SBWriteResGroup90 : SchedWriteRes<[SBPort1,SBPort23]> {
+ let Latency = 9;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup90], (instregex "ADDPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "ADDPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "ADDSDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "ADDSSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "ADDSUBPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "ADDSUBPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "CMPPDrmi")>;
+def: InstRW<[SBWriteResGroup90], (instregex "CMPPSrmi")>;
+def: InstRW<[SBWriteResGroup90], (instregex "CMPSSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "CVTDQ2PSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "CVTPS2DQrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "CVTSI2SD64rm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "CVTSI2SDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "CVTTPS2DQrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MAXPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MAXPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MAXSDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MAXSSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MINPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MINPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MINSDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MINSSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MMX_CVTPI2PSirm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MMX_CVTPS2PIirm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "MMX_CVTTPS2PIirm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "POPCNT64rm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "ROUNDPDm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "ROUNDPSm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "ROUNDSDm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "ROUNDSSm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "SUBPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "SUBPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "SUBSDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "SUBSSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VADDPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VADDPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VADDSDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VADDSSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VADDSUBPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VADDSUBPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCMPPDrmi")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCMPPSrmi")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCMPSDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCMPSSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCVTDQ2PSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCVTPS2DQrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCVTSI2SD64rm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCVTSI2SDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCVTTPS2DQrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VMAXPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VMAXPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VMAXSDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VMAXSSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VMINPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VMINPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VMINSDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VMINSSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VROUNDPDm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VROUNDPSm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VROUNDSDm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VROUNDSSm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VSUBPDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VSUBPSrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VSUBSDrm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VSUBSSrm")>;
+
+def SBWriteResGroup91 : SchedWriteRes<[SBPort23,SBPort0]> {
+ let Latency = 9;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,2];
+}
+def: InstRW<[SBWriteResGroup91], (instregex "VBLENDVPDYrm")>;
+def: InstRW<[SBWriteResGroup91], (instregex "VBLENDVPSYrm")>;
+def: InstRW<[SBWriteResGroup91], (instregex "VMASKMOVPDrm")>;
+def: InstRW<[SBWriteResGroup91], (instregex "VMASKMOVPSrm")>;
+
+def SBWriteResGroup92 : SchedWriteRes<[SBPort0,SBPort1,SBPort5]> {
+ let Latency = 9;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup92], (instregex "DPPDrri")>;
+def: InstRW<[SBWriteResGroup92], (instregex "VDPPDrri")>;
+
+def SBWriteResGroup93 : SchedWriteRes<[SBPort0,SBPort1,SBPort23]> {
+ let Latency = 9;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup93], (instregex "CVTSD2SI64rm")>;
+def: InstRW<[SBWriteResGroup93], (instregex "CVTSD2SIrm")>;
+def: InstRW<[SBWriteResGroup93], (instregex "CVTSS2SI64rm")>;
+def: InstRW<[SBWriteResGroup93], (instregex "CVTSS2SIrm")>;
+def: InstRW<[SBWriteResGroup93], (instregex "CVTTSD2SI64rm")>;
+def: InstRW<[SBWriteResGroup93], (instregex "CVTTSD2SIrm")>;
+def: InstRW<[SBWriteResGroup93], (instregex "CVTTSS2SI64rm")>;
+def: InstRW<[SBWriteResGroup93], (instregex "CVTTSS2SIrm")>;
+def: InstRW<[SBWriteResGroup93], (instregex "MUL64m")>;
+
+def SBWriteResGroup94 : SchedWriteRes<[SBPort0,SBPort5,SBPort23]> {
+ let Latency = 9;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup94], (instregex "VPTESTYrm")>;
+
+def SBWriteResGroup95 : SchedWriteRes<[SBPort5,SBPort01,SBPort23]> {
+ let Latency = 9;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup95], (instregex "LD_F32m")>;
+def: InstRW<[SBWriteResGroup95], (instregex "LD_F64m")>;
+def: InstRW<[SBWriteResGroup95], (instregex "LD_F80m")>;
+
+def SBWriteResGroup96 : SchedWriteRes<[SBPort23,SBPort15]> {
+ let Latency = 9;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,3];
+}
+def: InstRW<[SBWriteResGroup96], (instregex "PHADDDrm")>;
+def: InstRW<[SBWriteResGroup96], (instregex "PHADDSWrm128")>;
+def: InstRW<[SBWriteResGroup96], (instregex "PHADDWrm")>;
+def: InstRW<[SBWriteResGroup96], (instregex "PHSUBDrm")>;
+def: InstRW<[SBWriteResGroup96], (instregex "PHSUBSWrm128")>;
+def: InstRW<[SBWriteResGroup96], (instregex "PHSUBWrm")>;
+def: InstRW<[SBWriteResGroup96], (instregex "VPHADDDrm")>;
+def: InstRW<[SBWriteResGroup96], (instregex "VPHADDSWrm128")>;
+def: InstRW<[SBWriteResGroup96], (instregex "VPHADDWrm")>;
+def: InstRW<[SBWriteResGroup96], (instregex "VPHSUBDrm")>;
+def: InstRW<[SBWriteResGroup96], (instregex "VPHSUBSWrm128")>;
+def: InstRW<[SBWriteResGroup96], (instregex "VPHSUBWrm")>;
+
+def SBWriteResGroup97 : SchedWriteRes<[SBPort1,SBPort4,SBPort23]> {
+ let Latency = 9;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,2];
+}
+def: InstRW<[SBWriteResGroup97], (instregex "IST_F16m")>;
+def: InstRW<[SBWriteResGroup97], (instregex "IST_F32m")>;
+def: InstRW<[SBWriteResGroup97], (instregex "IST_FP16m")>;
+def: InstRW<[SBWriteResGroup97], (instregex "IST_FP32m")>;
+def: InstRW<[SBWriteResGroup97], (instregex "IST_FP64m")>;
+def: InstRW<[SBWriteResGroup97], (instregex "SHL64mCL")>;
+def: InstRW<[SBWriteResGroup97], (instregex "SHL8mCL")>;
+
+def SBWriteResGroup98 : SchedWriteRes<[SBPort4,SBPort23,SBPort015]> {
+ let Latency = 9;
+ let NumMicroOps = 6;
+ let ResourceCycles = [1,2,3];
+}
+def: InstRW<[SBWriteResGroup98], (instregex "ADC64mi8")>;
+def: InstRW<[SBWriteResGroup98], (instregex "ADC8mi")>;
+def: InstRW<[SBWriteResGroup98], (instregex "SBB64mi8")>;
+def: InstRW<[SBWriteResGroup98], (instregex "SBB8mi")>;
+
+def SBWriteResGroup99 : SchedWriteRes<[SBPort4,SBPort23,SBPort0,SBPort015]> {
+ let Latency = 9;
+ let NumMicroOps = 6;
+ let ResourceCycles = [1,2,2,1];
+}
+def: InstRW<[SBWriteResGroup99], (instregex "ADC64mr")>;
+def: InstRW<[SBWriteResGroup99], (instregex "ADC8mr")>;
+def: InstRW<[SBWriteResGroup99], (instregex "SBB64mr")>;
+def: InstRW<[SBWriteResGroup99], (instregex "SBB8mr")>;
+
+def SBWriteResGroup100 : SchedWriteRes<[SBPort4,SBPort5,SBPort23,SBPort0,SBPort015]> {
+ let Latency = 9;
+ let NumMicroOps = 6;
+ let ResourceCycles = [1,1,2,1,1];
+}
+def: InstRW<[SBWriteResGroup100], (instregex "BT64mr")>;
+def: InstRW<[SBWriteResGroup100], (instregex "BTC64mr")>;
+def: InstRW<[SBWriteResGroup100], (instregex "BTR64mr")>;
+def: InstRW<[SBWriteResGroup100], (instregex "BTS64mr")>;
+
+def SBWriteResGroup101 : SchedWriteRes<[SBPort1,SBPort23]> {
+ let Latency = 10;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup101], (instregex "ADD_F32m")>;
+def: InstRW<[SBWriteResGroup101], (instregex "ADD_F64m")>;
+def: InstRW<[SBWriteResGroup101], (instregex "ILD_F16m")>;
+def: InstRW<[SBWriteResGroup101], (instregex "ILD_F32m")>;
+def: InstRW<[SBWriteResGroup101], (instregex "ILD_F64m")>;
+def: InstRW<[SBWriteResGroup101], (instregex "SUBR_F32m")>;
+def: InstRW<[SBWriteResGroup101], (instregex "SUBR_F64m")>;
+def: InstRW<[SBWriteResGroup101], (instregex "SUB_F32m")>;
+def: InstRW<[SBWriteResGroup101], (instregex "SUB_F64m")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VADDPDYrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VADDPSYrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VADDSUBPDYrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VADDSUBPSYrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VCMPPDYrmi")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VCMPPSYrmi")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VCVTDQ2PSYrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VCVTPS2DQYrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VCVTTPS2DQrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VMAXPDYrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VMAXPSYrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VMINPDrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VMINPSrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VROUNDPDm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VROUNDPSm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VSUBPDYrm")>;
+def: InstRW<[SBWriteResGroup101], (instregex "VSUBPSYrm")>;
+
+def SBWriteResGroup102 : SchedWriteRes<[SBPort0,SBPort1,SBPort23]> {
+ let Latency = 10;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup102], (instregex "VCVTSD2SI64rm")>;
+def: InstRW<[SBWriteResGroup102], (instregex "VCVTSD2SI64rr")>;
+def: InstRW<[SBWriteResGroup102], (instregex "VCVTSS2SI64rm")>;
+def: InstRW<[SBWriteResGroup102], (instregex "VCVTSS2SIrm")>;
+def: InstRW<[SBWriteResGroup102], (instregex "VCVTTSD2SI64rm")>;
+def: InstRW<[SBWriteResGroup102], (instregex "VCVTTSD2SI64rr")>;
+def: InstRW<[SBWriteResGroup102], (instregex "VCVTTSS2SI64rm")>;
+def: InstRW<[SBWriteResGroup102], (instregex "VCVTTSS2SIrm")>;
+
+def SBWriteResGroup103 : SchedWriteRes<[SBPort1,SBPort5,SBPort23]> {
+ let Latency = 10;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup103], (instregex "CVTDQ2PDrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "CVTPD2DQrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "CVTPD2PSrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "CVTSD2SSrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "CVTSI2SS64rm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "CVTSI2SSrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "CVTTPD2DQrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "MMX_CVTPD2PIirm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "MMX_CVTPI2PDirm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "MMX_CVTTPD2PIirm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "VCVTDQ2PDYrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "VCVTDQ2PDrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "VCVTPD2DQrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "VCVTPD2PSrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "VCVTSD2SSrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "VCVTSI2SS64rm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "VCVTSI2SSrm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "VCVTTPD2DQrm")>;
+
+def SBWriteResGroup104 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 11;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup104], (instregex "MULPDrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "MULPSrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "MULSDrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "MULSSrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "PCMPGTQrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "PHMINPOSUWrm128")>;
+def: InstRW<[SBWriteResGroup104], (instregex "RCPPSm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "RCPSSm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "RSQRTPSm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "RSQRTSSm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VMULPDrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VMULPSrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VMULSDrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VMULSSrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VPCMPGTQrm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VPHMINPOSUWrm128")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VRCPPSm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VRCPSSm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VRSQRTPSm")>;
+def: InstRW<[SBWriteResGroup104], (instregex "VRSQRTSSm")>;
+
+def SBWriteResGroup105 : SchedWriteRes<[SBPort0]> {
+ let Latency = 11;
+ let NumMicroOps = 3;
+ let ResourceCycles = [3];
+}
+def: InstRW<[SBWriteResGroup105], (instregex "PCMPISTRIrr")>;
+def: InstRW<[SBWriteResGroup105], (instregex "PCMPISTRM128rr")>;
+def: InstRW<[SBWriteResGroup105], (instregex "VPCMPISTRIrr")>;
+def: InstRW<[SBWriteResGroup105], (instregex "VPCMPISTRM128rr")>;
+
+def SBWriteResGroup106 : SchedWriteRes<[SBPort1,SBPort23]> {
+ let Latency = 11;
+ let NumMicroOps = 3;
+ let ResourceCycles = [2,1];
+}
+def: InstRW<[SBWriteResGroup106], (instregex "FICOM16m")>;
+def: InstRW<[SBWriteResGroup106], (instregex "FICOM32m")>;
+def: InstRW<[SBWriteResGroup106], (instregex "FICOMP16m")>;
+def: InstRW<[SBWriteResGroup106], (instregex "FICOMP32m")>;
+
+def SBWriteResGroup107 : SchedWriteRes<[SBPort1,SBPort5,SBPort23]> {
+ let Latency = 11;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup107], (instregex "VCVTPD2DQYrm")>;
+def: InstRW<[SBWriteResGroup107], (instregex "VCVTPD2PSYrm")>;
+def: InstRW<[SBWriteResGroup107], (instregex "VCVTTPD2DQYrm")>;
+
+def SBWriteResGroup108 : SchedWriteRes<[SBPort0,SBPort23,SBPort15]> {
+ let Latency = 11;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,2];
+}
+def: InstRW<[SBWriteResGroup108], (instregex "MPSADBWrmi")>;
+def: InstRW<[SBWriteResGroup108], (instregex "VMPSADBWrmi")>;
+
+def SBWriteResGroup109 : SchedWriteRes<[SBPort1,SBPort5,SBPort23]> {
+ let Latency = 11;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,2,1];
+}
+def: InstRW<[SBWriteResGroup109], (instregex "HADDPDrm")>;
+def: InstRW<[SBWriteResGroup109], (instregex "HADDPSrm")>;
+def: InstRW<[SBWriteResGroup109], (instregex "HSUBPDrm")>;
+def: InstRW<[SBWriteResGroup109], (instregex "HSUBPSrm")>;
+def: InstRW<[SBWriteResGroup109], (instregex "VHADDPDrm")>;
+def: InstRW<[SBWriteResGroup109], (instregex "VHADDPSrm")>;
+def: InstRW<[SBWriteResGroup109], (instregex "VHSUBPDrm")>;
+def: InstRW<[SBWriteResGroup109], (instregex "VHSUBPSrm")>;
+
+def SBWriteResGroup110 : SchedWriteRes<[SBPort5]> {
+ let Latency = 12;
+ let NumMicroOps = 2;
+ let ResourceCycles = [2];
+}
+def: InstRW<[SBWriteResGroup110], (instregex "AESIMCrr")>;
+def: InstRW<[SBWriteResGroup110], (instregex "VAESIMCrr")>;
+
+def SBWriteResGroup111 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 12;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup111], (instregex "MUL_F32m")>;
+def: InstRW<[SBWriteResGroup111], (instregex "MUL_F64m")>;
+def: InstRW<[SBWriteResGroup111], (instregex "VMULPDYrm")>;
+def: InstRW<[SBWriteResGroup111], (instregex "VMULPSYrm")>;
+
+def SBWriteResGroup112 : SchedWriteRes<[SBPort0,SBPort1,SBPort5]> {
+ let Latency = 12;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,2,1];
+}
+def: InstRW<[SBWriteResGroup112], (instregex "DPPSrri")>;
+def: InstRW<[SBWriteResGroup112], (instregex "VDPPSYrri")>;
+def: InstRW<[SBWriteResGroup112], (instregex "VDPPSrri")>;
+
+def SBWriteResGroup113 : SchedWriteRes<[SBPort1,SBPort5,SBPort23]> {
+ let Latency = 12;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,2,1];
+}
+def: InstRW<[SBWriteResGroup113], (instregex "VHADDPDrm")>;
+def: InstRW<[SBWriteResGroup113], (instregex "VHADDPSYrm")>;
+def: InstRW<[SBWriteResGroup113], (instregex "VHSUBPDYrm")>;
+def: InstRW<[SBWriteResGroup113], (instregex "VHSUBPSYrm")>;
+
+def SBWriteResGroup114 : SchedWriteRes<[SBPort1,SBPort23]> {
+ let Latency = 13;
+ let NumMicroOps = 3;
+ let ResourceCycles = [2,1];
+}
+def: InstRW<[SBWriteResGroup114], (instregex "ADD_FI16m")>;
+def: InstRW<[SBWriteResGroup114], (instregex "ADD_FI32m")>;
+def: InstRW<[SBWriteResGroup114], (instregex "SUBR_FI16m")>;
+def: InstRW<[SBWriteResGroup114], (instregex "SUBR_FI32m")>;
+def: InstRW<[SBWriteResGroup114], (instregex "SUB_FI16m")>;
+def: InstRW<[SBWriteResGroup114], (instregex "SUB_FI32m")>;
+
+def SBWriteResGroup115 : SchedWriteRes<[SBPort5,SBPort23,SBPort015]> {
+ let Latency = 13;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup115], (instregex "AESDECLASTrm")>;
+def: InstRW<[SBWriteResGroup115], (instregex "AESDECrm")>;
+def: InstRW<[SBWriteResGroup115], (instregex "AESENCLASTrm")>;
+def: InstRW<[SBWriteResGroup115], (instregex "AESENCrm")>;
+def: InstRW<[SBWriteResGroup115], (instregex "VAESDECLASTrm")>;
+def: InstRW<[SBWriteResGroup115], (instregex "VAESDECrm")>;
+def: InstRW<[SBWriteResGroup115], (instregex "VAESENCLASTrm")>;
+def: InstRW<[SBWriteResGroup115], (instregex "VAESENCrm")>;
+
+def SBWriteResGroup116 : SchedWriteRes<[SBPort0]> {
+ let Latency = 14;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup116], (instregex "DIVPSrr")>;
+def: InstRW<[SBWriteResGroup116], (instregex "DIVSSrr")>;
+def: InstRW<[SBWriteResGroup116], (instregex "SQRTPSr")>;
+def: InstRW<[SBWriteResGroup116], (instregex "SQRTSSr")>;
+def: InstRW<[SBWriteResGroup116], (instregex "VDIVPSrr")>;
+def: InstRW<[SBWriteResGroup116], (instregex "VDIVSSrr")>;
+def: InstRW<[SBWriteResGroup116], (instregex "VSQRTPSr")>;
+
+def SBWriteResGroup117 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 14;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup117], (instregex "VSQRTSSm")>;
+
+def SBWriteResGroup118 : SchedWriteRes<[SBPort0,SBPort23,SBPort0]> {
+ let Latency = 14;
+ let NumMicroOps = 4;
+ let ResourceCycles = [2,1,1];
+}
+def: InstRW<[SBWriteResGroup118], (instregex "VRCPPSm")>;
+def: InstRW<[SBWriteResGroup118], (instregex "VRSQRTPSYm")>;
+
+def SBWriteResGroup119 : SchedWriteRes<[SBPort0,SBPort1,SBPort23]> {
+ let Latency = 15;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup119], (instregex "MUL_FI16m")>;
+def: InstRW<[SBWriteResGroup119], (instregex "MUL_FI32m")>;
+
+def SBWriteResGroup120 : SchedWriteRes<[SBPort0,SBPort1,SBPort5,SBPort23]> {
+ let Latency = 15;
+ let NumMicroOps = 4;
+ let ResourceCycles = [1,1,1,1];
+}
+def: InstRW<[SBWriteResGroup120], (instregex "DPPDrmi")>;
+def: InstRW<[SBWriteResGroup120], (instregex "VDPPDrmi")>;
+
+def SBWriteResGroup121 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 17;
+ let NumMicroOps = 4;
+ let ResourceCycles = [3,1];
+}
+def: InstRW<[SBWriteResGroup121], (instregex "PCMPISTRIrm")>;
+def: InstRW<[SBWriteResGroup121], (instregex "PCMPISTRM128rm")>;
+def: InstRW<[SBWriteResGroup121], (instregex "VPCMPISTRIrm")>;
+def: InstRW<[SBWriteResGroup121], (instregex "VPCMPISTRM128rm")>;
+
+def SBWriteResGroup122 : SchedWriteRes<[SBPort5,SBPort23]> {
+ let Latency = 18;
+ let NumMicroOps = 3;
+ let ResourceCycles = [2,1];
+}
+def: InstRW<[SBWriteResGroup122], (instregex "AESIMCrm")>;
+def: InstRW<[SBWriteResGroup122], (instregex "VAESIMCrm")>;
+
+def SBWriteResGroup123 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 20;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup123], (instregex "DIVPSrm")>;
+def: InstRW<[SBWriteResGroup123], (instregex "DIVSSrm")>;
+def: InstRW<[SBWriteResGroup123], (instregex "SQRTPSm")>;
+def: InstRW<[SBWriteResGroup123], (instregex "SQRTSSm")>;
+def: InstRW<[SBWriteResGroup123], (instregex "VDIVPSrm")>;
+def: InstRW<[SBWriteResGroup123], (instregex "VDIVSSrm")>;
+def: InstRW<[SBWriteResGroup123], (instregex "VSQRTPSm")>;
+
+def SBWriteResGroup124 : SchedWriteRes<[SBPort0]> {
+ let Latency = 21;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup124], (instregex "VSQRTSDr")>;
+
+def SBWriteResGroup125 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 21;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup125], (instregex "VSQRTSDm")>;
+
+def SBWriteResGroup126 : SchedWriteRes<[SBPort0]> {
+ let Latency = 22;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup126], (instregex "DIVPDrr")>;
+def: InstRW<[SBWriteResGroup126], (instregex "DIVSDrr")>;
+def: InstRW<[SBWriteResGroup126], (instregex "SQRTPDr")>;
+def: InstRW<[SBWriteResGroup126], (instregex "SQRTSDr")>;
+def: InstRW<[SBWriteResGroup126], (instregex "VDIVPDrr")>;
+def: InstRW<[SBWriteResGroup126], (instregex "VDIVSDrr")>;
+def: InstRW<[SBWriteResGroup126], (instregex "VSQRTPDr")>;
+
+def SBWriteResGroup127 : SchedWriteRes<[SBPort0]> {
+ let Latency = 24;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup127], (instregex "DIVR_FPrST0")>;
+def: InstRW<[SBWriteResGroup127], (instregex "DIVR_FST0r")>;
+def: InstRW<[SBWriteResGroup127], (instregex "DIVR_FrST0")>;
+def: InstRW<[SBWriteResGroup127], (instregex "DIV_FPrST0")>;
+def: InstRW<[SBWriteResGroup127], (instregex "DIV_FST0r")>;
+def: InstRW<[SBWriteResGroup127], (instregex "DIV_FrST0")>;
+
+def SBWriteResGroup128 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 28;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup128], (instregex "DIVPDrm")>;
+def: InstRW<[SBWriteResGroup128], (instregex "DIVSDrm")>;
+def: InstRW<[SBWriteResGroup128], (instregex "SQRTPDm")>;
+def: InstRW<[SBWriteResGroup128], (instregex "SQRTSDm")>;
+def: InstRW<[SBWriteResGroup128], (instregex "VDIVPDrm")>;
+def: InstRW<[SBWriteResGroup128], (instregex "VDIVSDrm")>;
+def: InstRW<[SBWriteResGroup128], (instregex "VSQRTPDm")>;
+
+def SBWriteResGroup129 : SchedWriteRes<[SBPort0,SBPort0]> {
+ let Latency = 29;
+ let NumMicroOps = 3;
+ let ResourceCycles = [2,1];
+}
+def: InstRW<[SBWriteResGroup129], (instregex "VDIVPSYrr")>;
+def: InstRW<[SBWriteResGroup129], (instregex "VSQRTPSYr")>;
+
+def SBWriteResGroup130 : SchedWriteRes<[SBPort0,SBPort23]> {
+ let Latency = 31;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,1];
+}
+def: InstRW<[SBWriteResGroup130], (instregex "DIVR_F32m")>;
+def: InstRW<[SBWriteResGroup130], (instregex "DIVR_F64m")>;
+def: InstRW<[SBWriteResGroup130], (instregex "DIV_F32m")>;
+def: InstRW<[SBWriteResGroup130], (instregex "DIV_F64m")>;
+
+def SBWriteResGroup131 : SchedWriteRes<[SBPort0,SBPort1,SBPort23]> {
+ let Latency = 34;
+ let NumMicroOps = 3;
+ let ResourceCycles = [1,1,1];
+}
+def: InstRW<[SBWriteResGroup131], (instregex "DIVR_FI16m")>;
+def: InstRW<[SBWriteResGroup131], (instregex "DIVR_FI32m")>;
+def: InstRW<[SBWriteResGroup131], (instregex "DIV_FI16m")>;
+def: InstRW<[SBWriteResGroup131], (instregex "DIV_FI32m")>;
+
+def SBWriteResGroup132 : SchedWriteRes<[SBPort0,SBPort23,SBPort0]> {
+ let Latency = 36;
+ let NumMicroOps = 4;
+ let ResourceCycles = [2,1,1];
+}
+def: InstRW<[SBWriteResGroup132], (instregex "VDIVPSYrm")>;
+def: InstRW<[SBWriteResGroup132], (instregex "VSQRTPSYm")>;
+
+def SBWriteResGroup133 : SchedWriteRes<[SBPort0,SBPort0]> {
+ let Latency = 45;
+ let NumMicroOps = 3;
+ let ResourceCycles = [2,1];
+}
+def: InstRW<[SBWriteResGroup133], (instregex "VDIVPDYrr")>;
+def: InstRW<[SBWriteResGroup133], (instregex "VSQRTPDYr")>;
+
+def SBWriteResGroup134 : SchedWriteRes<[SBPort0,SBPort23,SBPort0]> {
+ let Latency = 52;
+ let NumMicroOps = 4;
+ let ResourceCycles = [2,1,1];
+}
+def: InstRW<[SBWriteResGroup134], (instregex "VDIVPDYrm")>;
+def: InstRW<[SBWriteResGroup134], (instregex "VSQRTPDYm")>;
+
+def SBWriteResGroup135 : SchedWriteRes<[SBPort0]> {
+ let Latency = 114;
+ let NumMicroOps = 1;
+ let ResourceCycles = [1];
+}
+def: InstRW<[SBWriteResGroup135], (instregex "VSQRTSSr")>;
+
} // SchedModel
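
Each SchedWriteRes group above bundles a latency, a micro-op count, and per-port busy cycles (ResourceCycles, one entry per port in the template argument list), and the InstRW lines bind instructions matched by instregex to that group. As a rough aid to reading the numbers, here is a small standalone C++ sketch, not LLVM code, of how a consumer could pull a port-limited reciprocal-throughput bound out of one of these groups; the group picked (SBWriteResGroup98) and the helper name are only illustrative.

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  // Illustrative only: mirrors the {Latency, NumMicroOps, ResourceCycles}
  // shape of a SchedWriteRes group such as SBWriteResGroup98 above.
  struct WriteRes {
    int Latency;
    int NumMicroOps;
    std::vector<int> ResourceCycles; // one entry per listed port
  };

  // A write's reciprocal throughput is bounded below by the port that is
  // kept busy for the most cycles.
  int portLimitedRThroughput(const WriteRes &W) {
    return *std::max_element(W.ResourceCycles.begin(), W.ResourceCycles.end());
  }

  int main() {
    WriteRes Group98{9, 6, {1, 2, 3}}; // SBPort4, SBPort23, SBPort015
    std::printf("latency=%d uops=%d rthroughput>=%d\n", Group98.Latency,
                Group98.NumMicroOps, portLimitedRThroughput(Group98));
  }
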
diff --git a/lib/Target/X86/X86ScheduleBtVer2.td b/lib/Target/X86/X86ScheduleBtVer2.td
index 6cb2a3694d92..ed53893b779c 100644
--- a/lib/Target/X86/X86ScheduleBtVer2.td
+++ b/lib/Target/X86/X86ScheduleBtVer2.td
@@ -369,5 +369,82 @@ def : WriteRes<WriteSystem, [JAny]> { let Latency = 100; }
def : WriteRes<WriteMicrocoded, [JAny]> { let Latency = 100; }
def : WriteRes<WriteFence, [JSAGU]>;
def : WriteRes<WriteNop, []>;
+
+////////////////////////////////////////////////////////////////////////////////
+// AVX instructions.
+////////////////////////////////////////////////////////////////////////////////
+
+def WriteFAddY: SchedWriteRes<[JFPU0]> {
+ let Latency = 3;
+ let ResourceCycles = [2];
+}
+def : InstRW<[WriteFAddY], (instregex "VADD(SUB)?P(S|D)Yrr", "VSUBP(S|D)Yrr")>;
+
+def WriteFAddYLd: SchedWriteRes<[JLAGU, JFPU0]> {
+ let Latency = 8;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteFAddYLd, ReadAfterLd], (instregex "VADD(SUB)?P(S|D)Yrm", "VSUBP(S|D)Yrm")>;
+
+def WriteFDivY: SchedWriteRes<[JFPU1]> {
+ let Latency = 38;
+ let ResourceCycles = [38];
+}
+def : InstRW<[WriteFDivY], (instregex "VDIVP(D|S)Yrr")>;
+
+def WriteFDivYLd: SchedWriteRes<[JLAGU, JFPU1]> {
+ let Latency = 43;
+ let ResourceCycles = [1, 38];
+}
+def : InstRW<[WriteFDivYLd, ReadAfterLd], (instregex "VDIVP(S|D)Yrm")>;
+
+def WriteVMULYPD: SchedWriteRes<[JFPU1]> {
+ let Latency = 4;
+ let ResourceCycles = [4];
+}
+def : InstRW<[WriteVMULYPD], (instregex "VMULPDYrr")>;
+
+def WriteVMULYPDLd: SchedWriteRes<[JLAGU, JFPU1]> {
+ let Latency = 9;
+ let ResourceCycles = [1, 4];
+}
+def : InstRW<[WriteVMULYPDLd, ReadAfterLd], (instregex "VMULPDYrm")>;
+
+def WriteVMULYPS: SchedWriteRes<[JFPU1]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+}
+def : InstRW<[WriteVMULYPS], (instregex "VMULPSYrr", "VRCPPSYr", "VRSQRTPSYr")>;
+
+def WriteVMULYPSLd: SchedWriteRes<[JLAGU, JFPU1]> {
+ let Latency = 7;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteVMULYPSLd, ReadAfterLd], (instregex "VMULPSYrm", "VRCPPSYm", "VRSQRTPSYm")>;
+
+def WriteVSQRTYPD: SchedWriteRes<[JFPU1]> {
+ let Latency = 54;
+ let ResourceCycles = [54];
+}
+def : InstRW<[WriteVSQRTYPD], (instregex "VSQRTPDYr")>;
+
+def WriteVSQRTYPDLd: SchedWriteRes<[JLAGU, JFPU1]> {
+ let Latency = 59;
+ let ResourceCycles = [1, 54];
+}
+def : InstRW<[WriteVSQRTYPDLd], (instregex "VSQRTPDYm")>;
+
+def WriteVSQRTYPS: SchedWriteRes<[JFPU1]> {
+ let Latency = 42;
+ let ResourceCycles = [42];
+}
+def : InstRW<[WriteVSQRTYPS], (instregex "VSQRTPSYr")>;
+
+def WriteVSQRTYPSLd: SchedWriteRes<[JLAGU, JFPU1]> {
+ let Latency = 47;
+ let ResourceCycles = [1, 42];
+}
+def : InstRW<[WriteVSQRTYPSLd], (instregex "VSQRTPSYm")>;
+
} // SchedModel
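
One relationship worth noticing in the BtVer2 entries above: every *Ld variant keeps the same FP-pipe resource cycles, adds the JLAGU load pipe, and reports a latency exactly five cycles above its register form (3 vs 8, 38 vs 43, and so on). A small standalone check of that difference, using only the numbers visible here:

  #include <cstdio>

  int main() {
    // (register-form latency, load-form latency) pairs copied from the
    // BtVer2 definitions above.
    const int Pairs[][2] = {{3, 8},   // VADD/VADDSUB/VSUBP(S|D)Y
                            {38, 43}, // VDIVP(S|D)Y
                            {4, 9},   // VMULPDY
                            {2, 7},   // VMULPSY, VRCPPSY, VRSQRTPSY
                            {54, 59}, // VSQRTPDY
                            {42, 47}};// VSQRTPSY
    for (const auto &P : Pairs)
      std::printf("reg=%2d  mem=%2d  load adds %d cycles\n", P[0], P[1],
                  P[1] - P[0]);
  }
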
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index 5ba8534d32d3..c9924f264939 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -142,10 +142,15 @@ int X86TTIImpl::getArithmeticInstrCost(
{ ISD::FDIV, MVT::v2f64, 69 }, // divpd
{ ISD::FADD, MVT::v2f64, 2 }, // addpd
{ ISD::FSUB, MVT::v2f64, 2 }, // subpd
- // v2i64/v4i64 mul is custom lowered as a series of long
- // multiplies(3), shifts(3) and adds(2).
- // slm muldq version throughput is 2
- { ISD::MUL, MVT::v2i64, 11 },
+ // v2i64/v4i64 mul is custom lowered as a series of long
+ // multiplies(3), shifts(3) and adds(2).
+ // slm muldq version throughput is 2 and addq throughput is 4,
+ // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
+ // 2X4 (addq throughput) = 17
+ { ISD::MUL, MVT::v2i64, 17 },
+ // slm addq/subq throughput is 4
+ { ISD::ADD, MVT::v2i64, 4 },
+ { ISD::SUB, MVT::v2i64, 4 },
};
if (ST->isSLM()) {
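
The arithmetic in the comment above is easiest to verify directly: three long multiplies at reciprocal throughput 2, three shifts at 1, and two adds at 4 (taking the adds(2) count literally) give the 17 that the cost table now carries for v2i64 MUL. A trivial standalone check:

  #include <cassert>
  #include <cstdio>

  int main() {
    // Counts follow the lowering described in the comment; throughputs are
    // the SLM figures quoted there (muldq 2, shift 1, addq 4).
    const int Cost = 3 * 2   // three long multiplies
                   + 3 * 1   // three shifts
                   + 2 * 4;  // two adds
    assert(Cost == 17 && "matches the v2i64 MUL cost entered above");
    std::printf("SLM v2i64 mul cost = %d\n", Cost);
  }
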
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index d8cf8d3f5da2..53223ab44316 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -124,6 +124,10 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
} else if (I->use_empty()) {
// Dead argument (which are always marked as promotable)
++NumArgumentsDead;
+
+ // There may be remaining metadata uses of the argument for things like
+ // llvm.dbg.value. Replace them with undef.
+ I->replaceAllUsesWith(UndefValue::get(I->getType()));
} else {
// Okay, this is being promoted. This means that the only uses are loads
// or GEPs which are only used by loads
diff --git a/lib/Transforms/IPO/FunctionImport.cpp b/lib/Transforms/IPO/FunctionImport.cpp
index 6d34ab8b0d96..233a36d2bc54 100644
--- a/lib/Transforms/IPO/FunctionImport.cpp
+++ b/lib/Transforms/IPO/FunctionImport.cpp
@@ -64,6 +64,12 @@ static cl::opt<float> ImportHotMultiplier(
"import-hot-multiplier", cl::init(3.0), cl::Hidden, cl::value_desc("x"),
cl::desc("Multiply the `import-instr-limit` threshold for hot callsites"));
+static cl::opt<float> ImportCriticalMultiplier(
+ "import-critical-multiplier", cl::init(100.0), cl::Hidden,
+ cl::value_desc("x"),
+ cl::desc(
+ "Multiply the `import-instr-limit` threshold for critical callsites"));
+
// FIXME: This multiplier was not really tuned up.
static cl::opt<float> ImportColdMultiplier(
"import-cold-multiplier", cl::init(0), cl::Hidden, cl::value_desc("N"),
@@ -207,6 +213,8 @@ static void computeImportForFunction(
return ImportHotMultiplier;
if (Hotness == CalleeInfo::HotnessType::Cold)
return ImportColdMultiplier;
+ if (Hotness == CalleeInfo::HotnessType::Critical)
+ return ImportCriticalMultiplier;
return 1.0;
};
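
The value returned by this lambda multiplies the base import-instr-limit threshold, so cold call sites import nothing, hot ones get a 3x budget, and critical ones are effectively uncapped. A minimal standalone sketch of that scaling; the base limit of 100 is only an assumed example value, not something this hunk sets:

  #include <cstdio>

  enum class Hotness { Unknown, Cold, Hot, Critical };

  // Illustrative reimplementation of the multiplier selection added above,
  // using the defaults visible in this file (cold 0, hot 3.0, critical 100.0).
  double multiplier(Hotness H) {
    switch (H) {
    case Hotness::Hot:      return 3.0;
    case Hotness::Cold:     return 0.0;
    case Hotness::Critical: return 100.0;
    default:                return 1.0;
    }
  }

  int main() {
    const unsigned BaseLimit = 100; // hypothetical import-instr-limit value
    for (Hotness H : {Hotness::Unknown, Hotness::Cold, Hotness::Hot,
                      Hotness::Critical})
      std::printf("threshold = %.0f instructions\n", BaseLimit * multiplier(H));
  }
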
@@ -537,8 +545,6 @@ void llvm::thinLTOResolveWeakForLinkerModule(
};
auto updateLinkage = [&](GlobalValue &GV) {
- if (!GlobalValue::isWeakForLinker(GV.getLinkage()))
- return;
// See if the global summary analysis computed a new resolved linkage.
const auto &GS = DefinedGlobals.find(GV.getGUID());
if (GS == DefinedGlobals.end())
@@ -546,6 +552,21 @@ void llvm::thinLTOResolveWeakForLinkerModule(
auto NewLinkage = GS->second->linkage();
if (NewLinkage == GV.getLinkage())
return;
+
+ // Switch the linkage to weakany if asked for, e.g. we do this for
+ // linker redefined symbols (via --wrap or --defsym).
+ // We record that the visibility should be changed here in `addThinLTO`
+ // as we need access to the resolution vectors for each input file in
+ // order to find which symbols have been redefined.
+ // We may consider reorganizing this code and moving the linkage recording
+ // somewhere else, e.g. in thinLTOResolveWeakForLinkerInIndex.
+ if (NewLinkage == GlobalValue::WeakAnyLinkage) {
+ GV.setLinkage(NewLinkage);
+ return;
+ }
+
+ if (!GlobalValue::isWeakForLinker(GV.getLinkage()))
+ return;
// Check for a non-prevailing def that has interposable linkage
// (e.g. non-odr weak or linkonce). In that case we can't simply
// convert to available_externally, since it would lose the
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index f277a51ae659..3d57acf06e74 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -837,7 +837,7 @@ OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
// The global is initialized when the store to it occurs.
new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
- SI->getOrdering(), SI->getSynchScope(), SI);
+ SI->getOrdering(), SI->getSyncScopeID(), SI);
SI->eraseFromParent();
continue;
}
@@ -854,7 +854,7 @@ OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
// Replace the cmp X, 0 with a use of the bool value.
// Sink the load to where the compare was, if atomic rules allow us to.
Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
- LI->getOrdering(), LI->getSynchScope(),
+ LI->getOrdering(), LI->getSyncScopeID(),
LI->isUnordered() ? (Instruction*)ICI : LI);
InitBoolUsed = true;
switch (ICI->getPredicate()) {
@@ -1605,7 +1605,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
assert(LI->getOperand(0) == GV && "Not a copy!");
// Insert a new load, to preserve the saved value.
StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
- LI->getOrdering(), LI->getSynchScope(), LI);
+ LI->getOrdering(), LI->getSyncScopeID(), LI);
} else {
assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
"This is not a form that we understand!");
@@ -1614,12 +1614,12 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
}
}
new StoreInst(StoreVal, NewGV, false, 0,
- SI->getOrdering(), SI->getSynchScope(), SI);
+ SI->getOrdering(), SI->getSyncScopeID(), SI);
} else {
// Change the load into a load of bool then a select.
LoadInst *LI = cast<LoadInst>(UI);
LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
- LI->getOrdering(), LI->getSynchScope(), LI);
+ LI->getOrdering(), LI->getSyncScopeID(), LI);
Value *NSI;
if (IsOneZero)
NSI = new ZExtInst(NLI, LI->getType(), "", LI);
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index ad89e40661c6..00ddb93df830 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -989,5 +989,13 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// And delete the actual function from the module.
M.getFunctionList().erase(DeadF);
}
- return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
+
+ if (!Changed)
+ return PreservedAnalyses::all();
+
+ // Even if we change the IR, we update the core CGSCC data structures and so
+ // can preserve the proxy to the function analysis manager.
+ PreservedAnalyses PA;
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ return PA;
}
diff --git a/lib/Transforms/IPO/LowerTypeTests.cpp b/lib/Transforms/IPO/LowerTypeTests.cpp
index b406c22c69d7..693df5e7ba92 100644
--- a/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -855,15 +855,20 @@ void LowerTypeTestsModule::importFunction(Function *F, bool isDefinition) {
FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
Name + ".cfi_jt", &M);
FDecl->setVisibility(GlobalValue::HiddenVisibility);
- } else {
- // Definition.
- assert(isDefinition);
+ } else if (isDefinition) {
F->setName(Name + ".cfi");
F->setLinkage(GlobalValue::ExternalLinkage);
F->setVisibility(GlobalValue::HiddenVisibility);
FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
Name, &M);
FDecl->setVisibility(Visibility);
+ } else {
+ // Function definition without type metadata, where some other translation
+ // unit contained a declaration with type metadata. This normally happens
+ // during mixed CFI + non-CFI compilation. We do nothing with the function
+ // so that it is treated the same way as a function defined outside of the
+ // LTO unit.
+ return;
}
if (F->isWeakForLinker())
diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp
index 5b1b58b89c32..0b319f6a488b 100644
--- a/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -188,6 +188,13 @@ PassManagerBuilder::~PassManagerBuilder() {
static ManagedStatic<SmallVector<std::pair<PassManagerBuilder::ExtensionPointTy,
PassManagerBuilder::ExtensionFn>, 8> > GlobalExtensions;
+/// Check if GlobalExtensions is constructed and not empty.
+/// Since GlobalExtensions is a managed static, calling 'empty()' will trigger
+/// the construction of the object.
+static bool GlobalExtensionsNotEmpty() {
+ return GlobalExtensions.isConstructed() && !GlobalExtensions->empty();
+}
+
void PassManagerBuilder::addGlobalExtension(
PassManagerBuilder::ExtensionPointTy Ty,
PassManagerBuilder::ExtensionFn Fn) {
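
The helper exists because GlobalExtensions is a lazily constructed managed static: even asking it empty() would force construction (and later destruction) of the underlying vector when no global extension was ever registered. A simplified standalone model of that behaviour, not the real llvm::ManagedStatic:

  #include <cstdio>
  #include <memory>
  #include <vector>

  // Toy stand-in for a lazily constructed static: the wrapped object is only
  // built on first dereference, and isConstructed() can be queried without
  // triggering construction.
  template <typename T> class LazyStatic {
    mutable std::unique_ptr<T> Obj;

  public:
    bool isConstructed() const { return Obj != nullptr; }
    T *operator->() const {
      if (!Obj)
        Obj = std::make_unique<T>(); // construction happens here
      return Obj.get();
    }
  };

  static LazyStatic<std::vector<int>> Extensions;

  // Mirrors GlobalExtensionsNotEmpty(): check emptiness without paying for
  // construction when nothing was ever registered.
  static bool extensionsNotEmpty() {
    return Extensions.isConstructed() && !Extensions->empty();
  }

  int main() {
    std::printf("constructed before query: %d\n", Extensions.isConstructed());
    std::printf("not empty: %d\n", extensionsNotEmpty());
    std::printf("constructed after query: %d\n", Extensions.isConstructed());
  }
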
@@ -200,9 +207,12 @@ void PassManagerBuilder::addExtension(ExtensionPointTy Ty, ExtensionFn Fn) {
void PassManagerBuilder::addExtensionsToPM(ExtensionPointTy ETy,
legacy::PassManagerBase &PM) const {
- for (unsigned i = 0, e = GlobalExtensions->size(); i != e; ++i)
- if ((*GlobalExtensions)[i].first == ETy)
- (*GlobalExtensions)[i].second(*this, PM);
+ if (GlobalExtensionsNotEmpty()) {
+ for (auto &Ext : *GlobalExtensions) {
+ if (Ext.first == ETy)
+ Ext.second(*this, PM);
+ }
+ }
for (unsigned i = 0, e = Extensions.size(); i != e; ++i)
if (Extensions[i].first == ETy)
Extensions[i].second(*this, PM);
@@ -415,7 +425,7 @@ void PassManagerBuilder::populateModulePassManager(
// builds. The function merging pass is
if (MergeFunctions)
MPM.add(createMergeFunctionsPass());
- else if (!GlobalExtensions->empty() || !Extensions.empty())
+ else if (GlobalExtensionsNotEmpty() || !Extensions.empty())
MPM.add(createBarrierNoopPass());
addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
diff --git a/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
index 8d494fe9cde2..8ef6bb652309 100644
--- a/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
+++ b/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
@@ -271,7 +271,8 @@ void splitAndWriteThinLTOBitcode(
if (!ArgT || ArgT->getBitWidth() > 64)
return;
}
- if (computeFunctionBodyMemoryAccess(*F, AARGetter(*F)) == MAK_ReadNone)
+ if (!F->isDeclaration() &&
+ computeFunctionBodyMemoryAccess(*F, AARGetter(*F)) == MAK_ReadNone)
EligibleVirtualFns.insert(F);
});
}
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index d5f0dd191415..809471cfd74f 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -164,7 +164,7 @@ namespace {
///
class FAddCombine {
public:
- FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(nullptr) {}
+ FAddCombine(InstCombiner::BuilderTy &B) : Builder(B), Instr(nullptr) {}
Value *simplify(Instruction *FAdd);
private:
@@ -187,7 +187,7 @@ namespace {
Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
void createInstPostProc(Instruction *NewInst, bool NoNumber = false);
- InstCombiner::BuilderTy *Builder;
+ InstCombiner::BuilderTy &Builder;
Instruction *Instr;
// Debugging stuff are clustered here.
@@ -735,7 +735,7 @@ Value *FAddCombine::createNaryFAdd
}
Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
- Value *V = Builder->CreateFSub(Opnd0, Opnd1);
+ Value *V = Builder.CreateFSub(Opnd0, Opnd1);
if (Instruction *I = dyn_cast<Instruction>(V))
createInstPostProc(I);
return V;
@@ -750,21 +750,21 @@ Value *FAddCombine::createFNeg(Value *V) {
}
Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
- Value *V = Builder->CreateFAdd(Opnd0, Opnd1);
+ Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
if (Instruction *I = dyn_cast<Instruction>(V))
createInstPostProc(I);
return V;
}
Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
- Value *V = Builder->CreateFMul(Opnd0, Opnd1);
+ Value *V = Builder.CreateFMul(Opnd0, Opnd1);
if (Instruction *I = dyn_cast<Instruction>(V))
createInstPostProc(I);
return V;
}
Value *FAddCombine::createFDiv(Value *Opnd0, Value *Opnd1) {
- Value *V = Builder->CreateFDiv(Opnd0, Opnd1);
+ Value *V = Builder.CreateFDiv(Opnd0, Opnd1);
if (Instruction *I = dyn_cast<Instruction>(V))
createInstPostProc(I);
return V;
@@ -895,7 +895,7 @@ bool InstCombiner::willNotOverflowUnsignedSub(const Value *LHS,
// ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
// XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
static Value *checkForNegativeOperand(BinaryOperator &I,
- InstCombiner::BuilderTy *Builder) {
+ InstCombiner::BuilderTy &Builder) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
// This function creates 2 instructions to replace ADD, we need at least one
@@ -919,13 +919,13 @@ static Value *checkForNegativeOperand(BinaryOperator &I,
// X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
// ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
- Value *NewAnd = Builder->CreateAnd(Z, *C1);
- return Builder->CreateSub(RHS, NewAnd, "sub");
+ Value *NewAnd = Builder.CreateAnd(Z, *C1);
+ return Builder.CreateSub(RHS, NewAnd, "sub");
} else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
// X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
// ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
- Value *NewOr = Builder->CreateOr(Z, ~(*C1));
- return Builder->CreateSub(RHS, NewOr, "sub");
+ Value *NewOr = Builder.CreateOr(Z, ~(*C1));
+ return Builder.CreateSub(RHS, NewOr, "sub");
}
}
}
@@ -944,8 +944,8 @@ static Value *checkForNegativeOperand(BinaryOperator &I,
if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
if (C1->countTrailingZeros() == 0)
if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
- Value *NewOr = Builder->CreateOr(Z, ~(*C2));
- return Builder->CreateSub(RHS, NewOr, "sub");
+ Value *NewOr = Builder.CreateOr(Z, ~(*C2));
+ return Builder.CreateSub(RHS, NewOr, "sub");
}
return nullptr;
}
@@ -1027,7 +1027,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Value *V = SimplifyUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V);
- if (Instruction *X = foldAddWithConstant(I, *Builder))
+ if (Instruction *X = foldAddWithConstant(I, Builder))
return X;
// FIXME: This should be moved into the above helper function to allow these
@@ -1060,7 +1060,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (ExtendAmt) {
Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
- Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
+ Value *NewShl = Builder.CreateShl(XorLHS, ShAmt, "sext");
return BinaryOperator::CreateAShr(NewShl, ShAmt);
}
@@ -1084,7 +1084,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Instruction *NV = foldOpWithConstantIntoOperand(I))
return NV;
- if (I.getType()->getScalarType()->isIntegerTy(1))
+ if (I.getType()->isIntOrIntVectorTy(1))
return BinaryOperator::CreateXor(LHS, RHS);
// X + X --> X << 1
@@ -1101,7 +1101,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Value *LHSV = dyn_castNegVal(LHS)) {
if (!isa<Constant>(RHS))
if (Value *RHSV = dyn_castNegVal(RHS)) {
- Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
+ Value *NewAdd = Builder.CreateAdd(LHSV, RHSV, "sum");
return BinaryOperator::CreateNeg(NewAdd);
}
@@ -1148,7 +1148,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (AddRHSHighBits == AddRHSHighBitsAnd) {
// Okay, the xform is safe. Insert the new add pronto.
- Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
+ Value *NewAdd = Builder.CreateAdd(X, CRHS, LHS->getName());
return BinaryOperator::CreateAnd(NewAdd, C2);
}
}
@@ -1191,7 +1191,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
willNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) {
// Insert the new, smaller add.
Value *NewAdd =
- Builder->CreateNSWAdd(LHSConv->getOperand(0), CI, "addconv");
+ Builder.CreateNSWAdd(LHSConv->getOperand(0), CI, "addconv");
return new SExtInst(NewAdd, I.getType());
}
}
@@ -1208,7 +1208,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
willNotOverflowSignedAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0), I)) {
// Insert the new integer add.
- Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
+ Value *NewAdd = Builder.CreateNSWAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0), "addconv");
return new SExtInst(NewAdd, I.getType());
}
@@ -1227,7 +1227,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
willNotOverflowUnsignedAdd(LHSConv->getOperand(0), CI, I)) {
// Insert the new, smaller add.
Value *NewAdd =
- Builder->CreateNUWAdd(LHSConv->getOperand(0), CI, "addconv");
+ Builder.CreateNUWAdd(LHSConv->getOperand(0), CI, "addconv");
return new ZExtInst(NewAdd, I.getType());
}
}
@@ -1244,7 +1244,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
willNotOverflowUnsignedAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0), I)) {
// Insert the new integer add.
- Value *NewAdd = Builder->CreateNUWAdd(
+ Value *NewAdd = Builder.CreateNUWAdd(
LHSConv->getOperand(0), RHSConv->getOperand(0), "addconv");
return new ZExtInst(NewAdd, I.getType());
}
@@ -1362,8 +1362,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
// Insert the new integer add.
- Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
- CI, "addconv");
+ Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
return new SIToFPInst(NewAdd, I.getType());
}
}
@@ -1381,8 +1380,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
(LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
// Insert the new integer add.
- Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
- RHSIntVal, "addconv");
+ Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal, "addconv");
return new SIToFPInst(NewAdd, I.getType());
}
}
@@ -1480,14 +1478,14 @@ Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
// pointer, subtract it from the offset we have.
if (GEP2) {
Value *Offset = EmitGEPOffset(GEP2);
- Result = Builder->CreateSub(Result, Offset);
+ Result = Builder.CreateSub(Result, Offset);
}
// If we have p - gep(p, ...) then we have to negate the result.
if (Swapped)
- Result = Builder->CreateNeg(Result, "diff.neg");
+ Result = Builder.CreateNeg(Result, "diff.neg");
- return Builder->CreateIntCast(Result, Ty, true);
+ return Builder.CreateIntCast(Result, Ty, true);
}
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
@@ -1522,7 +1520,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return Res;
}
- if (I.getType()->getScalarType()->isIntegerTy(1))
+ if (I.getType()->isIntOrIntVectorTy(1))
return BinaryOperator::CreateXor(Op0, Op1);
// Replace (-1 - A) with (~A).
@@ -1552,12 +1550,12 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// Fold (sub 0, (zext bool to B)) --> (sext bool to B)
if (C->isNullValue() && match(Op1, m_ZExt(m_Value(X))))
- if (X->getType()->getScalarType()->isIntegerTy(1))
+ if (X->getType()->isIntOrIntVectorTy(1))
return CastInst::CreateSExtOrBitCast(X, Op1->getType());
// Fold (sub 0, (sext bool to B)) --> (zext bool to B)
if (C->isNullValue() && match(Op1, m_SExt(m_Value(X))))
- if (X->getType()->getScalarType()->isIntegerTy(1))
+ if (X->getType()->isIntOrIntVectorTy(1))
return CastInst::CreateZExtOrBitCast(X, Op1->getType());
}
@@ -1615,7 +1613,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// ((X | Y) - X) --> (~X & Y)
if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
return BinaryOperator::CreateAnd(
- Y, Builder->CreateNot(Op1, Op1->getName() + ".not"));
+ Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
}
if (Op1->hasOneUse()) {
@@ -1625,13 +1623,13 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// (X - (Y - Z)) --> (X + (Z - Y)).
if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
return BinaryOperator::CreateAdd(Op0,
- Builder->CreateSub(Z, Y, Op1->getName()));
+ Builder.CreateSub(Z, Y, Op1->getName()));
// (X - (X & Y)) --> (X & ~Y)
//
if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0))))
return BinaryOperator::CreateAnd(Op0,
- Builder->CreateNot(Y, Y->getName() + ".not"));
+ Builder.CreateNot(Y, Y->getName() + ".not"));
// 0 - (X sdiv C) -> (X sdiv -C) provided the negation doesn't overflow.
if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
@@ -1648,7 +1646,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// 'nuw' is dropped in favor of the canonical form.
if (match(Op1, m_SExt(m_Value(Y))) &&
Y->getType()->getScalarSizeInBits() == 1) {
- Value *Zext = Builder->CreateZExt(Y, I.getType());
+ Value *Zext = Builder.CreateZExt(Y, I.getType());
BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext);
Add->setHasNoSignedWrap(I.hasNoSignedWrap());
return Add;
@@ -1659,13 +1657,13 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
Value *A, *B;
Constant *CI;
if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B)))))
- return BinaryOperator::CreateAdd(Op0, Builder->CreateMul(A, B));
+ return BinaryOperator::CreateAdd(Op0, Builder.CreateMul(A, B));
// X - A*CI -> X + A*-CI
// No need to handle commuted multiply because multiply handling will
// ensure constant will be move to the right hand side.
if (match(Op1, m_Mul(m_Value(A), m_Constant(CI)))) {
- Value *NewMul = Builder->CreateMul(A, ConstantExpr::getNeg(CI));
+ Value *NewMul = Builder.CreateMul(A, ConstantExpr::getNeg(CI));
return BinaryOperator::CreateAdd(Op0, NewMul);
}
}
@@ -1729,14 +1727,14 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
}
if (FPTruncInst *FPTI = dyn_cast<FPTruncInst>(Op1)) {
if (Value *V = dyn_castFNegVal(FPTI->getOperand(0))) {
- Value *NewTrunc = Builder->CreateFPTrunc(V, I.getType());
+ Value *NewTrunc = Builder.CreateFPTrunc(V, I.getType());
Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewTrunc);
NewI->copyFastMathFlags(&I);
return NewI;
}
} else if (FPExtInst *FPEI = dyn_cast<FPExtInst>(Op1)) {
if (Value *V = dyn_castFNegVal(FPEI->getOperand(0))) {
- Value *NewExt = Builder->CreateFPExt(V, I.getType());
+ Value *NewExt = Builder.CreateFPExt(V, I.getType());
Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewExt);
NewI->copyFastMathFlags(&I);
return NewI;
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index db98be2c98f5..773c86e23707 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -54,17 +54,17 @@ static unsigned getFCmpCode(FCmpInst::Predicate CC) {
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
- InstCombiner::BuilderTy *Builder) {
+ InstCombiner::BuilderTy &Builder) {
ICmpInst::Predicate NewPred;
if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
return NewConstant;
- return Builder->CreateICmp(NewPred, LHS, RHS);
+ return Builder.CreateICmp(NewPred, LHS, RHS);
}
/// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either a FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
- InstCombiner::BuilderTy *Builder) {
+ InstCombiner::BuilderTy &Builder) {
const auto Pred = static_cast<FCmpInst::Predicate>(Code);
assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE &&
"Unexpected FCmp predicate!");
@@ -72,53 +72,45 @@ static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
if (Pred == FCmpInst::FCMP_TRUE)
return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
- return Builder->CreateFCmp(Pred, LHS, RHS);
+ return Builder.CreateFCmp(Pred, LHS, RHS);
}
-/// \brief Transform BITWISE_OP(BSWAP(A),BSWAP(B)) to BSWAP(BITWISE_OP(A, B))
+/// \brief Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or
+/// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B))
/// \param I Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
/// null pointer if no transformation was made.
-Value *InstCombiner::SimplifyBSwap(BinaryOperator &I) {
- IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
-
- // Can't do vectors.
- if (I.getType()->isVectorTy())
- return nullptr;
-
- // Can only do bitwise ops.
- if (!I.isBitwiseLogicOp())
- return nullptr;
+static Value *SimplifyBSwap(BinaryOperator &I,
+ InstCombiner::BuilderTy &Builder) {
+ assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");
Value *OldLHS = I.getOperand(0);
Value *OldRHS = I.getOperand(1);
- ConstantInt *ConstLHS = dyn_cast<ConstantInt>(OldLHS);
- ConstantInt *ConstRHS = dyn_cast<ConstantInt>(OldRHS);
- IntrinsicInst *IntrLHS = dyn_cast<IntrinsicInst>(OldLHS);
- IntrinsicInst *IntrRHS = dyn_cast<IntrinsicInst>(OldRHS);
- bool IsBswapLHS = (IntrLHS && IntrLHS->getIntrinsicID() == Intrinsic::bswap);
- bool IsBswapRHS = (IntrRHS && IntrRHS->getIntrinsicID() == Intrinsic::bswap);
-
- if (!IsBswapLHS && !IsBswapRHS)
- return nullptr;
-
- if (!IsBswapLHS && !ConstLHS)
- return nullptr;
- if (!IsBswapRHS && !ConstRHS)
+ Value *NewLHS;
+ if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
return nullptr;
- /// OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
- /// OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
- Value *NewLHS = IsBswapLHS ? IntrLHS->getOperand(0) :
- Builder->getInt(ConstLHS->getValue().byteSwap());
+ Value *NewRHS;
+ const APInt *C;
- Value *NewRHS = IsBswapRHS ? IntrRHS->getOperand(0) :
- Builder->getInt(ConstRHS->getValue().byteSwap());
+ if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
+ // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
+ if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
+ return nullptr;
+ // NewRHS initialized by the matcher.
+ } else if (match(OldRHS, m_APInt(C))) {
+ // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
+ if (!OldLHS->hasOneUse())
+ return nullptr;
+ NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
+ } else
+ return nullptr;
- Value *BinOp = Builder->CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
- Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap, ITy);
- return Builder->CreateCall(F, BinOp);
+ Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
+ Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
+ I.getType());
+ return Builder.CreateCall(F, BinOp);
}
/// This handles expressions of the form ((val OP C1) & C2). Where
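
The rewrite is valid because byte swapping permutes whole bytes, so it distributes over any bitwise logic op: bswap(a) OP bswap(b) == bswap(a OP b), and a constant operand can simply be pre-swapped. A brute-force spot check of the identity on a few 32-bit values (standalone code, using the compiler's bswap builtin rather than the IR intrinsic):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t Samples[] = {0x00000000u, 0x12345678u, 0xDEADBEEFu,
                                0xFFFF0000u, 0x0F0F0F0Fu};
    for (uint32_t A : Samples)
      for (uint32_t B : Samples) {
        assert((__builtin_bswap32(A) & __builtin_bswap32(B)) ==
               __builtin_bswap32(A & B));
        assert((__builtin_bswap32(A) | __builtin_bswap32(B)) ==
               __builtin_bswap32(A | B));
        assert((__builtin_bswap32(A) ^ __builtin_bswap32(B)) ==
               __builtin_bswap32(A ^ B));
      }
    std::printf("bswap distributes over and/or/xor on these samples\n");
  }
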
@@ -137,7 +129,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
case Instruction::Xor:
if (Op->hasOneUse()) {
// (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
- Value *And = Builder->CreateAnd(X, AndRHS);
+ Value *And = Builder.CreateAnd(X, AndRHS);
And->takeName(Op);
return BinaryOperator::CreateXor(And, Together);
}
@@ -150,7 +142,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
// NOTE: This reduces the number of bits set in the & mask, which
// can expose opportunities for store narrowing.
Together = ConstantExpr::getXor(AndRHS, Together);
- Value *And = Builder->CreateAnd(X, Together);
+ Value *And = Builder.CreateAnd(X, Together);
And->takeName(Op);
return BinaryOperator::CreateOr(And, OpRHS);
}
@@ -182,7 +174,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
return &TheAnd;
} else {
// Pull the XOR out of the AND.
- Value *NewAnd = Builder->CreateAnd(X, AndRHS);
+ Value *NewAnd = Builder.CreateAnd(X, AndRHS);
NewAnd->takeName(Op);
return BinaryOperator::CreateXor(NewAnd, AndRHS);
}
@@ -198,7 +190,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
uint32_t BitWidth = AndRHS->getType()->getBitWidth();
uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
- ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShlMask);
+ ConstantInt *CI = Builder.getInt(AndRHS->getValue() & ShlMask);
if (CI->getValue() == ShlMask)
// Masking out bits that the shift already masks.
@@ -218,7 +210,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
uint32_t BitWidth = AndRHS->getType()->getBitWidth();
uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
- ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShrMask);
+ ConstantInt *CI = Builder.getInt(AndRHS->getValue() & ShrMask);
if (CI->getValue() == ShrMask)
// Masking out bits that the shift already masks.
@@ -238,12 +230,12 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
uint32_t BitWidth = AndRHS->getType()->getBitWidth();
uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
- Constant *C = Builder->getInt(AndRHS->getValue() & ShrMask);
+ Constant *C = Builder.getInt(AndRHS->getValue() & ShrMask);
if (C == AndRHS) { // Masking out bits shifted in.
// (Val ashr C1) & C2 -> (Val lshr C1) & C2
// Make the argument unsigned.
Value *ShVal = Op->getOperand(0);
- ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
+ ShVal = Builder.CreateLShr(ShVal, OpRHS, Op->getName());
return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
}
}
@@ -269,15 +261,15 @@ Value *InstCombiner::insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
- return Builder->CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
+ return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
}
// V >= Lo && V < Hi --> V - Lo u< Hi - Lo
// V < Lo || V >= Hi --> V - Lo u>= Hi - Lo
Value *VMinusLo =
- Builder->CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
+ Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
- return Builder->CreateICmp(Pred, VMinusLo, HiMinusLo);
+ return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}
/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
@@ -523,7 +515,7 @@ static unsigned getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C,
/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
- llvm::InstCombiner::BuilderTy *Builder) {
+ llvm::InstCombiner::BuilderTy &Builder) {
Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
unsigned Mask =
@@ -556,27 +548,27 @@ static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
if (Mask & Mask_AllZeros) {
// (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
// -> (icmp eq (A & (B|D)), 0)
- Value *NewOr = Builder->CreateOr(B, D);
- Value *NewAnd = Builder->CreateAnd(A, NewOr);
+ Value *NewOr = Builder.CreateOr(B, D);
+ Value *NewAnd = Builder.CreateAnd(A, NewOr);
// We can't use C as zero because we might actually handle
// (icmp ne (A & B), B) & (icmp ne (A & D), D)
// with B and D, having a single bit set.
Value *Zero = Constant::getNullValue(A->getType());
- return Builder->CreateICmp(NewCC, NewAnd, Zero);
+ return Builder.CreateICmp(NewCC, NewAnd, Zero);
}
if (Mask & BMask_AllOnes) {
// (icmp eq (A & B), B) & (icmp eq (A & D), D)
// -> (icmp eq (A & (B|D)), (B|D))
- Value *NewOr = Builder->CreateOr(B, D);
- Value *NewAnd = Builder->CreateAnd(A, NewOr);
- return Builder->CreateICmp(NewCC, NewAnd, NewOr);
+ Value *NewOr = Builder.CreateOr(B, D);
+ Value *NewAnd = Builder.CreateAnd(A, NewOr);
+ return Builder.CreateICmp(NewCC, NewAnd, NewOr);
}
if (Mask & AMask_AllOnes) {
// (icmp eq (A & B), A) & (icmp eq (A & D), A)
// -> (icmp eq (A & (B&D)), A)
- Value *NewAnd1 = Builder->CreateAnd(B, D);
- Value *NewAnd2 = Builder->CreateAnd(A, NewAnd1);
- return Builder->CreateICmp(NewCC, NewAnd2, A);
+ Value *NewAnd1 = Builder.CreateAnd(B, D);
+ Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
+ return Builder.CreateICmp(NewCC, NewAnd2, A);
}
// Remaining cases assume at least that B and D are constant, and depend on
@@ -644,10 +636,10 @@ static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
(CCst->getValue() ^ ECst->getValue())).getBoolValue())
return ConstantInt::get(LHS->getType(), !IsAnd);
- Value *NewOr1 = Builder->CreateOr(B, D);
+ Value *NewOr1 = Builder.CreateOr(B, D);
Value *NewOr2 = ConstantExpr::getOr(CCst, ECst);
- Value *NewAnd = Builder->CreateAnd(A, NewOr1);
- return Builder->CreateICmp(NewCC, NewAnd, NewOr2);
+ Value *NewAnd = Builder.CreateAnd(A, NewOr1);
+ return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
}
return nullptr;
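
The Mask_AllZeros case merges two masked tests against zero because a value has no bits in common with B and none with D exactly when it has none in B|D. An exhaustive standalone check over a small domain:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    // (A & B) == 0 && (A & D) == 0   <==>   (A & (B | D)) == 0
    for (uint32_t A = 0; A < 64; ++A)
      for (uint32_t B = 0; B < 64; ++B)
        for (uint32_t D = 0; D < 64; ++D) {
          bool Lhs = ((A & B) == 0) && ((A & D) == 0);
          bool Rhs = (A & (B | D)) == 0;
          assert(Lhs == Rhs);
        }
    std::printf("masked-zero compares merge over the 6-bit domain\n");
  }
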
@@ -705,13 +697,13 @@ Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
if (Inverted)
NewPred = ICmpInst::getInversePredicate(NewPred);
- return Builder->CreateICmp(NewPred, Input, RangeEnd);
+ return Builder.CreateICmp(NewPred, Input, RangeEnd);
}
static Value *
foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
bool JoinedByAnd,
- InstCombiner::BuilderTy *Builder) {
+ InstCombiner::BuilderTy &Builder) {
Value *X = LHS->getOperand(0);
if (X != RHS->getOperand(0))
return nullptr;
@@ -742,8 +734,8 @@ foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
// (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2
// We choose an 'or' with a Pow2 constant rather than the inverse mask with
// 'and' because that may lead to smaller codegen from a smaller constant.
- Value *Or = Builder->CreateOr(X, ConstantInt::get(X->getType(), Xor));
- return Builder->CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2));
+ Value *Or = Builder.CreateOr(X, ConstantInt::get(X->getType(), Xor));
+ return Builder.CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2));
}
// Special case: get the ordering right when the values wrap around zero.
@@ -755,9 +747,9 @@ foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
// (X == 13 || X == 14) --> X - 13 <=u 1
// (X != 13 && X != 14) --> X - 13 >u 1
// An 'add' is the canonical IR form, so favor that over a 'sub'.
- Value *Add = Builder->CreateAdd(X, ConstantInt::get(X->getType(), -(*C1)));
+ Value *Add = Builder.CreateAdd(X, ConstantInt::get(X->getType(), -(*C1)));
auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
- return Builder->CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1));
+ return Builder.CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1));
}
return nullptr;
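
Both folds here collapse a pair of equality compares into one: constants differing in a single bit are handled by OR-ing that bit away, and consecutive constants become one unsigned range check. The second identity, checked by brute force over a 16-bit domain:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    // (X == 13 || X == 14)  <==>  (X - 13) u<= 1, using wrapping unsigned math.
    for (uint32_t X = 0; X <= 0xFFFFu; ++X) {
      bool Orig   = (X == 13u) || (X == 14u);
      bool Folded = (X - 13u) <= 1u; // unsigned subtract wraps for X < 13
      assert(Orig == Folded);
    }
    std::printf("(X==13 || X==14) == ((X-13) u<= 1) over 0..65535\n");
  }
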
@@ -793,10 +785,10 @@ Value *InstCombiner::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
if (A == C &&
isKnownToBeAPowerOfTwo(B, false, 0, &CxtI) &&
isKnownToBeAPowerOfTwo(D, false, 0, &CxtI)) {
- Value *Mask = Builder->CreateOr(B, D);
- Value *Masked = Builder->CreateAnd(A, Mask);
+ Value *Mask = Builder.CreateOr(B, D);
+ Value *Masked = Builder.CreateAnd(A, Mask);
auto NewPred = JoinedByAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
- return Builder->CreateICmp(NewPred, Masked, Mask);
+ return Builder.CreateICmp(NewPred, Masked, Mask);
}
}
@@ -855,8 +847,8 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
// (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) ||
(PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) {
- Value *NewOr = Builder->CreateOr(LHS0, RHS0);
- return Builder->CreateICmp(PredL, NewOr, LHSC);
+ Value *NewOr = Builder.CreateOr(LHS0, RHS0);
+ return Builder.CreateICmp(PredL, NewOr, LHSC);
}
}
@@ -888,10 +880,10 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
if ((Low & AndC->getValue()).isNullValue() &&
(Low & BigC->getValue()).isNullValue()) {
- Value *NewAnd = Builder->CreateAnd(V, Low | AndC->getValue());
+ Value *NewAnd = Builder.CreateAnd(V, Low | AndC->getValue());
APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue();
Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N);
- return Builder->CreateICmp(PredL, NewAnd, NewVal);
+ return Builder.CreateICmp(PredL, NewAnd, NewVal);
}
}
}
@@ -943,14 +935,14 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
llvm_unreachable("Unknown integer condition code!");
case ICmpInst::ICMP_ULT:
if (LHSC == SubOne(RHSC)) // (X != 13 & X u< 14) -> X < 13
- return Builder->CreateICmpULT(LHS0, LHSC);
- if (LHSC->isNullValue()) // (X != 0 & X u< 14) -> X-1 u< 13
+ return Builder.CreateICmpULT(LHS0, LHSC);
+ if (LHSC->isZero()) // (X != 0 & X u< 14) -> X-1 u< 13
return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
false, true);
break; // (X != 13 & X u< 15) -> no change
case ICmpInst::ICMP_SLT:
if (LHSC == SubOne(RHSC)) // (X != 13 & X s< 14) -> X < 13
- return Builder->CreateICmpSLT(LHS0, LHSC);
+ return Builder.CreateICmpSLT(LHS0, LHSC);
break; // (X != 13 & X s< 15) -> no change
case ICmpInst::ICMP_NE:
// Potential folds for this case should already be handled.
@@ -963,7 +955,7 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
llvm_unreachable("Unknown integer condition code!");
case ICmpInst::ICMP_NE:
if (RHSC == AddOne(LHSC)) // (X u> 13 & X != 14) -> X u> 14
- return Builder->CreateICmp(PredL, LHS0, RHSC);
+ return Builder.CreateICmp(PredL, LHS0, RHSC);
break; // (X u> 13 & X != 15) -> no change
case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
@@ -976,7 +968,7 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
llvm_unreachable("Unknown integer condition code!");
case ICmpInst::ICMP_NE:
if (RHSC == AddOne(LHSC)) // (X s> 13 & X != 14) -> X s> 14
- return Builder->CreateICmp(PredL, LHS0, RHSC);
+ return Builder.CreateICmp(PredL, LHS0, RHSC);
break; // (X s> 13 & X != 15) -> no change
case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), true,
@@ -1025,15 +1017,15 @@ Value *InstCombiner::foldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
// If either of the constants are nans, then the whole thing returns
// false.
if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
- return Builder->getFalse();
- return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
+ return Builder.getFalse();
+ return Builder.CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
}
// Handle vector zeros. This occurs because the canonical form of
// "fcmp ord x,x" is "fcmp ord x, 0".
if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
isa<ConstantAggregateZero>(RHS->getOperand(1)))
- return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
+ return Builder.CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
return nullptr;
}
@@ -1088,7 +1080,7 @@ bool InstCombiner::shouldOptimizeCast(CastInst *CI) {
/// Fold {and,or,xor} (cast X), C.
static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
- InstCombiner::BuilderTy *Builder) {
+ InstCombiner::BuilderTy &Builder) {
Constant *C;
if (!match(Logic.getOperand(1), m_Constant(C)))
return nullptr;
@@ -1107,7 +1099,7 @@ static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
if (ZextTruncC == C) {
// LogicOpc (zext X), C --> zext (LogicOpc X, C)
- Value *NewOp = Builder->CreateBinOp(LogicOpc, X, TruncC);
+ Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
return new ZExtInst(NewOp, DestTy);
}
}
@@ -1150,7 +1142,7 @@ Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
// fold logic(cast(A), cast(B)) -> cast(logic(A, B))
if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
- Value *NewOp = Builder->CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
+ Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
I.getName());
return CastInst::Create(CastOpcode, NewOp, DestTy);
}
@@ -1196,15 +1188,14 @@ static Instruction *foldBoolSextMaskToSelect(BinaryOperator &I) {
// Fold (and (sext bool to A), B) --> (select bool, B, 0)
Value *X = nullptr;
- if (match(Op0, m_SExt(m_Value(X))) &&
- X->getType()->getScalarType()->isIntegerTy(1)) {
+ if (match(Op0, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
Value *Zero = Constant::getNullValue(Op1->getType());
return SelectInst::Create(X, Op1, Zero);
}
// Fold (and ~(sext bool to A), B) --> (select bool, 0, B)
if (match(Op0, m_Not(m_SExt(m_Value(X)))) &&
- X->getType()->getScalarType()->isIntegerTy(1)) {
+ X->getType()->isIntOrIntVectorTy(1)) {
Value *Zero = Constant::getNullValue(Op0->getType());
return SelectInst::Create(X, Zero, Op1);
}
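The sext-bool folds above work because sign-extending an i1 yields all-ones for true and all-zeros for false, so the AND behaves like a select. A short standalone check of that equivalence (illustration only, not from this patch):

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    int main() {
      for (int B = 0; B <= 1; ++B) {
        for (uint32_t X : {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu}) {
          uint32_t SextB  = B ? 0xFFFFFFFFu : 0u; // sext i1 -> i32
          uint32_t AndForm = SextB & X;           // (and (sext bool), X)
          uint32_t SelForm = B ? X : 0u;          // (select bool, X, 0)
          assert(AndForm == SelForm);
        }
      }
      return 0;
    }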
@@ -1283,14 +1274,14 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
return &I;
// Do this before using distributive laws to catch simple and/or/not patterns.
- if (Instruction *Xor = foldAndToXor(I, *Builder))
+ if (Instruction *Xor = foldAndToXor(I, Builder))
return Xor;
// (A|B)&(A|C) -> A|(B&C) etc
if (Value *V = SimplifyUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifyBSwap(I))
+ if (Value *V = SimplifyBSwap(I, Builder))
return replaceInstUsesWith(I, V);
if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
@@ -1310,15 +1301,15 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
APInt NotAndRHS(~AndRHSMask);
if (MaskedValueIsZero(Op0LHS, NotAndRHS, 0, &I)) {
// Not masking anything out for the LHS, move to RHS.
- Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
- Op0RHS->getName()+".masked");
+ Value *NewRHS = Builder.CreateAnd(Op0RHS, AndRHS,
+ Op0RHS->getName()+".masked");
return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
}
if (!isa<Constant>(Op0RHS) &&
MaskedValueIsZero(Op0RHS, NotAndRHS, 0, &I)) {
// Not masking anything out for the RHS, move to LHS.
- Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
- Op0LHS->getName()+".masked");
+ Value *NewLHS = Builder.CreateAnd(Op0LHS, AndRHS,
+ Op0LHS->getName()+".masked");
return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
}
@@ -1337,7 +1328,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// (1 >> x) & 1 --> zext(x == 0)
if (AndRHSMask.isOneValue() && Op0LHS == AndRHS) {
Value *NewICmp =
- Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
+ Builder.CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
return new ZExtInst(NewICmp, I.getType());
}
break;
@@ -1360,11 +1351,11 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType());
Value *BinOp;
if (isa<ZExtInst>(Op0LHS))
- BinOp = Builder->CreateBinOp(Op0I->getOpcode(), X, TruncC1);
+ BinOp = Builder.CreateBinOp(Op0I->getOpcode(), X, TruncC1);
else
- BinOp = Builder->CreateBinOp(Op0I->getOpcode(), TruncC1, X);
+ BinOp = Builder.CreateBinOp(Op0I->getOpcode(), TruncC1, X);
auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType());
- auto *And = Builder->CreateAnd(BinOp, TruncC2);
+ auto *And = Builder.CreateAnd(BinOp, TruncC2);
return new ZExtInst(And, I.getType());
}
}
@@ -1384,7 +1375,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// into : and (trunc X to T), trunc(YC) & C2
// This will fold the two constants together, which may allow
// other simplifications.
- Value *NewCast = Builder->CreateTrunc(X, I.getType(), "and.shrunk");
+ Value *NewCast = Builder.CreateTrunc(X, I.getType(), "and.shrunk");
Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
C3 = ConstantExpr::getAnd(C3, AndRHS);
return BinaryOperator::CreateAnd(NewCast, C3);
@@ -1396,7 +1387,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
return FoldedLogic;
- if (Instruction *DeMorgan = matchDeMorgansLaws(I, *Builder))
+ if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
return DeMorgan;
{
@@ -1422,7 +1413,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// an endless loop. By checking that A is non-constant we ensure that
// we will never get to the loop.
if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
- return BinaryOperator::CreateAnd(A, Builder->CreateNot(B));
+ return BinaryOperator::CreateAnd(A, Builder.CreateNot(B));
}
}
@@ -1436,13 +1427,13 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
if (Op1->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
- return BinaryOperator::CreateAnd(Op0, Builder->CreateNot(C));
+ return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C));
// ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
if (Op0->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
- return BinaryOperator::CreateAnd(Op1, Builder->CreateNot(C));
+ return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));
// (A | B) & ((~A) ^ B) -> (A & B)
// (A | B) & (B ^ (~A)) -> (A & B)
@@ -1474,18 +1465,18 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
if (auto *Cmp = dyn_cast<ICmpInst>(X))
if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
- return replaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
+ return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
if (auto *Cmp = dyn_cast<ICmpInst>(Y))
if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
- return replaceInstUsesWith(I, Builder->CreateAnd(Res, X));
+ return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
}
if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
if (auto *Cmp = dyn_cast<ICmpInst>(X))
if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
- return replaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
+ return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
if (auto *Cmp = dyn_cast<ICmpInst>(Y))
if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
- return replaceInstUsesWith(I, Builder->CreateAnd(Res, X));
+ return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
}
}
@@ -1567,14 +1558,14 @@ static Value *getSelectCondition(Value *A, Value *B,
InstCombiner::BuilderTy &Builder) {
// If these are scalars or vectors of i1, A can be used directly.
Type *Ty = A->getType();
- if (match(A, m_Not(m_Specific(B))) && Ty->getScalarType()->isIntegerTy(1))
+ if (match(A, m_Not(m_Specific(B))) && Ty->isIntOrIntVectorTy(1))
return A;
// If A and B are sign-extended, look through the sexts to find the booleans.
Value *Cond;
Value *NotB;
if (match(A, m_SExt(m_Value(Cond))) &&
- Cond->getType()->getScalarType()->isIntegerTy(1) &&
+ Cond->getType()->isIntOrIntVectorTy(1) &&
match(B, m_OneUse(m_Not(m_Value(NotB))))) {
NotB = peekThroughBitcast(NotB, true);
if (match(NotB, m_SExt(m_Specific(Cond))))
@@ -1596,7 +1587,7 @@ static Value *getSelectCondition(Value *A, Value *B,
// operand, see if the constants are inverse bitmasks.
if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AC)))) &&
match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BC)))) &&
- Cond->getType()->getScalarType()->isIntegerTy(1) &&
+ Cond->getType()->isIntOrIntVectorTy(1) &&
areInverseVectorBitmasks(AC, BC)) {
AC = ConstantExpr::getTrunc(AC, CmpInst::makeCmpResultType(Ty));
return Builder.CreateXor(Cond, AC);
@@ -1687,9 +1678,9 @@ Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
RangeDiff.ugt(LHSC->getValue())) {
Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC);
- Value *NewAnd = Builder->CreateAnd(LAddOpnd, MaskC);
- Value *NewAdd = Builder->CreateAdd(NewAnd, MaxAddC);
- return (Builder->CreateICmp(LHS->getPredicate(), NewAdd, LHSC));
+ Value *NewAnd = Builder.CreateAnd(LAddOpnd, MaskC);
+ Value *NewAdd = Builder.CreateAdd(NewAnd, MaxAddC);
+ return Builder.CreateICmp(LHS->getPredicate(), NewAdd, LHSC);
}
}
}
@@ -1736,9 +1727,9 @@ Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
A = LHS->getOperand(1);
}
if (A && B)
- return Builder->CreateICmp(
+ return Builder.CreateICmp(
ICmpInst::ICMP_UGE,
- Builder->CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A);
+ Builder.CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A);
}
// E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
@@ -1759,8 +1750,8 @@ Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
if (LHSC == RHSC && PredL == PredR) {
// (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) {
- Value *NewOr = Builder->CreateOr(LHS0, RHS0);
- return Builder->CreateICmp(PredL, NewOr, LHSC);
+ Value *NewOr = Builder.CreateOr(LHS0, RHS0);
+ return Builder.CreateICmp(PredL, NewOr, LHSC);
}
}
@@ -1770,7 +1761,7 @@ Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
ConstantInt *AddC;
if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC))))
if (RHSC->getValue() + AddC->getValue() == LHSC->getValue())
- return Builder->CreateICmpULE(LHS0, LHSC);
+ return Builder.CreateICmpULE(LHS0, LHSC);
}
// From here on, we only handle:
@@ -1886,18 +1877,18 @@ Value *InstCombiner::foldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
// If either of the constants are nans, then the whole thing returns
// true.
if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
- return Builder->getTrue();
+ return Builder.getTrue();
// Otherwise, no need to compare the two constants, compare the
// rest.
- return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
+ return Builder.CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
}
// Handle vector zeros. This occurs because the canonical form of
// "fcmp uno x,x" is "fcmp uno x, 0".
if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
isa<ConstantAggregateZero>(RHS->getOperand(1)))
- return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
+ return Builder.CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
return nullptr;
}
@@ -1916,7 +1907,7 @@ Value *InstCombiner::foldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
/// when the XOR of the two constants is "all ones" (-1).
static Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
Value *A, Value *B, Value *C,
- InstCombiner::BuilderTy *Builder) {
+ InstCombiner::BuilderTy &Builder) {
ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
if (!CI1) return nullptr;
@@ -1928,7 +1919,7 @@ static Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
if (!Xor.isAllOnesValue()) return nullptr;
if (V1 == A || V1 == B) {
- Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
+ Value *NewOp = Builder.CreateAnd((V1 == A) ? B : A, CI1);
return BinaryOperator::CreateOr(NewOp, V1);
}
@@ -1946,7 +1937,7 @@ static Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
/// when the XOR of the two constants is "all ones" (-1).
static Instruction *FoldXorWithConstants(BinaryOperator &I, Value *Op,
Value *A, Value *B, Value *C,
- InstCombiner::BuilderTy *Builder) {
+ InstCombiner::BuilderTy &Builder) {
ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
if (!CI1)
return nullptr;
@@ -1961,7 +1952,7 @@ static Instruction *FoldXorWithConstants(BinaryOperator &I, Value *Op,
return nullptr;
if (V1 == A || V1 == B) {
- Value *NewOp = Builder->CreateAnd(V1 == A ? B : A, CI1);
+ Value *NewOp = Builder.CreateAnd(V1 == A ? B : A, CI1);
return BinaryOperator::CreateXor(NewOp, V1);
}
@@ -1987,14 +1978,14 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return &I;
// Do this before using distributive laws to catch simple and/or/not patterns.
- if (Instruction *Xor = foldOrToXor(I, *Builder))
+ if (Instruction *Xor = foldOrToXor(I, Builder))
return Xor;
// (A&B)|(A&C) -> A&(B|C) etc
if (Value *V = SimplifyUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifyBSwap(I))
+ if (Value *V = SimplifyBSwap(I, Builder))
return replaceInstUsesWith(I, V);
if (isa<Constant>(Op1))
@@ -2011,7 +2002,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// (X^C)|Y -> (X|Y)^C iff Y&C == 0
if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
MaskedValueIsZero(Op1, *C, 0, &I)) {
- Value *NOr = Builder->CreateOr(A, Op1);
+ Value *NOr = Builder.CreateOr(A, Op1);
NOr->takeName(Op0);
return BinaryOperator::CreateXor(NOr,
ConstantInt::get(NOr->getType(), *C));
@@ -2020,7 +2011,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// Y|(X^C) -> (X|Y)^C iff Y&C == 0
if (match(Op1, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
MaskedValueIsZero(Op0, *C, 0, &I)) {
- Value *NOr = Builder->CreateOr(A, Op0);
+ Value *NOr = Builder.CreateOr(A, Op0);
NOr->takeName(Op0);
return BinaryOperator::CreateXor(NOr,
ConstantInt::get(NOr->getType(), *C));
@@ -2058,7 +2049,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
(V2 == B &&
MaskedValueIsZero(V1, ~C1->getValue(), 0, &I)))) // (N|V)
return BinaryOperator::CreateAnd(A,
- Builder->getInt(C1->getValue()|C2->getValue()));
+ Builder.getInt(C1->getValue()|C2->getValue()));
// Or commutes, try both ways.
if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
((V1 == A &&
@@ -2066,7 +2057,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
(V2 == A &&
MaskedValueIsZero(V1, ~C2->getValue(), 0, &I)))) // (N|V)
return BinaryOperator::CreateAnd(B,
- Builder->getInt(C1->getValue()|C2->getValue()));
+ Builder.getInt(C1->getValue()|C2->getValue()));
// ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
// iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
@@ -2075,9 +2066,9 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
(C3->getValue() & ~C1->getValue()).isNullValue() &&
match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
(C4->getValue() & ~C2->getValue()).isNullValue()) {
- V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
+ V2 = Builder.CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
return BinaryOperator::CreateAnd(V2,
- Builder->getInt(C1->getValue()|C2->getValue()));
+ Builder.getInt(C1->getValue()|C2->getValue()));
}
}
}
@@ -2087,21 +2078,21 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// 'or' that it is replacing.
if (Op0->hasOneUse() || Op1->hasOneUse()) {
// (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
- if (Value *V = matchSelectFromAndOr(A, C, B, D, *Builder))
+ if (Value *V = matchSelectFromAndOr(A, C, B, D, Builder))
return replaceInstUsesWith(I, V);
- if (Value *V = matchSelectFromAndOr(A, C, D, B, *Builder))
+ if (Value *V = matchSelectFromAndOr(A, C, D, B, Builder))
return replaceInstUsesWith(I, V);
- if (Value *V = matchSelectFromAndOr(C, A, B, D, *Builder))
+ if (Value *V = matchSelectFromAndOr(C, A, B, D, Builder))
return replaceInstUsesWith(I, V);
- if (Value *V = matchSelectFromAndOr(C, A, D, B, *Builder))
+ if (Value *V = matchSelectFromAndOr(C, A, D, B, Builder))
return replaceInstUsesWith(I, V);
- if (Value *V = matchSelectFromAndOr(B, D, A, C, *Builder))
+ if (Value *V = matchSelectFromAndOr(B, D, A, C, Builder))
return replaceInstUsesWith(I, V);
- if (Value *V = matchSelectFromAndOr(B, D, C, A, *Builder))
+ if (Value *V = matchSelectFromAndOr(B, D, C, A, Builder))
return replaceInstUsesWith(I, V);
- if (Value *V = matchSelectFromAndOr(D, B, A, C, *Builder))
+ if (Value *V = matchSelectFromAndOr(D, B, A, C, Builder))
return replaceInstUsesWith(I, V);
- if (Value *V = matchSelectFromAndOr(D, B, C, A, *Builder))
+ if (Value *V = matchSelectFromAndOr(D, B, C, A, Builder))
return replaceInstUsesWith(I, V);
}
@@ -2139,9 +2130,9 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// ((B | C) & A) | B -> B | (A & C)
if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
- return BinaryOperator::CreateOr(Op1, Builder->CreateAnd(A, C));
+ return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));
- if (Instruction *DeMorgan = matchDeMorgansLaws(I, *Builder))
+ if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
return DeMorgan;
// Canonicalize xor to the RHS.
@@ -2163,11 +2154,11 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return BinaryOperator::CreateOr(A, B);
if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
- Value *Not = Builder->CreateNot(B, B->getName()+".not");
+ Value *Not = Builder.CreateNot(B, B->getName() + ".not");
return BinaryOperator::CreateOr(Not, Op0);
}
if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
- Value *Not = Builder->CreateNot(A, A->getName()+".not");
+ Value *Not = Builder.CreateNot(A, A->getName() + ".not");
return BinaryOperator::CreateOr(Not, Op0);
}
}
@@ -2181,7 +2172,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
B->getOpcode() == Instruction::Xor)) {
Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
B->getOperand(0);
- Value *Not = Builder->CreateNot(NotOp, NotOp->getName()+".not");
+ Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not");
return BinaryOperator::CreateOr(Not, Op0);
}
@@ -2194,7 +2185,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// xor was canonicalized to Op1 above.
if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
match(Op0, m_c_And(m_Specific(A), m_Specific(B))))
- return BinaryOperator::CreateXor(Builder->CreateNot(A), B);
+ return BinaryOperator::CreateXor(Builder.CreateNot(A), B);
if (SwappedForXor)
std::swap(Op0, Op1);
@@ -2212,18 +2203,18 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
if (auto *Cmp = dyn_cast<ICmpInst>(X))
if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
- return replaceInstUsesWith(I, Builder->CreateOr(Res, Y));
+ return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
if (auto *Cmp = dyn_cast<ICmpInst>(Y))
if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
- return replaceInstUsesWith(I, Builder->CreateOr(Res, X));
+ return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
}
if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
if (auto *Cmp = dyn_cast<ICmpInst>(X))
if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
- return replaceInstUsesWith(I, Builder->CreateOr(Res, Y));
+ return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
if (auto *Cmp = dyn_cast<ICmpInst>(Y))
if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
- return replaceInstUsesWith(I, Builder->CreateOr(Res, X));
+ return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
}
}
@@ -2238,10 +2229,10 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
- A->getType()->getScalarType()->isIntegerTy(1))
+ A->getType()->isIntOrIntVectorTy(1))
return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1);
if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
- A->getType()->getScalarType()->isIntegerTy(1))
+ A->getType()->isIntOrIntVectorTy(1))
return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0);
// Note: If we've gotten to the point of visiting the outer OR, then the
@@ -2252,7 +2243,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
ConstantInt *C1;
if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
- Value *Inner = Builder->CreateOr(A, Op1);
+ Value *Inner = Builder.CreateOr(A, Op1);
Inner->takeName(Op0);
return BinaryOperator::CreateOr(Inner, C1);
}
@@ -2265,8 +2256,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (Op0->hasOneUse() && Op1->hasOneUse() &&
match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
- Value *orTrue = Builder->CreateOr(A, C);
- Value *orFalse = Builder->CreateOr(B, D);
+ Value *orTrue = Builder.CreateOr(A, C);
+ Value *orFalse = Builder.CreateOr(B, D);
return SelectInst::Create(X, orTrue, orFalse);
}
}
@@ -2276,7 +2267,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
/// A ^ B can be specified using other logic ops in a variety of patterns. We
/// can fold these early and efficiently by morphing an existing instruction.
-static Instruction *foldXorToXor(BinaryOperator &I) {
+static Instruction *foldXorToXor(BinaryOperator &I,
+ InstCombiner::BuilderTy &Builder) {
assert(I.getOpcode() == Instruction::Xor);
Value *Op0 = I.getOperand(0);
Value *Op1 = I.getOperand(1);
@@ -2323,6 +2315,21 @@ static Instruction *foldXorToXor(BinaryOperator &I) {
return &I;
}
+ // For the remaining cases we need to get rid of one of the operands.
+ if (!Op0->hasOneUse() && !Op1->hasOneUse())
+ return nullptr;
+
+ // (A | B) ^ ~(A & B) -> ~(A ^ B)
+ // (A | B) ^ ~(B & A) -> ~(A ^ B)
+ // (A & B) ^ ~(A | B) -> ~(A ^ B)
+ // (A & B) ^ ~(B | A) -> ~(A ^ B)
+ // Complexity sorting ensures the not will be on the right side.
+ if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
+ match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
+ (match(Op0, m_And(m_Value(A), m_Value(B))) &&
+ match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
+ return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
+
return nullptr;
}
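The newly added folds turn (A | B) ^ ~(A & B) and (A & B) ^ ~(A | B) into ~(A ^ B). The identity is per-bit, so it can be checked exhaustively on narrow integers; a standalone sketch (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned A = 0; A <= 0xFF; ++A) {
        for (unsigned B = 0; B <= 0xFF; ++B) {
          uint8_t a = static_cast<uint8_t>(A), b = static_cast<uint8_t>(B);
          uint8_t Expect = static_cast<uint8_t>(~(a ^ b)); // ~(A ^ B)
          assert(static_cast<uint8_t>((a | b) ^ static_cast<uint8_t>(~(a & b))) == Expect);
          assert(static_cast<uint8_t>((a & b) ^ static_cast<uint8_t>(~(a | b))) == Expect);
        }
      }
      return 0;
    }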
@@ -2355,12 +2362,12 @@ Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
if (OrICmp == LHS && AndICmp == RHS && RHS->hasOneUse()) {
// (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS
RHS->setPredicate(RHS->getInversePredicate());
- return Builder->CreateAnd(LHS, RHS);
+ return Builder.CreateAnd(LHS, RHS);
}
if (OrICmp == RHS && AndICmp == LHS && LHS->hasOneUse()) {
// !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS
LHS->setPredicate(LHS->getInversePredicate());
- return Builder->CreateAnd(LHS, RHS);
+ return Builder.CreateAnd(LHS, RHS);
}
}
}
@@ -2381,7 +2388,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (Value *V = SimplifyXorInst(Op0, Op1, SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
- if (Instruction *NewXor = foldXorToXor(I))
+ if (Instruction *NewXor = foldXorToXor(I, Builder))
return NewXor;
// (A&B)^(A&C) -> A&(B^C) etc
@@ -2393,7 +2400,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (SimplifyDemandedInstructionBits(I))
return &I;
- if (Value *V = SimplifyBSwap(I))
+ if (Value *V = SimplifyBSwap(I, Builder))
return replaceInstUsesWith(I, V);
// Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
@@ -2404,13 +2411,13 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
// ~(~X & Y) --> (X | ~Y)
// ~(Y & ~X) --> (X | ~Y)
if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) {
- Value *NotY = Builder->CreateNot(Y, Y->getName() + ".not");
+ Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
return BinaryOperator::CreateOr(X, NotY);
}
// ~(~X | Y) --> (X & ~Y)
// ~(Y | ~X) --> (X & ~Y)
if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) {
- Value *NotY = Builder->CreateNot(Y, Y->getName() + ".not");
+ Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
return BinaryOperator::CreateAnd(X, NotY);
}
@@ -2426,8 +2433,8 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
NotVal->getOperand(0)->hasOneUse()) &&
IsFreeToInvert(NotVal->getOperand(1),
NotVal->getOperand(1)->hasOneUse())) {
- Value *NotX = Builder->CreateNot(NotVal->getOperand(0), "notlhs");
- Value *NotY = Builder->CreateNot(NotVal->getOperand(1), "notrhs");
+ Value *NotX = Builder.CreateNot(NotVal->getOperand(0), "notlhs");
+ Value *NotY = Builder.CreateNot(NotVal->getOperand(1), "notrhs");
if (NotVal->getOpcode() == Instruction::And)
return BinaryOperator::CreateOr(NotX, NotY);
return BinaryOperator::CreateAnd(NotX, NotY);
@@ -2457,7 +2464,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
}
// not (cmp A, B) = !cmp A, B
- ICmpInst::Predicate Pred;
+ CmpInst::Predicate Pred;
if (match(&I, m_Not(m_OneUse(m_Cmp(Pred, m_Value(), m_Value()))))) {
cast<CmpInst>(Op0)->setPredicate(CmpInst::getInversePredicate(Pred));
return replaceInstUsesWith(I, Op0);
@@ -2470,8 +2477,8 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (CI->hasOneUse() && Op0C->hasOneUse()) {
Instruction::CastOps Opcode = Op0C->getOpcode();
if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
- (RHSC == ConstantExpr::getCast(Opcode, Builder->getTrue(),
- Op0C->getDestTy()))) {
+ (RHSC == ConstantExpr::getCast(Opcode, Builder.getTrue(),
+ Op0C->getDestTy()))) {
CI->setPredicate(CI->getInversePredicate());
return CastInst::Create(Opcode, CI, Op0C->getType());
}
@@ -2481,7 +2488,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
// ~(c-X) == X-c-1 == X+(-c-1)
- if (Op0I->getOpcode() == Instruction::Sub && RHSC->isAllOnesValue())
+ if (Op0I->getOpcode() == Instruction::Sub && RHSC->isMinusOne())
if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
return BinaryOperator::CreateAdd(Op0I->getOperand(1),
@@ -2491,13 +2498,13 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
if (Op0I->getOpcode() == Instruction::Add) {
// ~(X-c) --> (-c-1)-X
- if (RHSC->isAllOnesValue()) {
+ if (RHSC->isMinusOne()) {
Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
return BinaryOperator::CreateSub(SubOne(NegOp0CI),
Op0I->getOperand(0));
} else if (RHSC->getValue().isSignMask()) {
// (X + C) ^ signmask -> (X + C + signmask)
- Constant *C = Builder->getInt(RHSC->getValue() + Op0CI->getValue());
+ Constant *C = Builder.getInt(RHSC->getValue() + Op0CI->getValue());
return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
}
@@ -2530,7 +2537,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
APInt FoldConst = C1->getValue().lshr(C2->getValue());
FoldConst ^= C3->getValue();
// Prepare the two operands.
- Value *Opnd0 = Builder->CreateLShr(E1->getOperand(0), C2);
+ Value *Opnd0 = Builder.CreateLShr(E1->getOperand(0), C2);
Opnd0->takeName(Op0I);
cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc());
Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst);
@@ -2575,14 +2582,14 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (A == Op1) // (B|A)^B == (A|B)^B
std::swap(A, B);
if (B == Op1) // (A|B)^B == A & ~B
- return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1));
+ return BinaryOperator::CreateAnd(A, Builder.CreateNot(Op1));
} else if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B))))) {
if (A == Op1) // (A&B)^A -> (B&A)^A
std::swap(A, B);
const APInt *C;
if (B == Op1 && // (B&A)^A == ~B & A
!match(Op1, m_APInt(C))) { // Canonical form is (B&C)^C
- return BinaryOperator::CreateAnd(Builder->CreateNot(A), Op1);
+ return BinaryOperator::CreateAnd(Builder.CreateNot(A), Op1);
}
}
}
@@ -2594,20 +2601,20 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
match(Op1, m_Or(m_Value(A), m_Value(B)))) {
if (D == A)
return BinaryOperator::CreateXor(
- Builder->CreateAnd(Builder->CreateNot(A), B), C);
+ Builder.CreateAnd(Builder.CreateNot(A), B), C);
if (D == B)
return BinaryOperator::CreateXor(
- Builder->CreateAnd(Builder->CreateNot(B), A), C);
+ Builder.CreateAnd(Builder.CreateNot(B), A), C);
}
// (A | B)^(A ^ C) -> ((~A) & B) ^ C
if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
match(Op1, m_Xor(m_Value(D), m_Value(C)))) {
if (D == A)
return BinaryOperator::CreateXor(
- Builder->CreateAnd(Builder->CreateNot(A), B), C);
+ Builder.CreateAnd(Builder.CreateNot(A), B), C);
if (D == B)
return BinaryOperator::CreateXor(
- Builder->CreateAnd(Builder->CreateNot(B), A), C);
+ Builder.CreateAnd(Builder.CreateNot(B), A), C);
}
// (A & B) ^ (A ^ B) -> (A | B)
if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
@@ -2624,7 +2631,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
Value *A, *B;
if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
match(Op1, m_Not(m_Specific(A))))
- return BinaryOperator::CreateNot(Builder->CreateAnd(A, B));
+ return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 3770021de100..391c430dab75 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -128,23 +128,23 @@ Instruction *InstCombiner::SimplifyElementUnorderedAtomicMemCpy(
Type::getIntNPtrTy(AMI->getContext(), ElementSizeInBits,
Src->getType()->getPointerAddressSpace());
- Value *SrcCasted = Builder->CreatePointerCast(Src, ElementPointerType,
- "memcpy_unfold.src_casted");
- Value *DstCasted = Builder->CreatePointerCast(Dst, ElementPointerType,
- "memcpy_unfold.dst_casted");
+ Value *SrcCasted = Builder.CreatePointerCast(Src, ElementPointerType,
+ "memcpy_unfold.src_casted");
+ Value *DstCasted = Builder.CreatePointerCast(Dst, ElementPointerType,
+ "memcpy_unfold.dst_casted");
for (uint64_t i = 0; i < NumElements; ++i) {
// Get current element addresses
ConstantInt *ElementIdxCI =
ConstantInt::get(AMI->getContext(), APInt(64, i));
Value *SrcElementAddr =
- Builder->CreateGEP(SrcCasted, ElementIdxCI, "memcpy_unfold.src_addr");
+ Builder.CreateGEP(SrcCasted, ElementIdxCI, "memcpy_unfold.src_addr");
Value *DstElementAddr =
- Builder->CreateGEP(DstCasted, ElementIdxCI, "memcpy_unfold.dst_addr");
+ Builder.CreateGEP(DstCasted, ElementIdxCI, "memcpy_unfold.dst_addr");
// Load from the source. Transfer alignment information and mark load as
// unordered atomic.
- LoadInst *Load = Builder->CreateLoad(SrcElementAddr, "memcpy_unfold.val");
+ LoadInst *Load = Builder.CreateLoad(SrcElementAddr, "memcpy_unfold.val");
Load->setOrdering(AtomicOrdering::Unordered);
// We know alignment of the first element. It is also guaranteed by the
// verifier that element size is less or equal than first element
@@ -157,7 +157,7 @@ Instruction *InstCombiner::SimplifyElementUnorderedAtomicMemCpy(
Load->setDebugLoc(AMI->getDebugLoc());
// Store loaded value via unordered atomic store.
- StoreInst *Store = Builder->CreateStore(Load, DstElementAddr);
+ StoreInst *Store = Builder.CreateStore(Load, DstElementAddr);
Store->setOrdering(AtomicOrdering::Unordered);
Store->setAlignment(i == 0 ? AMI->getParamAlignment(0)
: ElementSizeInBytes);
@@ -213,7 +213,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
if (M->getNumOperands() == 3 && M->getOperand(0) &&
mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
- mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
+ mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
M->getOperand(1) &&
mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
@@ -227,9 +227,9 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
SrcAlign = std::max(SrcAlign, CopyAlign);
DstAlign = std::max(DstAlign, CopyAlign);
- Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
- Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
- LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
+ Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
+ Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
+ LoadInst *L = Builder.CreateLoad(Src, MI->isVolatile());
L->setAlignment(SrcAlign);
if (CopyMD)
L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
@@ -238,7 +238,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
if (LoopMemParallelMD)
L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
- StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
+ StoreInst *S = Builder.CreateStore(L, Dest, MI->isVolatile());
S->setAlignment(DstAlign);
if (CopyMD)
S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
@@ -274,15 +274,15 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
Value *Dest = MI->getDest();
unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
- Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);
+ Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
// Alignment 0 is identity for alignment 1 for memset, but not store.
if (Alignment == 0) Alignment = 1;
// Extract the fill value and store.
uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
- StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
- MI->isVolatile());
+ StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
+ MI->isVolatile());
S->setAlignment(Alignment);
// Set the size of the copy to 0, it will be deleted on the next iteration.
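The memset lowering above builds its store value by multiplying the fill byte by 0x0101010101010101, which replicates the byte across an i64; the store type is sized to the memset length, so only the low bytes that are actually needed get kept. A one-line standalone check of that trick (illustration, not from the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t FillByte = 0xAB;
      uint64_t Fill = FillByte * 0x0101010101010101ULL; // replicate into every byte
      assert(Fill == 0xABABABABABABABABULL);
      return 0;
    }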
@@ -600,8 +600,7 @@ static Value *simplifyX86muldq(const IntrinsicInst &II,
return Builder.CreateMul(LHS, RHS);
}
-static Value *simplifyX86pack(IntrinsicInst &II, InstCombiner &IC,
- InstCombiner::BuilderTy &Builder, bool IsSigned) {
+static Value *simplifyX86pack(IntrinsicInst &II, bool IsSigned) {
Value *Arg0 = II.getArgOperand(0);
Value *Arg1 = II.getArgOperand(1);
Type *ResTy = II.getType();
@@ -676,8 +675,7 @@ static Value *simplifyX86pack(IntrinsicInst &II, InstCombiner &IC,
return ConstantVector::get(Vals);
}
-static Value *simplifyX86movmsk(const IntrinsicInst &II,
- InstCombiner::BuilderTy &Builder) {
+static Value *simplifyX86movmsk(const IntrinsicInst &II) {
Value *Arg = II.getArgOperand(0);
Type *ResTy = II.getType();
Type *ArgTy = Arg->getType();
@@ -860,7 +858,7 @@ static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
}
// Constant Fold - extraction from zero is always {zero, undef}.
- if (CI0 && CI0->equalsInt(0))
+ if (CI0 && CI0->isZero())
return LowConstantHighUndef(0);
return nullptr;
@@ -1404,7 +1402,7 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
&IC.getDominatorTree())) {
if (!match(II.getArgOperand(1), m_One())) {
- II.setOperand(1, IC.Builder->getTrue());
+ II.setOperand(1, IC.Builder.getTrue());
return &II;
}
}
@@ -1477,7 +1475,7 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
// the LLVM intrinsic definition for the pointer argument.
unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
- Value *PtrCast = IC.Builder->CreateBitCast(Ptr, VecPtrTy, "castvec");
+ Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
// Second, convert the x86 XMM integer vector mask to a vector of bools based
// on each element's most significant bit (the sign bit).
@@ -1485,7 +1483,7 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
// The pass-through vector for an x86 masked load is a zero vector.
CallInst *NewMaskedLoad =
- IC.Builder->CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
+ IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
return IC.replaceInstUsesWith(II, NewMaskedLoad);
}
@@ -1520,13 +1518,13 @@ static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
// the LLVM intrinsic definition for the pointer argument.
unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
- Value *PtrCast = IC.Builder->CreateBitCast(Ptr, VecPtrTy, "castvec");
+ Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
// Second, convert the x86 XMM integer vector mask to a vector of bools based
// on each element's most significant bit (the sign bit).
Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
- IC.Builder->CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
+ IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
// 'Replace uses' doesn't work for stores. Erase the original masked store.
IC.eraseInstFromFunction(II);
@@ -1764,7 +1762,7 @@ static Instruction *SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
// The remainder of cases are NVVM intrinsics that map to LLVM idioms, but
// need special handling.
//
- // We seem to be mising intrinsics for rcp.approx.{ftz.}f32, which is just
+ // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
// as well.
case Intrinsic::nvvm_rcp_rn_d:
return {SPC_Reciprocal, FTZ_Any};
@@ -1965,16 +1963,17 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Value *IIOperand = II->getArgOperand(0);
Value *X = nullptr;
+ // TODO should this be in InstSimplify?
// bswap(bswap(x)) -> x
if (match(IIOperand, m_BSwap(m_Value(X))))
- return replaceInstUsesWith(CI, X);
+ return replaceInstUsesWith(CI, X);
// bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
unsigned C = X->getType()->getPrimitiveSizeInBits() -
IIOperand->getType()->getPrimitiveSizeInBits();
Value *CV = ConstantInt::get(X->getType(), C);
- Value *V = Builder->CreateLShr(X, CV);
+ Value *V = Builder.CreateLShr(X, CV);
return new TruncInst(V, IIOperand->getType());
}
break;
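The bswap(trunc(bswap(x))) -> trunc(lshr(x, c)) fold above drops both byte swaps by shifting the high bytes down first, with c = width(x) - width(trunc). A standalone check for the i32 -> i16 case, using compiler builtins in place of the llvm.bswap intrinsics (illustration, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t X = 0xAABBCCDDu;
      uint16_t Original = __builtin_bswap16(static_cast<uint16_t>(__builtin_bswap32(X)));
      uint16_t Folded   = static_cast<uint16_t>(X >> 16); // c = 32 - 16
      assert(Original == Folded && Original == 0xAABB);
      return 0;
    }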
@@ -1984,6 +1983,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Value *IIOperand = II->getArgOperand(0);
Value *X = nullptr;
+ // TODO should this be in InstSimplify?
// bitreverse(bitreverse(x)) -> x
if (match(IIOperand, m_BitReverse(m_Value(X))))
return replaceInstUsesWith(CI, X);
@@ -1991,7 +1991,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
case Intrinsic::masked_load:
- if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, *Builder))
+ if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, Builder))
return replaceInstUsesWith(CI, SimplifiedMaskedOp);
break;
case Intrinsic::masked_store:
@@ -2010,7 +2010,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (Power->isOne())
return replaceInstUsesWith(CI, II->getArgOperand(0));
// powi(x, -1) -> 1/x
- if (Power->isAllOnesValue())
+ if (Power->isMinusOne())
return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
II->getArgOperand(0));
}
@@ -2073,11 +2073,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::fmuladd: {
// Canonicalize fast fmuladd to the separate fmul + fadd.
if (II->hasUnsafeAlgebra()) {
- BuilderTy::FastMathFlagGuard Guard(*Builder);
- Builder->setFastMathFlags(II->getFastMathFlags());
- Value *Mul = Builder->CreateFMul(II->getArgOperand(0),
- II->getArgOperand(1));
- Value *Add = Builder->CreateFAdd(Mul, II->getArgOperand(2));
+ BuilderTy::FastMathFlagGuard Guard(Builder);
+ Builder.setFastMathFlags(II->getFastMathFlags());
+ Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
+ II->getArgOperand(1));
+ Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
Add->takeName(II);
return replaceInstUsesWith(*II, Add);
}
@@ -2128,8 +2128,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Constant *LHS, *RHS;
if (match(II->getArgOperand(0),
m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
- CallInst *Call0 = Builder->CreateCall(II->getCalledFunction(), {LHS});
- CallInst *Call1 = Builder->CreateCall(II->getCalledFunction(), {RHS});
+ CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
+ CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
return SelectInst::Create(Cond, Call0, Call1);
}
@@ -2147,7 +2147,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// fabs (fpext x) -> fpext (fabs x)
Value *F = Intrinsic::getDeclaration(II->getModule(), II->getIntrinsicID(),
{ ExtSrc->getType() });
- CallInst *NewFabs = Builder->CreateCall(F, ExtSrc);
+ CallInst *NewFabs = Builder.CreateCall(F, ExtSrc);
NewFabs->copyFastMathFlags(II);
NewFabs->takeName(II);
return new FPExtInst(NewFabs, II->getType());
@@ -2174,7 +2174,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Turn PPC lvx -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
&DT) >= 16) {
- Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
+ Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
}
@@ -2182,8 +2182,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_vsx_lxvw4x:
case Intrinsic::ppc_vsx_lxvd2x: {
// Turn PPC VSX loads into normal loads.
- Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
- PointerType::getUnqual(II->getType()));
+ Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
+ PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr, Twine(""), false, 1);
}
case Intrinsic::ppc_altivec_stvx:
@@ -2193,7 +2193,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
&DT) >= 16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
- Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
+ Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr);
}
break;
@@ -2201,18 +2201,18 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_vsx_stxvd2x: {
// Turn PPC VSX stores into normal stores.
Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
- Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
+ Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
}
case Intrinsic::ppc_qpx_qvlfs:
// Turn PPC QPX qvlfs -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
&DT) >= 16) {
- Type *VTy = VectorType::get(Builder->getFloatTy(),
+ Type *VTy = VectorType::get(Builder.getFloatTy(),
II->getType()->getVectorNumElements());
- Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
+ Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(VTy));
- Value *Load = Builder->CreateLoad(Ptr);
+ Value *Load = Builder.CreateLoad(Ptr);
return new FPExtInst(Load, II->getType());
}
break;
@@ -2220,7 +2220,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Turn PPC QPX qvlfd -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
&DT) >= 32) {
- Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
+ Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
}
@@ -2229,11 +2229,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Turn PPC QPX qvstfs -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
&DT) >= 16) {
- Type *VTy = VectorType::get(Builder->getFloatTy(),
+ Type *VTy = VectorType::get(Builder.getFloatTy(),
II->getArgOperand(0)->getType()->getVectorNumElements());
- Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy);
+ Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
Type *OpPtrTy = PointerType::getUnqual(VTy);
- Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
+ Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(TOp, Ptr);
}
break;
@@ -2243,7 +2243,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
&DT) >= 32) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
- Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
+ Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr);
}
break;
@@ -2272,15 +2272,15 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
SmallVector<uint32_t, 8> SubVecMask;
for (unsigned i = 0; i != RetWidth; ++i)
SubVecMask.push_back((int)i);
- VectorHalfAsShorts = Builder->CreateShuffleVector(
+ VectorHalfAsShorts = Builder.CreateShuffleVector(
Arg, UndefValue::get(ArgType), SubVecMask);
}
auto VectorHalfType =
VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
auto VectorHalfs =
- Builder->CreateBitCast(VectorHalfAsShorts, VectorHalfType);
- auto VectorFloats = Builder->CreateFPExt(VectorHalfs, RetType);
+ Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
+ auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
return replaceInstUsesWith(*II, VectorFloats);
}
@@ -2334,7 +2334,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx_movmsk_pd_256:
case Intrinsic::x86_avx_movmsk_ps_256:
case Intrinsic::x86_avx2_pmovmskb: {
- if (Value *V = simplifyX86movmsk(*II, *Builder))
+ if (Value *V = simplifyX86movmsk(*II))
return replaceInstUsesWith(*II, V);
break;
}
@@ -2437,25 +2437,25 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
default: llvm_unreachable("Case stmts out of sync!");
case Intrinsic::x86_avx512_mask_add_ps_512:
case Intrinsic::x86_avx512_mask_add_pd_512:
- V = Builder->CreateFAdd(Arg0, Arg1);
+ V = Builder.CreateFAdd(Arg0, Arg1);
break;
case Intrinsic::x86_avx512_mask_sub_ps_512:
case Intrinsic::x86_avx512_mask_sub_pd_512:
- V = Builder->CreateFSub(Arg0, Arg1);
+ V = Builder.CreateFSub(Arg0, Arg1);
break;
case Intrinsic::x86_avx512_mask_mul_ps_512:
case Intrinsic::x86_avx512_mask_mul_pd_512:
- V = Builder->CreateFMul(Arg0, Arg1);
+ V = Builder.CreateFMul(Arg0, Arg1);
break;
case Intrinsic::x86_avx512_mask_div_ps_512:
case Intrinsic::x86_avx512_mask_div_pd_512:
- V = Builder->CreateFDiv(Arg0, Arg1);
+ V = Builder.CreateFDiv(Arg0, Arg1);
break;
}
// Create a select for the masking.
V = emitX86MaskSelect(II->getArgOperand(3), V, II->getArgOperand(2),
- *Builder);
+ Builder);
return replaceInstUsesWith(*II, V);
}
}
@@ -2476,27 +2476,27 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Extract the element as scalars.
Value *Arg0 = II->getArgOperand(0);
Value *Arg1 = II->getArgOperand(1);
- Value *LHS = Builder->CreateExtractElement(Arg0, (uint64_t)0);
- Value *RHS = Builder->CreateExtractElement(Arg1, (uint64_t)0);
+ Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
+ Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
Value *V;
switch (II->getIntrinsicID()) {
default: llvm_unreachable("Case stmts out of sync!");
case Intrinsic::x86_avx512_mask_add_ss_round:
case Intrinsic::x86_avx512_mask_add_sd_round:
- V = Builder->CreateFAdd(LHS, RHS);
+ V = Builder.CreateFAdd(LHS, RHS);
break;
case Intrinsic::x86_avx512_mask_sub_ss_round:
case Intrinsic::x86_avx512_mask_sub_sd_round:
- V = Builder->CreateFSub(LHS, RHS);
+ V = Builder.CreateFSub(LHS, RHS);
break;
case Intrinsic::x86_avx512_mask_mul_ss_round:
case Intrinsic::x86_avx512_mask_mul_sd_round:
- V = Builder->CreateFMul(LHS, RHS);
+ V = Builder.CreateFMul(LHS, RHS);
break;
case Intrinsic::x86_avx512_mask_div_ss_round:
case Intrinsic::x86_avx512_mask_div_sd_round:
- V = Builder->CreateFDiv(LHS, RHS);
+ V = Builder.CreateFDiv(LHS, RHS);
break;
}
@@ -2506,18 +2506,18 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// We don't need a select if we know the mask bit is a 1.
if (!C || !C->getValue()[0]) {
// Cast the mask to an i1 vector and then extract the lowest element.
- auto *MaskTy = VectorType::get(Builder->getInt1Ty(),
+ auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
cast<IntegerType>(Mask->getType())->getBitWidth());
- Mask = Builder->CreateBitCast(Mask, MaskTy);
- Mask = Builder->CreateExtractElement(Mask, (uint64_t)0);
+ Mask = Builder.CreateBitCast(Mask, MaskTy);
+ Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
// Extract the lowest element from the passthru operand.
- Value *Passthru = Builder->CreateExtractElement(II->getArgOperand(2),
+ Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
(uint64_t)0);
- V = Builder->CreateSelect(Mask, V, Passthru);
+ V = Builder.CreateSelect(Mask, V, Passthru);
}
// Insert the result back into the original argument 0.
- V = Builder->CreateInsertElement(Arg0, V, (uint64_t)0);
+ V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);
return replaceInstUsesWith(*II, V);
}
@@ -2598,7 +2598,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx512_pslli_d_512:
case Intrinsic::x86_avx512_pslli_q_512:
case Intrinsic::x86_avx512_pslli_w_512:
- if (Value *V = simplifyX86immShift(*II, *Builder))
+ if (Value *V = simplifyX86immShift(*II, Builder))
return replaceInstUsesWith(*II, V);
break;
@@ -2629,7 +2629,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx512_psll_d_512:
case Intrinsic::x86_avx512_psll_q_512:
case Intrinsic::x86_avx512_psll_w_512: {
- if (Value *V = simplifyX86immShift(*II, *Builder))
+ if (Value *V = simplifyX86immShift(*II, Builder))
return replaceInstUsesWith(*II, V);
// SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
@@ -2673,7 +2673,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx512_psrlv_w_128:
case Intrinsic::x86_avx512_psrlv_w_256:
case Intrinsic::x86_avx512_psrlv_w_512:
- if (Value *V = simplifyX86varShift(*II, *Builder))
+ if (Value *V = simplifyX86varShift(*II, Builder))
return replaceInstUsesWith(*II, V);
break;
@@ -2683,7 +2683,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx2_pmulu_dq:
case Intrinsic::x86_avx512_pmul_dq_512:
case Intrinsic::x86_avx512_pmulu_dq_512: {
- if (Value *V = simplifyX86muldq(*II, *Builder))
+ if (Value *V = simplifyX86muldq(*II, Builder))
return replaceInstUsesWith(*II, V);
unsigned VWidth = II->getType()->getVectorNumElements();
@@ -2703,7 +2703,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx2_packsswb:
case Intrinsic::x86_avx512_packssdw_512:
case Intrinsic::x86_avx512_packsswb_512:
- if (Value *V = simplifyX86pack(*II, *this, *Builder, true))
+ if (Value *V = simplifyX86pack(*II, true))
return replaceInstUsesWith(*II, V);
break;
@@ -2713,7 +2713,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx2_packuswb:
case Intrinsic::x86_avx512_packusdw_512:
case Intrinsic::x86_avx512_packuswb_512:
- if (Value *V = simplifyX86pack(*II, *this, *Builder, false))
+ if (Value *V = simplifyX86pack(*II, false))
return replaceInstUsesWith(*II, V);
break;
@@ -2756,7 +2756,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
case Intrinsic::x86_sse41_insertps:
- if (Value *V = simplifyX86insertps(*II, *Builder))
+ if (Value *V = simplifyX86insertps(*II, Builder))
return replaceInstUsesWith(*II, V);
break;
@@ -2779,7 +2779,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
: nullptr;
// Attempt to simplify to a constant, shuffle vector or EXTRQI call.
- if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
+ if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
return replaceInstUsesWith(*II, V);
// EXTRQ only uses the lowest 64-bits of the first 128-bit vector
@@ -2811,7 +2811,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
// Attempt to simplify to a constant or shuffle vector.
- if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
+ if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
return replaceInstUsesWith(*II, V);
// EXTRQI only uses the lowest 64-bits of the first 128-bit vector
@@ -2843,7 +2843,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
const APInt &V11 = CI11->getValue();
APInt Len = V11.zextOrTrunc(6);
APInt Idx = V11.lshr(8).zextOrTrunc(6);
- if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
+ if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
return replaceInstUsesWith(*II, V);
}
@@ -2876,7 +2876,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (CILength && CIIndex) {
APInt Len = CILength->getValue().zextOrTrunc(6);
APInt Idx = CIIndex->getValue().zextOrTrunc(6);
- if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
+ if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
return replaceInstUsesWith(*II, V);
}
@@ -2930,7 +2930,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_ssse3_pshuf_b_128:
case Intrinsic::x86_avx2_pshuf_b:
case Intrinsic::x86_avx512_pshuf_b_512:
- if (Value *V = simplifyX86pshufb(*II, *Builder))
+ if (Value *V = simplifyX86pshufb(*II, Builder))
return replaceInstUsesWith(*II, V);
break;
@@ -2940,13 +2940,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx_vpermilvar_pd:
case Intrinsic::x86_avx_vpermilvar_pd_256:
case Intrinsic::x86_avx512_vpermilvar_pd_512:
- if (Value *V = simplifyX86vpermilvar(*II, *Builder))
+ if (Value *V = simplifyX86vpermilvar(*II, Builder))
return replaceInstUsesWith(*II, V);
break;
case Intrinsic::x86_avx2_permd:
case Intrinsic::x86_avx2_permps:
- if (Value *V = simplifyX86vpermv(*II, *Builder))
+ if (Value *V = simplifyX86vpermv(*II, Builder))
return replaceInstUsesWith(*II, V);
break;
@@ -2964,10 +2964,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx512_mask_permvar_sf_512:
case Intrinsic::x86_avx512_mask_permvar_si_256:
case Intrinsic::x86_avx512_mask_permvar_si_512:
- if (Value *V = simplifyX86vpermv(*II, *Builder)) {
+ if (Value *V = simplifyX86vpermv(*II, Builder)) {
// We simplified the permuting, now create a select for the masking.
V = emitX86MaskSelect(II->getArgOperand(3), V, II->getArgOperand(2),
- *Builder);
+ Builder);
return replaceInstUsesWith(*II, V);
}
break;
@@ -2976,7 +2976,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx_vperm2f128_ps_256:
case Intrinsic::x86_avx_vperm2f128_si_256:
case Intrinsic::x86_avx2_vperm2i128:
- if (Value *V = simplifyX86vperm2(*II, *Builder))
+ if (Value *V = simplifyX86vperm2(*II, Builder))
return replaceInstUsesWith(*II, V);
break;
@@ -3009,7 +3009,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_xop_vpcomd:
case Intrinsic::x86_xop_vpcomq:
case Intrinsic::x86_xop_vpcomw:
- if (Value *V = simplifyX86vpcom(*II, *Builder, true))
+ if (Value *V = simplifyX86vpcom(*II, Builder, true))
return replaceInstUsesWith(*II, V);
break;
@@ -3017,7 +3017,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_xop_vpcomud:
case Intrinsic::x86_xop_vpcomuq:
case Intrinsic::x86_xop_vpcomuw:
- if (Value *V = simplifyX86vpcom(*II, *Builder, false))
+ if (Value *V = simplifyX86vpcom(*II, Builder, false))
return replaceInstUsesWith(*II, V);
break;
@@ -3044,10 +3044,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (AllEltsOk) {
// Cast the input vectors to byte vectors.
- Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
- Mask->getType());
- Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
- Mask->getType());
+ Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
+ Mask->getType());
+ Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
+ Mask->getType());
Value *Result = UndefValue::get(Op0->getType());
// Only extract each element once.
@@ -3067,13 +3067,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
ExtractedElts[Idx] =
- Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
- Builder->getInt32(Idx&15));
+ Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
+ Builder.getInt32(Idx&15));
}
// Insert this value into the result vector.
- Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
- Builder->getInt32(i));
+ Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
+ Builder.getInt32(i));
}
return CastInst::Create(Instruction::BitCast, Result, CI.getType());
}
@@ -3238,7 +3238,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (Mask == (S_NAN | Q_NAN)) {
// Equivalent of isnan. Replace with standard fcmp.
- Value *FCmp = Builder->CreateFCmpUNO(Src0, Src0);
+ Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
FCmp->takeName(II);
return replaceInstUsesWith(*II, FCmp);
}
@@ -3250,7 +3250,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Clamp mask to used bits
if ((Mask & FullMask) != Mask) {
- CallInst *NewCall = Builder->CreateCall(II->getCalledFunction(),
+ CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
{ Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
);
@@ -3343,13 +3343,12 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// TODO: Also emit sub if only width is constant.
if (!CWidth && COffset && Offset == 0) {
Constant *KSize = ConstantInt::get(COffset->getType(), IntSize);
- Value *ShiftVal = Builder->CreateSub(KSize, II->getArgOperand(2));
- ShiftVal = Builder->CreateZExt(ShiftVal, II->getType());
+ Value *ShiftVal = Builder.CreateSub(KSize, II->getArgOperand(2));
+ ShiftVal = Builder.CreateZExt(ShiftVal, II->getType());
- Value *Shl = Builder->CreateShl(Src, ShiftVal);
- Value *RightShift = Signed ?
- Builder->CreateAShr(Shl, ShiftVal) :
- Builder->CreateLShr(Shl, ShiftVal);
+ Value *Shl = Builder.CreateShl(Src, ShiftVal);
+ Value *RightShift = Signed ? Builder.CreateAShr(Shl, ShiftVal)
+ : Builder.CreateLShr(Shl, ShiftVal);
RightShift->takeName(II);
return replaceInstUsesWith(*II, RightShift);
}
@@ -3360,17 +3359,15 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// TODO: This allows folding to undef when the hardware has specific
// behavior?
if (Offset + Width < IntSize) {
- Value *Shl = Builder->CreateShl(Src, IntSize - Offset - Width);
- Value *RightShift = Signed ?
- Builder->CreateAShr(Shl, IntSize - Width) :
- Builder->CreateLShr(Shl, IntSize - Width);
+ Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
+ Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
+ : Builder.CreateLShr(Shl, IntSize - Width);
RightShift->takeName(II);
return replaceInstUsesWith(*II, RightShift);
}
- Value *RightShift = Signed ?
- Builder->CreateAShr(Src, Offset) :
- Builder->CreateLShr(Src, Offset);
+ Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
+ : Builder.CreateLShr(Src, Offset);
RightShift->takeName(II);
return replaceInstUsesWith(*II, RightShift);
@@ -3439,7 +3436,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
- CallInst *NewCall = Builder->CreateMinNum(Src0, Src1);
+ CallInst *NewCall = Builder.CreateMinNum(Src0, Src1);
NewCall->copyFastMathFlags(II);
NewCall->takeName(II);
return replaceInstUsesWith(*II, NewCall);
@@ -3451,7 +3448,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
C2->getValueAPF());
return replaceInstUsesWith(*II,
- ConstantFP::get(Builder->getContext(), Result));
+ ConstantFP::get(Builder.getContext(), Result));
}
}
}
@@ -3494,7 +3491,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
MDNode *MD = MDNode::get(II->getContext(), MDArgs);
Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
- CallInst *NewCall = Builder->CreateCall(NewF, Args);
+ CallInst *NewCall = Builder.CreateCall(NewF, Args);
NewCall->addAttribute(AttributeList::FunctionIndex,
Attribute::Convergent);
NewCall->takeName(II);
@@ -3556,7 +3553,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
SrcLHS->getType());
Value *Args[] = { SrcLHS, SrcRHS,
ConstantInt::get(CC->getType(), SrcPred) };
- CallInst *NewCall = Builder->CreateCall(NewF, Args);
+ CallInst *NewCall = Builder.CreateCall(NewF, Args);
NewCall->takeName(II);
return replaceInstUsesWith(*II, NewCall);
}
@@ -3633,16 +3630,14 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// the InstCombineIRInserter object.
Value *AssumeIntrinsic = II->getCalledValue(), *A, *B;
if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
- Builder->CreateCall(AssumeIntrinsic, A, II->getName());
- Builder->CreateCall(AssumeIntrinsic, B, II->getName());
+ Builder.CreateCall(AssumeIntrinsic, A, II->getName());
+ Builder.CreateCall(AssumeIntrinsic, B, II->getName());
return eraseInstFromFunction(*II);
}
// assume(!(a || b)) -> assume(!a); assume(!b);
if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
- Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
- II->getName());
- Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
- II->getName());
+ Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(A), II->getName());
+ Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(B), II->getName());
return eraseInstFromFunction(*II);
}
@@ -3726,7 +3721,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
return eraseInstFromFunction(*NextInst);
// Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
- II->setArgOperand(0, Builder->CreateAnd(CurrCond, NextCond));
+ II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
return eraseInstFromFunction(*NextInst);
}
break;
@@ -4163,7 +4158,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Value *NewArg = *AI;
if ((*AI)->getType() != ParamTy)
- NewArg = Builder->CreateBitOrPointerCast(*AI, ParamTy);
+ NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
Args.push_back(NewArg);
// Add any parameter attributes.
@@ -4189,7 +4184,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// Must promote to pass through va_arg area!
Instruction::CastOps opcode =
CastInst::getCastOpcode(*AI, false, PTy, false);
- NewArg = Builder->CreateCast(opcode, *AI, PTy);
+ NewArg = Builder.CreateCast(opcode, *AI, PTy);
}
Args.push_back(NewArg);
@@ -4215,10 +4210,10 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
CallSite NewCS;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
- NewCS = Builder->CreateInvoke(Callee, II->getNormalDest(),
- II->getUnwindDest(), Args, OpBundles);
+ NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(),
+ II->getUnwindDest(), Args, OpBundles);
} else {
- NewCS = Builder->CreateCall(Callee, Args, OpBundles);
+ NewCS = Builder.CreateCall(Callee, Args, OpBundles);
cast<CallInst>(NewCS.getInstruction())
->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind());
}
@@ -4328,7 +4323,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// Add the chain argument and attributes.
Value *NestVal = Tramp->getArgOperand(2);
if (NestVal->getType() != NestTy)
- NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
+ NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
NewArgs.push_back(NestVal);
NewArgAttrs.push_back(NestAttr);
}
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index d3049389dfb9..dfdfd3e9da84 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -84,7 +84,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
AllocaInst &AI) {
PointerType *PTy = cast<PointerType>(CI.getType());
- BuilderTy AllocaBuilder(*Builder);
+ BuilderTy AllocaBuilder(Builder);
AllocaBuilder.SetInsertPoint(&AI);
// Get the type really allocated and the type casted to.
@@ -406,8 +406,7 @@ static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombiner &IC,
/// trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
/// --->
/// extractelement <4 x i32> %X, 1
-static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC,
- const DataLayout &DL) {
+static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC) {
Value *TruncOp = Trunc.getOperand(0);
Type *DestType = Trunc.getType();
if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
@@ -434,14 +433,14 @@ static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC,
unsigned NumVecElts = VecWidth / DestWidth;
if (VecType->getElementType() != DestType) {
VecType = VectorType::get(DestType, NumVecElts);
- VecInput = IC.Builder->CreateBitCast(VecInput, VecType, "bc");
+ VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
}
unsigned Elt = ShiftAmount / DestWidth;
- if (DL.isBigEndian())
+ if (IC.getDataLayout().isBigEndian())
Elt = NumVecElts - 1 - Elt;
- return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
+ return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}
/// Try to narrow the width of bitwise logic instructions with constants.
@@ -460,7 +459,7 @@ Instruction *InstCombiner::shrinkBitwiseLogic(TruncInst &Trunc) {
// trunc (logic X, C) --> logic (trunc X, C')
Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
- Value *NarrowOp0 = Builder->CreateTrunc(LogicOp->getOperand(0), DestTy);
+ Value *NarrowOp0 = Builder.CreateTrunc(LogicOp->getOperand(0), DestTy);
return BinaryOperator::Create(LogicOp->getOpcode(), NarrowOp0, NarrowC);
}
@@ -554,7 +553,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector.
if (DestTy->getScalarSizeInBits() == 1) {
Constant *One = ConstantInt::get(SrcTy, 1);
- Src = Builder->CreateAnd(Src, One);
+ Src = Builder.CreateAnd(Src, One);
Value *Zero = Constant::getNullValue(Src->getType());
return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
}
@@ -580,7 +579,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// Since we're doing an lshr and a zero extend, and know that the shift
// amount is smaller than ASize, it is always safe to do the shift in A's
// type, then zero extend or truncate to the result.
- Value *Shift = Builder->CreateLShr(A, Cst->getZExtValue());
+ Value *Shift = Builder.CreateLShr(A, Cst->getZExtValue());
Shift->takeName(Src);
return CastInst::CreateIntegerCast(Shift, DestTy, false);
}
@@ -610,7 +609,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(),
std::min(ShiftAmt, ASize - 1)));
if (SExt->hasOneUse()) {
- Value *Shift = Builder->CreateAShr(A, std::min(ShiftAmt, ASize-1));
+ Value *Shift = Builder.CreateAShr(A, std::min(ShiftAmt, ASize - 1));
Shift->takeName(Src);
return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
}
@@ -620,10 +619,10 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
if (Instruction *I = shrinkBitwiseLogic(CI))
return I;
- if (Instruction *I = shrinkSplatShuffle(CI, *Builder))
+ if (Instruction *I = shrinkSplatShuffle(CI, Builder))
return I;
- if (Instruction *I = shrinkInsertElt(CI, *Builder))
+ if (Instruction *I = shrinkInsertElt(CI, Builder))
return I;
if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
@@ -636,7 +635,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// FoldShiftByConstant and is the extend in reg pattern.
const unsigned DestSize = DestTy->getScalarSizeInBits();
if (Cst->getValue().ult(DestSize)) {
- Value *NewTrunc = Builder->CreateTrunc(A, DestTy, A->getName() + ".tr");
+ Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
return BinaryOperator::Create(
Instruction::Shl, NewTrunc,
@@ -645,7 +644,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
}
}
- if (Instruction *I = foldVecTruncToExtElt(CI, *this, DL))
+ if (Instruction *I = foldVecTruncToExtElt(CI, *this))
return I;
return nullptr;
@@ -668,13 +667,13 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
Value *In = ICI->getOperand(0);
Value *Sh = ConstantInt::get(In->getType(),
In->getType()->getScalarSizeInBits() - 1);
- In = Builder->CreateLShr(In, Sh, In->getName() + ".lobit");
+ In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
if (In->getType() != CI.getType())
- In = Builder->CreateIntCast(In, CI.getType(), false/*ZExt*/);
+ In = Builder.CreateIntCast(In, CI.getType(), false /*ZExt*/);
if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
Constant *One = ConstantInt::get(In->getType(), 1);
- In = Builder->CreateXor(In, One, In->getName() + ".not");
+ In = Builder.CreateXor(In, One, In->getName() + ".not");
}
return replaceInstUsesWith(CI, In);
@@ -713,19 +712,19 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
if (ShAmt) {
// Perform a logical shr by shiftamt.
// Insert the shift to put the result in the low bit.
- In = Builder->CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
- In->getName() + ".lobit");
+ In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
+ In->getName() + ".lobit");
}
if (!Op1CV.isNullValue() == isNE) { // Toggle the low bit.
Constant *One = ConstantInt::get(In->getType(), 1);
- In = Builder->CreateXor(In, One);
+ In = Builder.CreateXor(In, One);
}
if (CI.getType() == In->getType())
return replaceInstUsesWith(CI, In);
- Value *IntCast = Builder->CreateIntCast(In, CI.getType(), false);
+ Value *IntCast = Builder.CreateIntCast(In, CI.getType(), false);
return replaceInstUsesWith(CI, IntCast);
}
}
@@ -748,19 +747,19 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
if (UnknownBit.countPopulation() == 1) {
if (!DoTransform) return ICI;
- Value *Result = Builder->CreateXor(LHS, RHS);
+ Value *Result = Builder.CreateXor(LHS, RHS);
// Mask off any bits that are set and won't be shifted away.
if (KnownLHS.One.uge(UnknownBit))
- Result = Builder->CreateAnd(Result,
+ Result = Builder.CreateAnd(Result,
ConstantInt::get(ITy, UnknownBit));
// Shift the bit we're testing down to the lsb.
- Result = Builder->CreateLShr(
+ Result = Builder.CreateLShr(
Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));
if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
- Result = Builder->CreateXor(Result, ConstantInt::get(ITy, 1));
+ Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
Result->takeName(ICI);
return replaceInstUsesWith(CI, Result);
}
@@ -960,7 +959,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
if (SrcSize < DstSize) {
APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
- Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
+ Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
return new ZExtInst(And, CI.getType());
}
@@ -970,7 +969,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
AndValue));
}
if (SrcSize > DstSize) {
- Value *Trunc = Builder->CreateTrunc(A, CI.getType());
+ Value *Trunc = Builder.CreateTrunc(A, CI.getType());
APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
return BinaryOperator::CreateAnd(Trunc,
ConstantInt::get(Trunc->getType(),
@@ -992,8 +991,8 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
(transformZExtICmp(LHS, CI, false) ||
transformZExtICmp(RHS, CI, false))) {
// zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
- Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName());
- Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName());
+ Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
+ Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
BinaryOperator *Or = BinaryOperator::Create(Instruction::Or, LCast, RCast);
// Perform the elimination.
@@ -1020,7 +1019,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
X->getType() == CI.getType()) {
Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
- return BinaryOperator::CreateXor(Builder->CreateAnd(X, ZC), ZC);
+ return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
}
return nullptr;
@@ -1043,12 +1042,12 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
Value *Sh = ConstantInt::get(Op0->getType(),
Op0->getType()->getScalarSizeInBits()-1);
- Value *In = Builder->CreateAShr(Op0, Sh, Op0->getName()+".lobit");
+ Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
if (In->getType() != CI.getType())
- In = Builder->CreateIntCast(In, CI.getType(), true/*SExt*/);
+ In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);
if (Pred == ICmpInst::ICMP_SGT)
- In = Builder->CreateNot(In, In->getName()+".not");
+ In = Builder.CreateNot(In, In->getName() + ".not");
return replaceInstUsesWith(CI, In);
}
}
@@ -1079,26 +1078,26 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
// Perform a right shift to place the desired bit in the LSB.
if (ShiftAmt)
- In = Builder->CreateLShr(In,
- ConstantInt::get(In->getType(), ShiftAmt));
+ In = Builder.CreateLShr(In,
+ ConstantInt::get(In->getType(), ShiftAmt));
// At this point "In" is either 1 or 0. Subtract 1 to turn
// {1, 0} -> {0, -1}.
- In = Builder->CreateAdd(In,
- ConstantInt::getAllOnesValue(In->getType()),
- "sext");
+ In = Builder.CreateAdd(In,
+ ConstantInt::getAllOnesValue(In->getType()),
+ "sext");
} else {
// sext ((x & 2^n) != 0) -> (x << bitwidth-n) a>> bitwidth-1
// sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
// Perform a left shift to place the desired bit in the MSB.
if (ShiftAmt)
- In = Builder->CreateShl(In,
- ConstantInt::get(In->getType(), ShiftAmt));
+ In = Builder.CreateShl(In,
+ ConstantInt::get(In->getType(), ShiftAmt));
// Distribute the bit over the whole bit width.
- In = Builder->CreateAShr(In, ConstantInt::get(In->getType(),
- KnownZeroMask.getBitWidth() - 1), "sext");
+ In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
+ KnownZeroMask.getBitWidth() - 1), "sext");
}
if (CI.getType() == In->getType())
@@ -1191,7 +1190,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// instead.
KnownBits Known = computeKnownBits(Src, 0, &CI);
if (Known.isNonNegative()) {
- Value *ZExt = Builder->CreateZExt(Src, DestTy);
+ Value *ZExt = Builder.CreateZExt(Src, DestTy);
return replaceInstUsesWith(CI, ZExt);
}
@@ -1217,7 +1216,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// We need to emit a shl + ashr to do the sign extend.
Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
- return BinaryOperator::CreateAShr(Builder->CreateShl(Res, ShAmt, "sext"),
+ return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
ShAmt);
}
@@ -1229,7 +1228,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
unsigned DestBitSize = DestTy->getScalarSizeInBits();
Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
- return BinaryOperator::CreateAShr(Builder->CreateShl(X, ShAmt), ShAmt);
+ return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
}
if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
@@ -1258,7 +1257,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
- A = Builder->CreateShl(A, ShAmtV, CI.getName());
+ A = Builder.CreateShl(A, ShAmtV, CI.getName());
return BinaryOperator::CreateAShr(A, ShAmtV);
}
@@ -1347,9 +1346,9 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// case of interest here is (float)((double)float + float)).
if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
if (LHSOrig->getType() != CI.getType())
- LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
+ LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
if (RHSOrig->getType() != CI.getType())
- RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
+ RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
Instruction *RI =
BinaryOperator::Create(OpI->getOpcode(), LHSOrig, RHSOrig);
RI->copyFastMathFlags(OpI);
@@ -1364,9 +1363,9 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// in the destination format if it can represent both sources.
if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
if (LHSOrig->getType() != CI.getType())
- LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
+ LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
if (RHSOrig->getType() != CI.getType())
- RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
+ RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
Instruction *RI =
BinaryOperator::CreateFMul(LHSOrig, RHSOrig);
RI->copyFastMathFlags(OpI);
@@ -1382,9 +1381,9 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// TODO: Tighten bound via rigorous analysis of the unbalanced case.
if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
if (LHSOrig->getType() != CI.getType())
- LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
+ LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
if (RHSOrig->getType() != CI.getType())
- RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
+ RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
Instruction *RI =
BinaryOperator::CreateFDiv(LHSOrig, RHSOrig);
RI->copyFastMathFlags(OpI);
@@ -1399,11 +1398,11 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
if (SrcWidth == OpWidth)
break;
if (LHSWidth < SrcWidth)
- LHSOrig = Builder->CreateFPExt(LHSOrig, RHSOrig->getType());
+ LHSOrig = Builder.CreateFPExt(LHSOrig, RHSOrig->getType());
else if (RHSWidth <= SrcWidth)
- RHSOrig = Builder->CreateFPExt(RHSOrig, LHSOrig->getType());
+ RHSOrig = Builder.CreateFPExt(RHSOrig, LHSOrig->getType());
if (LHSOrig != OpI->getOperand(0) || RHSOrig != OpI->getOperand(1)) {
- Value *ExactResult = Builder->CreateFRem(LHSOrig, RHSOrig);
+ Value *ExactResult = Builder.CreateFRem(LHSOrig, RHSOrig);
if (Instruction *RI = dyn_cast<Instruction>(ExactResult))
RI->copyFastMathFlags(OpI);
return CastInst::CreateFPCast(ExactResult, CI.getType());
@@ -1412,8 +1411,8 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// (fptrunc (fneg x)) -> (fneg (fptrunc x))
if (BinaryOperator::isFNeg(OpI)) {
- Value *InnerTrunc = Builder->CreateFPTrunc(OpI->getOperand(1),
- CI.getType());
+ Value *InnerTrunc = Builder.CreateFPTrunc(OpI->getOperand(1),
+ CI.getType());
Instruction *RI = BinaryOperator::CreateFNeg(InnerTrunc);
RI->copyFastMathFlags(OpI);
return RI;
@@ -1432,10 +1431,8 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
(isa<ConstantFP>(SI->getOperand(1)) ||
isa<ConstantFP>(SI->getOperand(2))) &&
matchSelectPattern(SI, LHS, RHS).Flavor == SPF_UNKNOWN) {
- Value *LHSTrunc = Builder->CreateFPTrunc(SI->getOperand(1),
- CI.getType());
- Value *RHSTrunc = Builder->CreateFPTrunc(SI->getOperand(2),
- CI.getType());
+ Value *LHSTrunc = Builder.CreateFPTrunc(SI->getOperand(1), CI.getType());
+ Value *RHSTrunc = Builder.CreateFPTrunc(SI->getOperand(2), CI.getType());
return SelectInst::Create(SI->getOperand(0), LHSTrunc, RHSTrunc);
}
@@ -1465,7 +1462,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// Do unary FP operation on smaller type.
// (fptrunc (fabs x)) -> (fabs (fptrunc x))
- Value *InnerTrunc = Builder->CreateFPTrunc(Src, CI.getType());
+ Value *InnerTrunc = Builder.CreateFPTrunc(Src, CI.getType());
Type *IntrinsicType[] = { CI.getType() };
Function *Overload = Intrinsic::getDeclaration(
CI.getModule(), II->getIntrinsicID(), IntrinsicType);
@@ -1482,7 +1479,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
}
}
- if (Instruction *I = shrinkInsertElt(CI, *Builder))
+ if (Instruction *I = shrinkInsertElt(CI, Builder))
return I;
return nullptr;
@@ -1577,7 +1574,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
- Value *P = Builder->CreateZExtOrTrunc(CI.getOperand(0), Ty);
+ Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
return new IntToPtrInst(P, CI.getType());
}
@@ -1627,7 +1624,7 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
if (Ty->isVectorTy()) // Handle vectors of pointers.
PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
- Value *P = Builder->CreatePtrToInt(CI.getOperand(0), PtrTy);
+ Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
}
@@ -1653,7 +1650,7 @@ static Instruction *optimizeVectorResize(Value *InVal, VectorType *DestTy,
return nullptr;
SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
- InVal = IC.Builder->CreateBitCast(InVal, SrcTy);
+ InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
}
// Now that the element types match, get the shuffle mask and RHS of the
@@ -1833,8 +1830,8 @@ static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
if (!Elements[i]) continue; // Unset element.
- Result = IC.Builder->CreateInsertElement(Result, Elements[i],
- IC.Builder->getInt32(i));
+ Result = IC.Builder.CreateInsertElement(Result, Elements[i],
+ IC.Builder.getInt32(i));
}
return Result;
@@ -1845,8 +1842,7 @@ static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
/// vectors better than bitcasts of scalars because vector registers are
/// usually not type-specific like scalar integer or scalar floating-point.
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
- InstCombiner &IC,
- const DataLayout &DL) {
+ InstCombiner &IC) {
// TODO: Create and use a pattern matcher for ExtractElementInst.
auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
if (!ExtElt || !ExtElt->hasOneUse())
@@ -1860,8 +1856,8 @@ static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
auto *NewVecType = VectorType::get(DestType, NumElts);
- auto *NewBC = IC.Builder->CreateBitCast(ExtElt->getVectorOperand(),
- NewVecType, "bc");
+ auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
+ NewVecType, "bc");
return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
}
@@ -1870,7 +1866,7 @@ static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
InstCombiner::BuilderTy &Builder) {
Type *DestTy = BitCast.getType();
BinaryOperator *BO;
- if (!DestTy->getScalarType()->isIntegerTy() ||
+ if (!DestTy->isIntOrIntVectorTy() ||
!match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
!BO->isBitwiseLogicOp())
return nullptr;
@@ -2033,8 +2029,8 @@ Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
// For each old PHI node, create a corresponding new PHI node with a type A.
SmallDenseMap<PHINode *, PHINode *> NewPNodes;
for (auto *OldPN : OldPhiNodes) {
- Builder->SetInsertPoint(OldPN);
- PHINode *NewPN = Builder->CreatePHI(DestTy, OldPN->getNumOperands());
+ Builder.SetInsertPoint(OldPN);
+ PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
NewPNodes[OldPN] = NewPN;
}
@@ -2047,8 +2043,8 @@ Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
if (auto *C = dyn_cast<Constant>(V)) {
NewV = ConstantExpr::getBitCast(C, DestTy);
} else if (auto *LI = dyn_cast<LoadInst>(V)) {
- Builder->SetInsertPoint(LI->getNextNode());
- NewV = Builder->CreateBitCast(LI, DestTy);
+ Builder.SetInsertPoint(LI->getNextNode());
+ NewV = Builder.CreateBitCast(LI, DestTy);
Worklist.Add(LI);
} else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
NewV = BCI->getOperand(0);
@@ -2064,9 +2060,9 @@ Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
for (User *U : PN->users()) {
auto *SI = dyn_cast<StoreInst>(U);
if (SI && SI->isSimple() && SI->getOperand(0) == PN) {
- Builder->SetInsertPoint(SI);
+ Builder.SetInsertPoint(SI);
auto *NewBC =
- cast<BitCastInst>(Builder->CreateBitCast(NewPNodes[PN], SrcTy));
+ cast<BitCastInst>(Builder.CreateBitCast(NewPNodes[PN], SrcTy));
SI->setOperand(0, NewBC);
Worklist.Add(SI);
assert(hasStoreUsersOnly(*NewBC));
@@ -2121,14 +2117,14 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// If we found a path from the src to dest, create the getelementptr now.
if (SrcElTy == DstElTy) {
- SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder->getInt32(0));
+ SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
return GetElementPtrInst::CreateInBounds(Src, Idxs);
}
}
if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
- Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
+ Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
// FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
@@ -2161,7 +2157,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// scalar-scalar cast.
if (!DestTy->isVectorTy()) {
Value *Elem =
- Builder->CreateExtractElement(Src,
+ Builder.CreateExtractElement(Src,
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
return CastInst::Create(Instruction::BitCast, Elem, DestTy);
}
@@ -2190,8 +2186,8 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
Tmp->getOperand(0)->getType() == DestTy) ||
((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
Tmp->getOperand(0)->getType() == DestTy)) {
- Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
- Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
+ Value *LHS = Builder.CreateBitCast(SVI->getOperand(0), DestTy);
+ Value *RHS = Builder.CreateBitCast(SVI->getOperand(1), DestTy);
// Return a new shuffle vector. Use the same element ID's, as we
// know the vector types match #elts.
return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
@@ -2204,13 +2200,13 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
return I;
- if (Instruction *I = canonicalizeBitCastExtElt(CI, *this, DL))
+ if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
return I;
- if (Instruction *I = foldBitCastBitwiseLogic(CI, *Builder))
+ if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
return I;
- if (Instruction *I = foldBitCastSelect(CI, *Builder))
+ if (Instruction *I = foldBitCastSelect(CI, Builder))
return I;
if (SrcTy->isPointerTy())
@@ -2234,7 +2230,7 @@ Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
MidTy = VectorType::get(MidTy, VT->getNumElements());
}
- Value *NewBitCast = Builder->CreateBitCast(Src, MidTy);
+ Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
return new AddrSpaceCastInst(NewBitCast, CI.getType());
}
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 58b8b2f52629..60d1cde971dd 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -392,7 +392,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
- Idx = Builder->CreateTrunc(Idx, IntPtrTy);
+ Idx = Builder.CreateTrunc(Idx, IntPtrTy);
}
// If the comparison is only true for one or two elements, emit direct
@@ -400,7 +400,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
if (SecondTrueElement != Overdefined) {
// None true -> false.
if (FirstTrueElement == Undefined)
- return replaceInstUsesWith(ICI, Builder->getFalse());
+ return replaceInstUsesWith(ICI, Builder.getFalse());
Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
@@ -409,9 +409,9 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
// True for two elements -> 'i == 47 | i == 72'.
- Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
+ Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
- Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
+ Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
return BinaryOperator::CreateOr(C1, C2);
}
@@ -420,7 +420,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
if (SecondFalseElement != Overdefined) {
// None false -> true.
if (FirstFalseElement == Undefined)
- return replaceInstUsesWith(ICI, Builder->getTrue());
+ return replaceInstUsesWith(ICI, Builder.getTrue());
Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
@@ -429,9 +429,9 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
// False for two elements -> 'i != 47 & i != 72'.
- Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
+ Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
- Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
+ Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
return BinaryOperator::CreateAnd(C1, C2);
}
@@ -443,7 +443,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
// Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
if (FirstTrueElement) {
Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
- Idx = Builder->CreateAdd(Idx, Offs);
+ Idx = Builder.CreateAdd(Idx, Offs);
}
Value *End = ConstantInt::get(Idx->getType(),
@@ -457,7 +457,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
// Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
if (FirstFalseElement) {
Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
- Idx = Builder->CreateAdd(Idx, Offs);
+ Idx = Builder.CreateAdd(Idx, Offs);
}
Value *End = ConstantInt::get(Idx->getType(),
@@ -481,9 +481,9 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
if (Ty) {
- Value *V = Builder->CreateIntCast(Idx, Ty, false);
- V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
- V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
+ Value *V = Builder.CreateIntCast(Idx, Ty, false);
+ V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
+ V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
}
}
@@ -566,7 +566,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
// we don't need to bother extending: the extension won't affect where the
// computation crosses zero.
if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
- VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
+ VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
}
return VariableIdx;
}
@@ -588,10 +588,10 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
// Okay, we can do this evaluation. Start by converting the index to intptr.
if (VariableIdx->getType() != IntPtrTy)
- VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
+ VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
true /*Signed*/);
Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
- return IC.Builder->CreateAdd(VariableIdx, OffsetVal, "offset");
+ return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
}
/// Returns true if we can rewrite Start as a GEP with pointer Base
@@ -981,13 +981,13 @@ Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
if (LHSIndexTy != RHSIndexTy) {
if (LHSIndexTy->getPrimitiveSizeInBits() <
RHSIndexTy->getPrimitiveSizeInBits()) {
- ROffset = Builder->CreateTrunc(ROffset, LHSIndexTy);
+ ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
} else
- LOffset = Builder->CreateTrunc(LOffset, RHSIndexTy);
+ LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
}
- Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
- LOffset, ROffset);
+ Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
+ LOffset, ROffset);
return replaceInstUsesWith(I, Cmp);
}
@@ -1026,7 +1026,7 @@ Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
if (NumDifferences == 0) // SAME GEP?
return replaceInstUsesWith(I, // No comparison is needed here.
- Builder->getInt1(ICmpInst::isTrueWhenEqual(Cond)));
+ Builder.getInt1(ICmpInst::isTrueWhenEqual(Cond)));
else if (NumDifferences == 1 && GEPsInBounds) {
Value *LHSV = GEPLHS->getOperand(DiffOperand);
@@ -1174,7 +1174,7 @@ Instruction *InstCombiner::foldICmpAddOpConst(Instruction &ICI,
// (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
- Constant *C = Builder->getInt(CI->getValue()-1);
+ Constant *C = Builder.getInt(CI->getValue() - 1);
return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
}
@@ -1347,17 +1347,17 @@ static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
Value *F = Intrinsic::getDeclaration(I.getModule(),
Intrinsic::sadd_with_overflow, NewType);
- InstCombiner::BuilderTy *Builder = IC.Builder;
+ InstCombiner::BuilderTy &Builder = IC.Builder;
// Put the new code above the original add, in case there are any uses of the
// add between the add and the compare.
- Builder->SetInsertPoint(OrigAdd);
+ Builder.SetInsertPoint(OrigAdd);
- Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName() + ".trunc");
- Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName() + ".trunc");
- CallInst *Call = Builder->CreateCall(F, {TruncA, TruncB}, "sadd");
- Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result");
- Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType());
+ Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
+ Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
+ CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
+ Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
+ Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
// The inner add was the result of the narrow add, zero extended to the
// wider type. Replace it with the result computed by the intrinsic.
@@ -1434,9 +1434,9 @@ Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
ConstantRange Intersection = DominatingCR.intersectWith(CR);
ConstantRange Difference = DominatingCR.difference(CR);
if (Intersection.isEmptySet())
- return replaceInstUsesWith(Cmp, Builder->getFalse());
+ return replaceInstUsesWith(Cmp, Builder.getFalse());
if (Difference.isEmptySet())
- return replaceInstUsesWith(Cmp, Builder->getTrue());
+ return replaceInstUsesWith(Cmp, Builder.getTrue());
// If this is a normal comparison, it demands all bits. If it is a sign
// bit comparison, it only demands the sign bit.
@@ -1452,9 +1452,9 @@ Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
return nullptr;
if (auto *AI = Intersection.getSingleElement())
- return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder->getInt(*AI));
+ return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*AI));
if (auto *AD = Difference.getSingleElement())
- return new ICmpInst(ICmpInst::ICMP_NE, X, Builder->getInt(*AD));
+ return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*AD));
}
return nullptr;
@@ -1628,11 +1628,11 @@ Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
!Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
// Compute C2 << Y.
Value *NewShift =
- IsShl ? Builder->CreateLShr(And->getOperand(1), Shift->getOperand(1))
- : Builder->CreateShl(And->getOperand(1), Shift->getOperand(1));
+ IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
+ : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
// Compute X & (C2 << Y).
- Value *NewAnd = Builder->CreateAnd(Shift->getOperand(0), NewShift);
+ Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
Cmp.setOperand(0, NewAnd);
return &Cmp;
}
@@ -1670,7 +1670,7 @@ Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
unsigned WideScalarBits = WideType->getScalarSizeInBits();
Constant *ZextC1 = ConstantInt::get(WideType, C1->zext(WideScalarBits));
Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
- Value *NewAnd = Builder->CreateAnd(W, ZextC2, And->getName());
+ Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
}
}
@@ -1704,12 +1704,12 @@ Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
} else {
if (UsesRemoved >= 3)
- NewOr = Builder->CreateOr(Builder->CreateShl(One, B, LShr->getName(),
- /*HasNUW=*/true),
- One, Or->getName());
+ NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
+ /*HasNUW=*/true),
+ One, Or->getName());
}
if (NewOr) {
- Value *NewAnd = Builder->CreateAnd(A, NewOr, And->getName());
+ Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
Cmp.setOperand(0, NewAnd);
return &Cmp;
}
@@ -1772,7 +1772,7 @@ Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
if (And->getType()->isVectorTy())
NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
- Value *Trunc = Builder->CreateTrunc(X, NTy);
+ Value *Trunc = Builder.CreateTrunc(X, NTy);
auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
: CmpInst::ICMP_SLT;
return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
@@ -1811,9 +1811,9 @@ Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
// Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
// -> and (icmp eq P, null), (icmp eq Q, null).
Value *CmpP =
- Builder->CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
+ Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
Value *CmpQ =
- Builder->CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
+ Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
auto LogicOpc = Pred == ICmpInst::Predicate::ICMP_EQ ? Instruction::And
: Instruction::Or;
return BinaryOperator::Create(LogicOpc, CmpP, CmpQ);
@@ -1993,7 +1993,7 @@ Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
Constant *Mask = ConstantInt::get(
ShType,
APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
- Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
+ Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
Constant *LShrC = ConstantInt::get(ShType, C->lshr(*ShiftAmt));
return new ICmpInst(Pred, And, LShrC);
}
@@ -2005,7 +2005,7 @@ Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
Constant *Mask = ConstantInt::get(
ShType,
APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
- Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
+ Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
And, Constant::getNullValue(ShType));
}
@@ -2024,7 +2024,7 @@ Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
Constant *NewC =
ConstantInt::get(TruncTy, C->ashr(*ShiftAmt).trunc(TypeBits - Amt));
- return new ICmpInst(Pred, Builder->CreateTrunc(X, TruncTy), NewC);
+ return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
}
return nullptr;
@@ -2076,8 +2076,8 @@ Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
Constant *DivCst = ConstantInt::get(
Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));
- Value *Tmp = IsAShr ? Builder->CreateSDiv(X, DivCst, "", Shr->isExact())
- : Builder->CreateUDiv(X, DivCst, "", Shr->isExact());
+ Value *Tmp = IsAShr ? Builder.CreateSDiv(X, DivCst, "", Shr->isExact())
+ : Builder.CreateUDiv(X, DivCst, "", Shr->isExact());
Cmp.setOperand(0, Tmp);
@@ -2115,7 +2115,7 @@ Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
// Otherwise strength reduce the shift into an 'and'.
APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
Constant *Mask = ConstantInt::get(Shr->getType(), Val);
- Value *And = Builder->CreateAnd(X, Mask, Shr->getName() + ".mask");
+ Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
return new ICmpInst(Pred, And, ShiftedCmpRHS);
}
@@ -2279,7 +2279,7 @@ Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
default: llvm_unreachable("Unhandled icmp opcode!");
case ICmpInst::ICMP_EQ:
if (LoOverflow && HiOverflow)
- return replaceInstUsesWith(Cmp, Builder->getFalse());
+ return replaceInstUsesWith(Cmp, Builder.getFalse());
if (HiOverflow)
return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
ICmpInst::ICMP_UGE, X, LoBound);
@@ -2291,7 +2291,7 @@ Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
HiBound->getUniqueInteger(), DivIsSigned, true));
case ICmpInst::ICMP_NE:
if (LoOverflow && HiOverflow)
- return replaceInstUsesWith(Cmp, Builder->getTrue());
+ return replaceInstUsesWith(Cmp, Builder.getTrue());
if (HiOverflow)
return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
ICmpInst::ICMP_ULT, X, LoBound);
@@ -2305,16 +2305,16 @@ Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_SLT:
if (LoOverflow == +1) // Low bound is greater than input range.
- return replaceInstUsesWith(Cmp, Builder->getTrue());
+ return replaceInstUsesWith(Cmp, Builder.getTrue());
if (LoOverflow == -1) // Low bound is less than input range.
- return replaceInstUsesWith(Cmp, Builder->getFalse());
+ return replaceInstUsesWith(Cmp, Builder.getFalse());
return new ICmpInst(Pred, X, LoBound);
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_SGT:
if (HiOverflow == +1) // High bound greater than input range.
- return replaceInstUsesWith(Cmp, Builder->getFalse());
+ return replaceInstUsesWith(Cmp, Builder.getFalse());
if (HiOverflow == -1) // High bound less than input range.
- return replaceInstUsesWith(Cmp, Builder->getTrue());
+ return replaceInstUsesWith(Cmp, Builder.getTrue());
if (Pred == ICmpInst::ICMP_UGT)
return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
@@ -2361,12 +2361,12 @@ Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp,
// iff (C2 & (C - 1)) == C - 1 and C is a power of 2
if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() &&
(*C2 & (*C - 1)) == (*C - 1))
- return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateOr(Y, *C - 1), X);
+ return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, *C - 1), X);
// C2 - Y >u C -> (Y | C) != C2
// iff C2 & C == C and C + 1 is a power of 2
if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == *C)
- return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateOr(Y, *C), X);
+ return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, *C), X);
return nullptr;
}
@@ -2422,14 +2422,14 @@ Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
// iff C & (C2-1) == 0
// C2 is a power of 2
if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && (*C2 & (*C - 1)) == 0)
- return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateAnd(X, -(*C)),
+ return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -(*C)),
ConstantExpr::getNeg(cast<Constant>(Y)));
// X+C >u C2 -> (X & ~C2) != C
// iff C & C2 == 0
// C2+1 is a power of 2
if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == 0)
- return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateAnd(X, ~(*C)),
+ return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~(*C)),
ConstantExpr::getNeg(cast<Constant>(Y)));
return nullptr;
@@ -2493,13 +2493,13 @@ Instruction *InstCombiner::foldICmpSelectConstant(ICmpInst &Cmp,
// When none of the three constants satisfy the predicate for the RHS (C),
// the entire original Cmp can be simplified to a false.
- Value *Cond = Builder->getFalse();
+ Value *Cond = Builder.getFalse();
if (TrueWhenLessThan)
- Cond = Builder->CreateOr(Cond, Builder->CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS));
+ Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS));
if (TrueWhenEqual)
- Cond = Builder->CreateOr(Cond, Builder->CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS));
+ Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS));
if (TrueWhenGreaterThan)
- Cond = Builder->CreateOr(Cond, Builder->CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS));
+ Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS));
return replaceInstUsesWith(Cmp, Cond);
}
@@ -2615,7 +2615,7 @@ Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
if (C->isNullValue() && BO->hasOneUse()) {
const APInt *BOC;
if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
- Value *NewRem = Builder->CreateURem(BOp0, BOp1, BO->getName());
+ Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
return new ICmpInst(Pred, NewRem,
Constant::getNullValue(BO->getType()));
}
@@ -2637,7 +2637,7 @@ Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
if (Value *NegVal = dyn_castNegVal(BOp0))
return new ICmpInst(Pred, NegVal, BOp1);
if (BO->hasOneUse()) {
- Value *Neg = Builder->CreateNeg(BOp1);
+ Value *Neg = Builder.CreateNeg(BOp1);
Neg->takeName(BO);
return new ICmpInst(Pred, BOp0, Neg);
}
@@ -2676,7 +2676,7 @@ Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
// Replace (X | C) == -1 with (X & ~C) == ~C.
// This removes the -1 constant.
Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
- Value *And = Builder->CreateAnd(BOp0, NotBOC);
+ Value *And = Builder.CreateAnd(BOp0, NotBOC);
return new ICmpInst(Pred, And, NotBOC);
}
break;
@@ -2740,23 +2740,26 @@ Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
if (!II || !Cmp.isEquality())
return nullptr;
- // Handle icmp {eq|ne} <intrinsic>, intcst.
+ // Handle icmp {eq|ne} <intrinsic>, Constant.
+ Type *Ty = II->getType();
switch (II->getIntrinsicID()) {
case Intrinsic::bswap:
Worklist.Add(II);
Cmp.setOperand(0, II->getArgOperand(0));
- Cmp.setOperand(1, Builder->getInt(C->byteSwap()));
+ Cmp.setOperand(1, ConstantInt::get(Ty, C->byteSwap()));
return &Cmp;
+
case Intrinsic::ctlz:
case Intrinsic::cttz:
// ctz(A) == bitwidth(A) -> A == 0 and likewise for !=
if (*C == C->getBitWidth()) {
Worklist.Add(II);
Cmp.setOperand(0, II->getArgOperand(0));
- Cmp.setOperand(1, ConstantInt::getNullValue(II->getType()));
+ Cmp.setOperand(1, ConstantInt::getNullValue(Ty));
return &Cmp;
}
break;
+
case Intrinsic::ctpop: {
// popcount(A) == 0 -> A == 0 and likewise for !=
// popcount(A) == bitwidth(A) -> A == -1 and likewise for !=
@@ -2764,8 +2767,8 @@ Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
if (IsZero || *C == C->getBitWidth()) {
Worklist.Add(II);
Cmp.setOperand(0, II->getArgOperand(0));
- auto *NewOp = IsZero ? Constant::getNullValue(II->getType())
- : Constant::getAllOnesValue(II->getType());
+ auto *NewOp =
+ IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty);
Cmp.setOperand(1, NewOp);
return &Cmp;
}
@@ -2774,6 +2777,7 @@ Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
default:
break;
}
+
return nullptr;
}
@@ -2841,11 +2845,11 @@ Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) {
}
if (Transform) {
if (!Op1)
- Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
- I.getName());
+ Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
+ I.getName());
if (!Op2)
- Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
- I.getName());
+ Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
+ I.getName());
return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
}
break;
@@ -3029,12 +3033,12 @@ Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
APInt AP1Abs = C1->getValue().abs();
APInt AP2Abs = C2->getValue().abs();
if (AP1Abs.uge(AP2Abs)) {
- ConstantInt *C3 = Builder->getInt(AP1 - AP2);
- Value *NewAdd = Builder->CreateNSWAdd(A, C3);
+ ConstantInt *C3 = Builder.getInt(AP1 - AP2);
+ Value *NewAdd = Builder.CreateNSWAdd(A, C3);
return new ICmpInst(Pred, NewAdd, C);
} else {
- ConstantInt *C3 = Builder->getInt(AP2 - AP1);
- Value *NewAdd = Builder->CreateNSWAdd(C, C3);
+ ConstantInt *C3 = Builder.getInt(AP2 - AP1);
+ Value *NewAdd = Builder.CreateNSWAdd(C, C3);
return new ICmpInst(Pred, A, NewAdd);
}
}
@@ -3157,8 +3161,8 @@ Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
Constant *Mask = ConstantInt::get(
BO0->getType(),
APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
- Value *And1 = Builder->CreateAnd(BO0->getOperand(0), Mask);
- Value *And2 = Builder->CreateAnd(BO1->getOperand(0), Mask);
+ Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
+ Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
return new ICmpInst(Pred, And1, And2);
}
// If there are no trailing zeros in the multiplier, just eliminate
@@ -3315,8 +3319,8 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
ConstantInt *C1, *C2;
if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
Op1->hasOneUse()) {
- Constant *NC = Builder->getInt(C1->getValue() ^ C2->getValue());
- Value *Xor = Builder->CreateXor(C, NC);
+ Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
+ Value *Xor = Builder.CreateXor(C, NC);
return new ICmpInst(Pred, A, Xor);
}
@@ -3362,8 +3366,8 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
}
if (X) { // Build (X^Y) & Z
- Op1 = Builder->CreateXor(X, Y);
- Op1 = Builder->CreateAnd(Op1, Z);
+ Op1 = Builder.CreateXor(X, Y);
+ Op1 = Builder.CreateAnd(Op1, Z);
I.setOperand(0, Op1);
I.setOperand(1, Constant::getNullValue(Op1->getType()));
return &I;
@@ -3380,7 +3384,7 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
APInt Pow2 = Cst1->getValue() + 1;
if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
- return new ICmpInst(Pred, A, Builder->CreateTrunc(B, A->getType()));
+ return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
}
// (A >> C) == (B >> C) --> (A^B) u< (1 << C)
@@ -3394,9 +3398,9 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
if (ShAmt < TypeBits && ShAmt != 0) {
ICmpInst::Predicate NewPred =
Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
- Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
+ Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
- return new ICmpInst(NewPred, Xor, Builder->getInt(CmpVal));
+ return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal));
}
}
@@ -3406,9 +3410,9 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
unsigned TypeBits = Cst1->getBitWidth();
unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
if (ShAmt < TypeBits && ShAmt != 0) {
- Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
+ Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
- Value *And = Builder->CreateAnd(Xor, Builder->getInt(AndVal),
+ Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
I.getName() + ".mask");
return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
}
@@ -3433,11 +3437,20 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
APInt CmpV = Cst1->getValue().zext(ASize);
CmpV <<= ShAmt;
- Value *Mask = Builder->CreateAnd(A, Builder->getInt(MaskV));
- return new ICmpInst(Pred, Mask, Builder->getInt(CmpV));
+ Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
+ return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
}
}
+ // If both operands are byte-swapped or bit-reversed, just compare the
+ // original values.
+ // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant()
+ // and handle more intrinsics.
+ if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) ||
+ (match(Op0, m_BitReverse(m_Value(A))) &&
+ match(Op1, m_BitReverse(m_Value(B)))))
+ return new ICmpInst(Pred, A, B);
+
return nullptr;
}
@@ -3462,7 +3475,7 @@ Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) {
RHSOp = RHSC->getOperand(0);
// If the pointer types don't match, insert a bitcast.
if (LHSCIOp->getType() != RHSOp->getType())
- RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
+ RHSOp = Builder.CreateBitCast(RHSOp, LHSCIOp->getType());
}
} else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
@@ -3546,7 +3559,7 @@ Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) {
// We're performing an unsigned comp with a sign extended value.
// This is true if the input is >= 0. [aka >s -1]
Constant *NegOne = Constant::getAllOnesValue(SrcTy);
- Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName());
+ Value *Result = Builder.CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName());
// Finally, return the value computed.
if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
@@ -3574,7 +3587,7 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
// may be pointing to the compare. We want to insert the new instructions
// before the add in case there are uses of the add between the add and the
// compare.
- Builder->SetInsertPoint(&OrigI);
+ Builder.SetInsertPoint(&OrigI);
switch (OCF) {
case OCF_INVALID:
@@ -3583,11 +3596,11 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
case OCF_UNSIGNED_ADD: {
OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI);
if (OR == OverflowResult::NeverOverflows)
- return SetResult(Builder->CreateNUWAdd(LHS, RHS), Builder->getFalse(),
+ return SetResult(Builder.CreateNUWAdd(LHS, RHS), Builder.getFalse(),
true);
if (OR == OverflowResult::AlwaysOverflows)
- return SetResult(Builder->CreateAdd(LHS, RHS), Builder->getTrue(), true);
+ return SetResult(Builder.CreateAdd(LHS, RHS), Builder.getTrue(), true);
// Fall through uadd into sadd
LLVM_FALLTHROUGH;
@@ -3595,13 +3608,13 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
case OCF_SIGNED_ADD: {
// X + 0 -> {X, false}
if (match(RHS, m_Zero()))
- return SetResult(LHS, Builder->getFalse(), false);
+ return SetResult(LHS, Builder.getFalse(), false);
// We can strength reduce this signed add into a regular add if we can prove
// that it will never overflow.
if (OCF == OCF_SIGNED_ADD)
if (willNotOverflowSignedAdd(LHS, RHS, OrigI))
- return SetResult(Builder->CreateNSWAdd(LHS, RHS), Builder->getFalse(),
+ return SetResult(Builder.CreateNSWAdd(LHS, RHS), Builder.getFalse(),
true);
break;
}
@@ -3610,15 +3623,15 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
case OCF_SIGNED_SUB: {
// X - 0 -> {X, false}
if (match(RHS, m_Zero()))
- return SetResult(LHS, Builder->getFalse(), false);
+ return SetResult(LHS, Builder.getFalse(), false);
if (OCF == OCF_SIGNED_SUB) {
if (willNotOverflowSignedSub(LHS, RHS, OrigI))
- return SetResult(Builder->CreateNSWSub(LHS, RHS), Builder->getFalse(),
+ return SetResult(Builder.CreateNSWSub(LHS, RHS), Builder.getFalse(),
true);
} else {
if (willNotOverflowUnsignedSub(LHS, RHS, OrigI))
- return SetResult(Builder->CreateNUWSub(LHS, RHS), Builder->getFalse(),
+ return SetResult(Builder.CreateNUWSub(LHS, RHS), Builder.getFalse(),
true);
}
break;
@@ -3627,28 +3640,28 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
case OCF_UNSIGNED_MUL: {
OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
if (OR == OverflowResult::NeverOverflows)
- return SetResult(Builder->CreateNUWMul(LHS, RHS), Builder->getFalse(),
+ return SetResult(Builder.CreateNUWMul(LHS, RHS), Builder.getFalse(),
true);
if (OR == OverflowResult::AlwaysOverflows)
- return SetResult(Builder->CreateMul(LHS, RHS), Builder->getTrue(), true);
+ return SetResult(Builder.CreateMul(LHS, RHS), Builder.getTrue(), true);
LLVM_FALLTHROUGH;
}
case OCF_SIGNED_MUL:
// X * undef -> undef
if (isa<UndefValue>(RHS))
- return SetResult(RHS, UndefValue::get(Builder->getInt1Ty()), false);
+ return SetResult(RHS, UndefValue::get(Builder.getInt1Ty()), false);
// X * 0 -> {0, false}
if (match(RHS, m_Zero()))
- return SetResult(RHS, Builder->getFalse(), false);
+ return SetResult(RHS, Builder.getFalse(), false);
// X * 1 -> {X, false}
if (match(RHS, m_One()))
- return SetResult(LHS, Builder->getFalse(), false);
+ return SetResult(LHS, Builder.getFalse(), false);
if (OCF == OCF_SIGNED_MUL)
if (willNotOverflowSignedMul(LHS, RHS, OrigI))
- return SetResult(Builder->CreateNSWMul(LHS, RHS), Builder->getFalse(),
+ return SetResult(Builder.CreateNSWMul(LHS, RHS), Builder.getFalse(),
true);
break;
}
@@ -3813,25 +3826,25 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
return nullptr;
}
- InstCombiner::BuilderTy *Builder = IC.Builder;
- Builder->SetInsertPoint(MulInstr);
+ InstCombiner::BuilderTy &Builder = IC.Builder;
+ Builder.SetInsertPoint(MulInstr);
// Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
Value *MulA = A, *MulB = B;
if (WidthA < MulWidth)
- MulA = Builder->CreateZExt(A, MulType);
+ MulA = Builder.CreateZExt(A, MulType);
if (WidthB < MulWidth)
- MulB = Builder->CreateZExt(B, MulType);
+ MulB = Builder.CreateZExt(B, MulType);
Value *F = Intrinsic::getDeclaration(I.getModule(),
Intrinsic::umul_with_overflow, MulType);
- CallInst *Call = Builder->CreateCall(F, {MulA, MulB}, "umul");
+ CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
IC.Worklist.Add(MulInstr);
// If there are uses of mul result other than the comparison, we know that
// they are truncation or binary AND. Change them to use result of
// mul.with.overflow and adjust properly mask/size.
if (MulVal->hasNUsesOrMore(2)) {
- Value *Mul = Builder->CreateExtractValue(Call, 0, "umul.value");
+ Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
for (User *U : MulVal->users()) {
if (U == &I || U == OtherVal)
continue;
@@ -3843,17 +3856,18 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
assert(BO->getOpcode() == Instruction::And);
// Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
- ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
- APInt ShortMask = CI->getValue().trunc(MulWidth);
- Value *ShortAnd = Builder->CreateAnd(Mul, ShortMask);
- Instruction *Zext =
- cast<Instruction>(Builder->CreateZExt(ShortAnd, BO->getType()));
- IC.Worklist.Add(Zext);
+ Value *ShortMask =
+ Builder.CreateTrunc(BO->getOperand(1), Builder.getIntNTy(MulWidth));
+ Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
+ Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());
+ if (auto *ZextI = dyn_cast<Instruction>(Zext))
+ IC.Worklist.Add(ZextI);
IC.replaceInstUsesWith(*BO, Zext);
} else {
llvm_unreachable("Unexpected Binary operation");
}
- IC.Worklist.Add(cast<Instruction>(U));
+ if (auto *UI = dyn_cast<Instruction>(U))
+ IC.Worklist.Add(UI);
}
}
if (isa<Instruction>(OtherVal))
@@ -3884,7 +3898,7 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
llvm_unreachable("Unexpected predicate");
}
if (Inverse) {
- Value *Res = Builder->CreateExtractValue(Call, 1);
+ Value *Res = Builder.CreateExtractValue(Call, 1);
return BinaryOperator::CreateNot(Res);
}
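The rewrite performed by processUMulZExtIdiom above — a full-width multiply of two zero-extended narrow values, later probed for overflow by masking or truncation, replaced with llvm.umul.with.overflow on the narrow type — can be sanity-checked in standalone C++. This is an illustrative sketch only; __builtin_mul_overflow stands in for the intrinsic.

#include <cassert>
#include <cstdint>

int main() {
  const uint16_t Samples[] = {0, 1, 2, 255, 256, 4096, 65534, 65535};
  for (uint16_t A : Samples) {
    for (uint16_t B : Samples) {
      uint32_t Wide = uint32_t(A) * uint32_t(B); // mul(zext A, zext B)
      bool WideOverflows = (Wide >> 16) != 0;    // "high half nonzero" test
      uint16_t Narrow;
      bool NarrowOverflows = __builtin_mul_overflow(A, B, &Narrow);
      assert(WideOverflows == NarrowOverflows);  // same overflow verdict
      assert(uint16_t(Wide) == Narrow);          // same low-half product
    }
  }
  return 0;
}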
@@ -4239,7 +4253,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
if (Op1Max == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
- Builder->getInt(CI->getValue() - 1));
+ Builder.getInt(CI->getValue() - 1));
}
break;
case ICmpInst::ICMP_SGT:
@@ -4253,7 +4267,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
if (Op1Min == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
- Builder->getInt(CI->getValue() + 1));
+ Builder.getInt(CI->getValue() + 1));
}
break;
case ICmpInst::ICMP_SGE:
@@ -4358,7 +4372,7 @@ static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
static Instruction *canonicalizeICmpBool(ICmpInst &I,
InstCombiner::BuilderTy &Builder) {
Value *A = I.getOperand(0), *B = I.getOperand(1);
- assert(A->getType()->getScalarType()->isIntegerTy(1) && "Bools only");
+ assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
// A boolean compared to true/false can be simplified to Op0/true/false in
// 14 out of the 20 (10 predicates * 2 constants) possible combinations.
@@ -4465,8 +4479,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
}
}
- if (Op0->getType()->getScalarType()->isIntegerTy(1))
- if (Instruction *Res = canonicalizeICmpBool(I, *Builder))
+ if (Op0->getType()->isIntOrIntVectorTy(1))
+ if (Instruction *Res = canonicalizeICmpBool(I, Builder))
return Res;
if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
@@ -4559,7 +4573,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
} else {
// Otherwise, cast the RHS right before the icmp
- Op1 = Builder->CreateBitCast(Op1, Op0->getType());
+ Op1 = Builder.CreateBitCast(Op1, Op0->getType());
}
}
return new ICmpInst(I.getPredicate(), Op0, Op1);
@@ -4592,8 +4606,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
match(Op1, m_Zero()) &&
isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
- return new ICmpInst(I.getInversePredicate(),
- Builder->CreateAnd(A, B),
+ return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
Op1);
// ~X < ~Y --> Y < X
@@ -4693,10 +4706,10 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
- return replaceInstUsesWith(I, Builder->getFalse());
+ return replaceInstUsesWith(I, Builder.getFalse());
assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
- return replaceInstUsesWith(I, Builder->getTrue());
+ return replaceInstUsesWith(I, Builder.getTrue());
}
}
@@ -4762,9 +4775,9 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
Pred = ICmpInst::ICMP_NE;
break;
case FCmpInst::FCMP_ORD:
- return replaceInstUsesWith(I, Builder->getTrue());
+ return replaceInstUsesWith(I, Builder.getTrue());
case FCmpInst::FCMP_UNO:
- return replaceInstUsesWith(I, Builder->getFalse());
+ return replaceInstUsesWith(I, Builder.getFalse());
}
// Now we know that the APFloat is a normal number, zero or inf.
@@ -4782,8 +4795,8 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
Pred == ICmpInst::ICMP_SLE)
- return replaceInstUsesWith(I, Builder->getTrue());
- return replaceInstUsesWith(I, Builder->getFalse());
+ return replaceInstUsesWith(I, Builder.getTrue());
+ return replaceInstUsesWith(I, Builder.getFalse());
}
} else {
// If the RHS value is > UnsignedMax, fold the comparison. This handles
@@ -4794,8 +4807,8 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
Pred == ICmpInst::ICMP_ULE)
- return replaceInstUsesWith(I, Builder->getTrue());
- return replaceInstUsesWith(I, Builder->getFalse());
+ return replaceInstUsesWith(I, Builder.getTrue());
+ return replaceInstUsesWith(I, Builder.getFalse());
}
}
@@ -4807,8 +4820,8 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
Pred == ICmpInst::ICMP_SGE)
- return replaceInstUsesWith(I, Builder->getTrue());
- return replaceInstUsesWith(I, Builder->getFalse());
+ return replaceInstUsesWith(I, Builder.getTrue());
+ return replaceInstUsesWith(I, Builder.getFalse());
}
} else {
// See if the RHS value is < UnsignedMin.
@@ -4818,8 +4831,8 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
Pred == ICmpInst::ICMP_UGE)
- return replaceInstUsesWith(I, Builder->getTrue());
- return replaceInstUsesWith(I, Builder->getFalse());
+ return replaceInstUsesWith(I, Builder.getTrue());
+ return replaceInstUsesWith(I, Builder.getFalse());
}
}
@@ -4841,14 +4854,14 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
switch (Pred) {
default: llvm_unreachable("Unexpected integer comparison!");
case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
- return replaceInstUsesWith(I, Builder->getTrue());
+ return replaceInstUsesWith(I, Builder.getTrue());
case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
- return replaceInstUsesWith(I, Builder->getFalse());
+ return replaceInstUsesWith(I, Builder.getFalse());
case ICmpInst::ICMP_ULE:
// (float)int <= 4.4 --> int <= 4
// (float)int <= -4.4 --> false
if (RHS.isNegative())
- return replaceInstUsesWith(I, Builder->getFalse());
+ return replaceInstUsesWith(I, Builder.getFalse());
break;
case ICmpInst::ICMP_SLE:
// (float)int <= 4.4 --> int <= 4
@@ -4860,7 +4873,7 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
// (float)int < -4.4 --> false
// (float)int < 4.4 --> int <= 4
if (RHS.isNegative())
- return replaceInstUsesWith(I, Builder->getFalse());
+ return replaceInstUsesWith(I, Builder.getFalse());
Pred = ICmpInst::ICMP_ULE;
break;
case ICmpInst::ICMP_SLT:
@@ -4873,7 +4886,7 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
// (float)int > 4.4 --> int > 4
// (float)int > -4.4 --> true
if (RHS.isNegative())
- return replaceInstUsesWith(I, Builder->getTrue());
+ return replaceInstUsesWith(I, Builder.getTrue());
break;
case ICmpInst::ICMP_SGT:
// (float)int > 4.4 --> int > 4
@@ -4885,7 +4898,7 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
// (float)int >= -4.4 --> true
// (float)int >= 4.4 --> int > 4
if (RHS.isNegative())
- return replaceInstUsesWith(I, Builder->getTrue());
+ return replaceInstUsesWith(I, Builder.getTrue());
Pred = ICmpInst::ICMP_UGT;
break;
case ICmpInst::ICMP_SGE:
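The integer/FP compare folds in foldFCmpIntToFPConst above (e.g. "(float)int <= 4.4 --> int <= 4") hold whenever the int-to-float conversion is exact over the range in question; a small standalone check of one such case:

#include <cassert>

int main() {
  for (int I = -10; I <= 10; ++I) {
    bool FPForm = (float)I <= 4.4;  // original floating-point compare
    bool IntForm = I <= 4;          // folded integer compare
    assert(FPForm == IntForm);
  }
  return 0;
}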
diff --git a/lib/Transforms/InstCombine/InstCombineInternal.h b/lib/Transforms/InstCombine/InstCombineInternal.h
index 87f11467b95e..c38a4981bf1d 100644
--- a/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -21,8 +21,6 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/BinaryFormat/Dwarf.h"
-#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
@@ -212,7 +210,7 @@ public:
/// \brief An IRBuilder that automatically inserts new instructions into the
/// worklist.
typedef IRBuilder<TargetFolder, IRBuilderCallbackInserter> BuilderTy;
- BuilderTy *Builder;
+ BuilderTy &Builder;
private:
// Mode in which we are running the combiner.
@@ -235,7 +233,7 @@ private:
bool MadeIRChange;
public:
- InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder,
+ InstCombiner(InstCombineWorklist &Worklist, BuilderTy &Builder,
bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
AssumptionCache &AC, TargetLibraryInfo &TLI, DominatorTree &DT,
const DataLayout &DL, LoopInfo *LI)
@@ -598,9 +596,8 @@ private:
/// This tries to simplify binary operations by factorizing out common terms
/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
- Value *tryFactorization(InstCombiner::BuilderTy *, BinaryOperator &,
- Instruction::BinaryOps, Value *, Value *, Value *,
- Value *);
+ Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
+ Value *, Value *, Value *);
/// Match a select chain which produces one of three values based on whether
/// the LHS is less than, equal to, or greater than RHS respectively.
@@ -639,7 +636,6 @@ private:
APInt &UndefElts, unsigned Depth = 0);
Value *SimplifyVectorOp(BinaryOperator &Inst);
- Value *SimplifyBSwap(BinaryOperator &Inst);
/// Given a binary operator, cast instruction, or select which has a PHI node
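The header change above is the core of this patch: InstCombiner's Builder member becomes a reference instead of a pointer, so it is bound once at construction, can never be null, and every use site switches from "Builder->" to "Builder.". A minimal standalone sketch of that pattern (the names below are stand-ins, not the real LLVM types):

#include <iostream>
#include <string>

struct IRBuilderStandIn {                 // stands in for InstCombiner::BuilderTy
  std::string createAdd(int A, int B) {
    return "add " + std::to_string(A) + ", " + std::to_string(B);
  }
};

class CombinerStandIn {                   // stands in for InstCombiner
  IRBuilderStandIn &Builder;              // was: IRBuilderStandIn *Builder;
public:
  explicit CombinerStandIn(IRBuilderStandIn &B) : Builder(B) {}
  void run() { std::cout << Builder.createAdd(1, 2) << '\n'; } // was: Builder->createAdd(...)
};

int main() {
  IRBuilderStandIn B;
  CombinerStandIn IC(B);                  // caller now passes the builder by reference
  IC.run();
  return 0;
}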
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 26bee204e5a4..c59e1ce69ac2 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -189,7 +189,7 @@ static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
return nullptr;
// Canonicalize it.
- Value *V = IC.Builder->getInt32(1);
+ Value *V = IC.Builder.getInt32(1);
AI.setOperand(0, V);
return &AI;
}
@@ -197,7 +197,7 @@ static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
- AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
+ AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
New->setAlignment(AI.getAlignment());
// Scan to the end of the allocation instructions, to skip over a block of
@@ -229,7 +229,7 @@ static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
// any casting is exposed early.
Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
if (AI.getArraySize()->getType() != IntPtrTy) {
- Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
+ Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
AI.setOperand(0, V);
return &AI;
}
@@ -458,10 +458,10 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT
SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
LI.getAllMetadata(MD);
- LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
- IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
+ LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
+ IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
- NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
MDBuilder MDB(NewLoad->getContext());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
@@ -518,10 +518,10 @@ static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value
SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
SI.getAllMetadata(MD);
- StoreInst *NewStore = IC.Builder->CreateAlignedStore(
- V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
+ StoreInst *NewStore = IC.Builder.CreateAlignedStore(
+ V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
SI.getAlignment(), SI.isVolatile());
- NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
+ NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
MDNode *N = MDPair.second;
@@ -613,7 +613,7 @@ static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
// Replace all the stores with stores of the newly loaded value.
for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
auto *SI = cast<StoreInst>(*UI++);
- IC.Builder->SetInsertPoint(SI);
+ IC.Builder.SetInsertPoint(SI);
combineStoreToNewValue(IC, *SI, NewLoad);
IC.eraseInstFromFunction(*SI);
}
@@ -664,7 +664,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
NewLoad->setAAMetadata(AAMD);
- return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
+ return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
UndefValue::get(T), NewLoad, 0, Name));
}
@@ -689,15 +689,15 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
Zero,
ConstantInt::get(IdxType, i),
};
- auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
- Name + ".elt");
+ auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
+ Name + ".elt");
auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
- auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
+ auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
// Propagate AA metadata. It'll still be valid on the narrowed load.
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
L->setAAMetadata(AAMD);
- V = IC.Builder->CreateInsertValue(V, L, i);
+ V = IC.Builder.CreateInsertValue(V, L, i);
}
V->setName(Name);
@@ -712,7 +712,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
NewLoad->setAAMetadata(AAMD);
- return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
+ return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
UndefValue::get(T), NewLoad, 0, Name));
}
@@ -740,14 +740,14 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
Zero,
ConstantInt::get(IdxType, i),
};
- auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
- Name + ".elt");
- auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
- Name + ".unpack");
+ auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
+ Name + ".elt");
+ auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
+ Name + ".unpack");
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
L->setAAMetadata(AAMD);
- V = IC.Builder->CreateInsertValue(V, L, i);
+ V = IC.Builder.CreateInsertValue(V, L, i);
Offset += EltSize;
}
@@ -982,8 +982,8 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);
return replaceInstUsesWith(
- LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
- LI.getName() + ".cast"));
+ LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
+ LI.getName() + ".cast"));
}
// None of the following transforms are legal for volatile/ordered atomic
@@ -1019,15 +1019,15 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
unsigned Align = LI.getAlignment();
if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
- LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
- SI->getOperand(1)->getName()+".val");
- LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
- SI->getOperand(2)->getName()+".val");
+ LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
+ SI->getOperand(1)->getName()+".val");
+ LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
+ SI->getOperand(2)->getName()+".val");
assert(LI.isUnordered() && "implied by above");
V1->setAlignment(Align);
- V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
V2->setAlignment(Align);
- V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
return SelectInst::Create(SI->getCondition(), V1, V2);
}
@@ -1172,7 +1172,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
// If the struct only have one element, we unpack.
unsigned Count = ST->getNumElements();
if (Count == 1) {
- V = IC.Builder->CreateExtractValue(V, 0);
+ V = IC.Builder.CreateExtractValue(V, 0);
combineStoreToNewValue(IC, SI, V);
return true;
}
@@ -1201,12 +1201,11 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
Zero,
ConstantInt::get(IdxType, i),
};
- auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
- AddrName);
- auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
+ auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
+ AddrName);
+ auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
- llvm::Instruction *NS =
- IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
+ llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
AAMDNodes AAMD;
SI.getAAMetadata(AAMD);
NS->setAAMetadata(AAMD);
@@ -1219,7 +1218,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
// If the array only have one element, we unpack.
auto NumElements = AT->getNumElements();
if (NumElements == 1) {
- V = IC.Builder->CreateExtractValue(V, 0);
+ V = IC.Builder.CreateExtractValue(V, 0);
combineStoreToNewValue(IC, SI, V);
return true;
}
@@ -1252,11 +1251,11 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
Zero,
ConstantInt::get(IdxType, i),
};
- auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
- AddrName);
- auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
+ auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
+ AddrName);
+ auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
auto EltAlign = MinAlign(Align, Offset);
- Instruction *NS = IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
+ Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
AAMDNodes AAMD;
SI.getAAMetadata(AAMD);
NS->setAAMetadata(AAMD);
@@ -1541,7 +1540,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
SI.isVolatile(),
SI.getAlignment(),
SI.getOrdering(),
- SI.getSynchScope());
+ SI.getSyncScopeID());
InsertNewInstBefore(NewSI, *BBI);
// The debug locations of the original instructions might differ; merge them.
NewSI->setDebugLoc(DILocation::getMergedLocation(SI.getDebugLoc(),
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 579639a6194e..e3a50220f94e 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -39,8 +39,8 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
Value *A = nullptr, *B = nullptr, *One = nullptr;
if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
match(One, m_One())) {
- A = IC.Builder->CreateSub(A, B);
- return IC.Builder->CreateShl(One, A);
+ A = IC.Builder.CreateSub(A, B);
+ return IC.Builder.CreateShl(One, A);
}
// (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
@@ -250,9 +250,9 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
ConstantInt *C1;
Value *Sub = nullptr;
if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
- Sub = Builder->CreateSub(X, Y, "suba");
+ Sub = Builder.CreateSub(X, Y, "suba");
else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
- Sub = Builder->CreateSub(Builder->CreateNeg(C1), Y, "subc");
+ Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
if (Sub)
return
BinaryOperator::CreateMul(Sub,
@@ -272,11 +272,11 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
Value *X;
Constant *C1;
if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
- Value *Mul = Builder->CreateMul(C1, Op1);
+ Value *Mul = Builder.CreateMul(C1, Op1);
// Only go forward with the transform if C1*CI simplifies to a tidier
// constant.
if (!match(Mul, m_Mul(m_Value(), m_Value())))
- return BinaryOperator::CreateAdd(Builder->CreateMul(X, Op1), Mul);
+ return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
}
}
}
@@ -318,7 +318,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
: Instruction::SRem;
- Value *Rem = Builder->CreateBinOp(RemOpc, X, DivOp1);
+ Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
if (DivOp1 == Y)
return BinaryOperator::CreateSub(X, Rem);
return BinaryOperator::CreateSub(Rem, X);
@@ -326,7 +326,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
/// i1 mul -> i1 and.
- if (I.getType()->getScalarType()->isIntegerTy(1))
+ if (I.getType()->isIntOrIntVectorTy(1))
return BinaryOperator::CreateAnd(Op0, Op1);
// X*(1 << Y) --> X << Y
@@ -368,7 +368,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
if (BoolCast) {
- Value *V = Builder->CreateSub(Constant::getNullValue(I.getType()),
+ Value *V = Builder.CreateSub(Constant::getNullValue(I.getType()),
BoolCast);
return BinaryOperator::CreateAnd(V, OtherOp);
}
@@ -386,7 +386,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
willNotOverflowSignedMul(Op0Conv->getOperand(0), CI, I)) {
// Insert the new, smaller mul.
Value *NewMul =
- Builder->CreateNSWMul(Op0Conv->getOperand(0), CI, "mulconv");
+ Builder.CreateNSWMul(Op0Conv->getOperand(0), CI, "mulconv");
return new SExtInst(NewMul, I.getType());
}
}
@@ -403,7 +403,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
willNotOverflowSignedMul(Op0Conv->getOperand(0),
Op1Conv->getOperand(0), I)) {
// Insert the new integer mul.
- Value *NewMul = Builder->CreateNSWMul(
+ Value *NewMul = Builder.CreateNSWMul(
Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
return new SExtInst(NewMul, I.getType());
}
@@ -422,7 +422,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
willNotOverflowUnsignedMul(Op0Conv->getOperand(0), CI, I)) {
// Insert the new, smaller mul.
Value *NewMul =
- Builder->CreateNUWMul(Op0Conv->getOperand(0), CI, "mulconv");
+ Builder.CreateNUWMul(Op0Conv->getOperand(0), CI, "mulconv");
return new ZExtInst(NewMul, I.getType());
}
}
@@ -439,7 +439,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
willNotOverflowUnsignedMul(Op0Conv->getOperand(0),
Op1Conv->getOperand(0), I)) {
// Insert the new integer mul.
- Value *NewMul = Builder->CreateNUWMul(
+ Value *NewMul = Builder.CreateNUWMul(
Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
return new ZExtInst(NewMul, I.getType());
}
@@ -698,11 +698,11 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
}
// if pattern detected emit alternate sequence
if (OpX && OpY) {
- BuilderTy::FastMathFlagGuard Guard(*Builder);
- Builder->setFastMathFlags(Log2->getFastMathFlags());
+ BuilderTy::FastMathFlagGuard Guard(Builder);
+ Builder.setFastMathFlags(Log2->getFastMathFlags());
Log2->setArgOperand(0, OpY);
- Value *FMulVal = Builder->CreateFMul(OpX, Log2);
- Value *FSub = Builder->CreateFSub(FMulVal, OpX);
+ Value *FMulVal = Builder.CreateFMul(OpX, Log2);
+ Value *FSub = Builder.CreateFSub(FMulVal, OpX);
FSub->takeName(&I);
return replaceInstUsesWith(I, FSub);
}
@@ -714,23 +714,23 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
for (int i = 0; i < 2; i++) {
bool IgnoreZeroSign = I.hasNoSignedZeros();
if (BinaryOperator::isFNeg(Opnd0, IgnoreZeroSign)) {
- BuilderTy::FastMathFlagGuard Guard(*Builder);
- Builder->setFastMathFlags(I.getFastMathFlags());
+ BuilderTy::FastMathFlagGuard Guard(Builder);
+ Builder.setFastMathFlags(I.getFastMathFlags());
Value *N0 = dyn_castFNegVal(Opnd0, IgnoreZeroSign);
Value *N1 = dyn_castFNegVal(Opnd1, IgnoreZeroSign);
// -X * -Y => X*Y
if (N1) {
- Value *FMul = Builder->CreateFMul(N0, N1);
+ Value *FMul = Builder.CreateFMul(N0, N1);
FMul->takeName(&I);
return replaceInstUsesWith(I, FMul);
}
if (Opnd0->hasOneUse()) {
// -X * Y => -(X*Y) (Promote negation as high as possible)
- Value *T = Builder->CreateFMul(N0, Opnd1);
- Value *Neg = Builder->CreateFNeg(T);
+ Value *T = Builder.CreateFMul(N0, Opnd1);
+ Value *Neg = Builder.CreateFNeg(T);
Neg->takeName(&I);
return replaceInstUsesWith(I, Neg);
}
@@ -755,10 +755,10 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Y = Opnd0_0;
if (Y) {
- BuilderTy::FastMathFlagGuard Guard(*Builder);
- Builder->setFastMathFlags(I.getFastMathFlags());
- Value *T = Builder->CreateFMul(Opnd1, Opnd1);
- Value *R = Builder->CreateFMul(T, Y);
+ BuilderTy::FastMathFlagGuard Guard(Builder);
+ Builder.setFastMathFlags(I.getFastMathFlags());
+ Value *T = Builder.CreateFMul(Opnd1, Opnd1);
+ Value *R = Builder.CreateFMul(T, Y);
R->takeName(&I);
return replaceInstUsesWith(I, R);
}
@@ -824,7 +824,7 @@ bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
*I = SI->getOperand(NonNullOperand);
Worklist.Add(&*BBI);
} else if (*I == SelectCond) {
- *I = Builder->getInt1(NonNullOperand == 1);
+ *I = Builder.getInt1(NonNullOperand == 1);
Worklist.Add(&*BBI);
}
}
@@ -938,20 +938,18 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
}
if (match(Op0, m_One())) {
- assert(!I.getType()->getScalarType()->isIntegerTy(1) &&
- "i1 divide not removed?");
+ assert(!I.getType()->isIntOrIntVectorTy(1) && "i1 divide not removed?");
if (I.getOpcode() == Instruction::SDiv) {
// If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
// result is one, if Op1 is -1 then the result is minus one, otherwise
// it's zero.
- Value *Inc = Builder->CreateAdd(Op1, Op0);
- Value *Cmp = Builder->CreateICmpULT(
- Inc, ConstantInt::get(I.getType(), 3));
+ Value *Inc = Builder.CreateAdd(Op1, Op0);
+ Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(I.getType(), 3));
return SelectInst::Create(Cmp, Op1, ConstantInt::get(I.getType(), 0));
} else {
// If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
// result is one, otherwise it's zero.
- return new ZExtInst(Builder->CreateICmpEQ(Op1, Op0), I.getType());
+ return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), I.getType());
}
}
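For the "match(Op0, m_One())" case above, the comments spell out the result: 1 udiv X is 1 exactly when X is 1 and 0 for any other non-zero X (X == 0 is undefined behaviour), which justifies the rewrite to zext(icmp eq X, 1). A scalar sanity check, illustrative only:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X = 1; X < 50; ++X)
    assert(1u / X == uint32_t(X == 1));  // 1 udiv X == zext(X == 1)
  return 0;
}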
@@ -1026,7 +1024,7 @@ static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
// X udiv C, where C >= signbit
static Instruction *foldUDivNegCst(Value *Op0, Value *Op1,
const BinaryOperator &I, InstCombiner &IC) {
- Value *ICI = IC.Builder->CreateICmpULT(Op0, cast<ConstantInt>(Op1));
+ Value *ICI = IC.Builder.CreateICmpULT(Op0, cast<ConstantInt>(Op1));
return SelectInst::Create(ICI, Constant::getNullValue(I.getType()),
ConstantInt::get(I.getType(), 1));
@@ -1045,10 +1043,9 @@ static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
if (!match(ShiftLeft, m_Shl(m_APInt(CI), m_Value(N))))
llvm_unreachable("match should never fail here!");
if (*CI != 1)
- N = IC.Builder->CreateAdd(N,
- ConstantInt::get(N->getType(), CI->logBase2()));
+ N = IC.Builder.CreateAdd(N, ConstantInt::get(N->getType(), CI->logBase2()));
if (Op1 != ShiftLeft)
- N = IC.Builder->CreateZExt(N, Op1->getType());
+ N = IC.Builder.CreateZExt(N, Op1->getType());
BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
if (I.isExact())
LShr->setIsExact();
@@ -1134,7 +1131,7 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
if (ZExtInst *ZOp0 = dyn_cast<ZExtInst>(Op0))
if (Value *ZOp1 = dyn_castZExtVal(Op1, ZOp0->getSrcTy()))
return new ZExtInst(
- Builder->CreateUDiv(ZOp0->getOperand(0), ZOp1, "div", I.isExact()),
+ Builder.CreateUDiv(ZOp0->getOperand(0), ZOp1, "div", I.isExact()),
I.getType());
// (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
@@ -1209,7 +1206,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
Constant *NarrowDivisor =
ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
- Value *NarrowOp = Builder->CreateSDiv(Op0Src, NarrowDivisor);
+ Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
return new SExtInst(NarrowOp, Op0->getType());
}
}
@@ -1217,7 +1214,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
if (Constant *RHS = dyn_cast<Constant>(Op1)) {
// X/INT_MIN -> X == INT_MIN
if (RHS->isMinSignedValue())
- return new ZExtInst(Builder->CreateICmpEQ(Op0, Op1), I.getType());
+ return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType());
// -X/C --> X/-C provided the negation doesn't overflow.
Value *X;
@@ -1380,7 +1377,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
// (X/Y) / Z => X / (Y*Z)
//
if (!isa<Constant>(Y) || !isa<Constant>(Op1)) {
- NewInst = Builder->CreateFMul(Y, Op1);
+ NewInst = Builder.CreateFMul(Y, Op1);
if (Instruction *RI = dyn_cast<Instruction>(NewInst)) {
FastMathFlags Flags = I.getFastMathFlags();
Flags &= cast<Instruction>(Op0)->getFastMathFlags();
@@ -1392,7 +1389,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
// Z / (X/Y) => Z*Y / X
//
if (!isa<Constant>(Y) || !isa<Constant>(Op0)) {
- NewInst = Builder->CreateFMul(Op0, Y);
+ NewInst = Builder.CreateFMul(Op0, Y);
if (Instruction *RI = dyn_cast<Instruction>(NewInst)) {
FastMathFlags Flags = I.getFastMathFlags();
Flags &= cast<Instruction>(Op1)->getFastMathFlags();
@@ -1483,28 +1480,28 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
// (zext A) urem (zext B) --> zext (A urem B)
if (ZExtInst *ZOp0 = dyn_cast<ZExtInst>(Op0))
if (Value *ZOp1 = dyn_castZExtVal(Op1, ZOp0->getSrcTy()))
- return new ZExtInst(Builder->CreateURem(ZOp0->getOperand(0), ZOp1),
+ return new ZExtInst(Builder.CreateURem(ZOp0->getOperand(0), ZOp1),
I.getType());
// X urem Y -> X and Y-1, where Y is a power of 2,
if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
Constant *N1 = Constant::getAllOnesValue(I.getType());
- Value *Add = Builder->CreateAdd(Op1, N1);
+ Value *Add = Builder.CreateAdd(Op1, N1);
return BinaryOperator::CreateAnd(Op0, Add);
}
// 1 urem X -> zext(X != 1)
if (match(Op0, m_One())) {
- Value *Cmp = Builder->CreateICmpNE(Op1, Op0);
- Value *Ext = Builder->CreateZExt(Cmp, I.getType());
+ Value *Cmp = Builder.CreateICmpNE(Op1, Op0);
+ Value *Ext = Builder.CreateZExt(Cmp, I.getType());
return replaceInstUsesWith(I, Ext);
}
// X urem C -> X < C ? X : X - C, where C >= signbit.
const APInt *DivisorC;
if (match(Op1, m_APInt(DivisorC)) && DivisorC->isNegative()) {
- Value *Cmp = Builder->CreateICmpULT(Op0, Op1);
- Value *Sub = Builder->CreateSub(Op0, Op1);
+ Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
+ Value *Sub = Builder.CreateSub(Op0, Op1);
return SelectInst::Create(Cmp, Op0, Sub);
}
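One of the urem folds above rewrites "X urem Y" as "X and (Y - 1)" when Y is known to be a power of two. A standalone scalar check of that equivalence (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Y = 8;                  // power-of-two divisor
  for (uint32_t X = 0; X < 100; ++X)
    assert(X % Y == (X & (Y - 1)));      // X urem Y == X and (Y - 1)
  return 0;
}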
diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 5dbf1e85b05b..0011412c2bf4 100644
--- a/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -636,10 +636,10 @@ static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
/// Return an existing non-zero constant if this phi node has one, otherwise
/// return constant 1.
static ConstantInt *GetAnyNonZeroConstInt(PHINode &PN) {
- assert(isa<IntegerType>(PN.getType()) && "Expect only intger type phi");
+ assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
for (Value *V : PN.operands())
if (auto *ConstVA = dyn_cast<ConstantInt>(V))
- if (!ConstVA->isZeroValue())
+ if (!ConstVA->isZero())
return ConstVA;
return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
}
@@ -836,12 +836,12 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
}
// Otherwise, do an extract in the predecessor.
- Builder->SetInsertPoint(Pred->getTerminator());
+ Builder.SetInsertPoint(Pred->getTerminator());
Value *Res = InVal;
if (Offset)
- Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
+ Res = Builder.CreateLShr(Res, ConstantInt::get(InVal->getType(),
Offset), "extract");
- Res = Builder->CreateTrunc(Res, Ty, "extract.t");
+ Res = Builder.CreateTrunc(Res, Ty, "extract.t");
PredVal = Res;
EltPHI->addIncoming(Res, Pred);
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 80c6595904e1..4eebe8255998 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -61,12 +61,12 @@ static CmpInst::Predicate getCmpPredicateForMinMax(SelectPatternFlavor SPF,
}
}
-static Value *generateMinMaxSelectPattern(InstCombiner::BuilderTy *Builder,
+static Value *generateMinMaxSelectPattern(InstCombiner::BuilderTy &Builder,
SelectPatternFlavor SPF, Value *A,
Value *B) {
CmpInst::Predicate Pred = getCmpPredicateForMinMax(SPF);
assert(CmpInst::isIntPredicate(Pred));
- return Builder->CreateSelect(Builder->CreateICmp(Pred, A, B), A, B);
+ return Builder.CreateSelect(Builder.CreateICmp(Pred, A, B), A, B);
}
/// We want to turn code that looks like this:
@@ -167,8 +167,8 @@ Instruction *InstCombiner::foldSelectOpOp(SelectInst &SI, Instruction *TI,
// Fold this by inserting a select from the input values.
Value *NewSI =
- Builder->CreateSelect(SI.getCondition(), TI->getOperand(0),
- FI->getOperand(0), SI.getName() + ".v", &SI);
+ Builder.CreateSelect(SI.getCondition(), TI->getOperand(0),
+ FI->getOperand(0), SI.getName() + ".v", &SI);
return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
TI->getType());
}
@@ -211,8 +211,8 @@ Instruction *InstCombiner::foldSelectOpOp(SelectInst &SI, Instruction *TI,
}
// If we reach here, they do have operations in common.
- Value *NewSI = Builder->CreateSelect(SI.getCondition(), OtherOpT, OtherOpF,
- SI.getName() + ".v", &SI);
+ Value *NewSI = Builder.CreateSelect(SI.getCondition(), OtherOpT, OtherOpF,
+ SI.getName() + ".v", &SI);
Value *Op0 = MatchIsOpZero ? MatchOp : NewSI;
Value *Op1 = MatchIsOpZero ? NewSI : MatchOp;
return BinaryOperator::Create(BO->getOpcode(), Op0, Op1);
@@ -227,8 +227,8 @@ static bool isSelect01(Constant *C1, Constant *C2) {
return false;
if (!C1I->isZero() && !C2I->isZero()) // One side must be zero.
return false;
- return C1I->isOne() || C1I->isAllOnesValue() ||
- C2I->isOne() || C2I->isAllOnesValue();
+ return C1I->isOne() || C1I->isMinusOne() ||
+ C2I->isOne() || C2I->isMinusOne();
}
/// Try to fold the select into one of the operands to allow further
@@ -254,7 +254,7 @@ Instruction *InstCombiner::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
// Avoid creating select between 2 constants unless it's selecting
// between 0, 1 and -1.
if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
- Value *NewSel = Builder->CreateSelect(SI.getCondition(), OOp, C);
+ Value *NewSel = Builder.CreateSelect(SI.getCondition(), OOp, C);
NewSel->takeName(TVI);
BinaryOperator *TVI_BO = cast<BinaryOperator>(TVI);
BinaryOperator *BO = BinaryOperator::Create(TVI_BO->getOpcode(),
@@ -284,7 +284,7 @@ Instruction *InstCombiner::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
// Avoid creating select between 2 constants unless it's selecting
// between 0, 1 and -1.
if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
- Value *NewSel = Builder->CreateSelect(SI.getCondition(), C, OOp);
+ Value *NewSel = Builder.CreateSelect(SI.getCondition(), C, OOp);
NewSel->takeName(FVI);
BinaryOperator *FVI_BO = cast<BinaryOperator>(FVI);
BinaryOperator *BO = BinaryOperator::Create(FVI_BO->getOpcode(),
@@ -315,7 +315,7 @@ Instruction *InstCombiner::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
/// 3. The magnitude of C2 and C1 are flipped
static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
Value *FalseVal,
- InstCombiner::BuilderTy *Builder) {
+ InstCombiner::BuilderTy &Builder) {
const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
if (!IC || !SI.getType()->isIntegerTy())
return nullptr;
@@ -383,22 +383,22 @@ static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
if (NeedAnd) {
// Insert the AND instruction on the input to the truncate.
APInt C1 = APInt::getOneBitSet(V->getType()->getScalarSizeInBits(), C1Log);
- V = Builder->CreateAnd(V, ConstantInt::get(V->getType(), C1));
+ V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), C1));
}
if (C2Log > C1Log) {
- V = Builder->CreateZExtOrTrunc(V, Y->getType());
- V = Builder->CreateShl(V, C2Log - C1Log);
+ V = Builder.CreateZExtOrTrunc(V, Y->getType());
+ V = Builder.CreateShl(V, C2Log - C1Log);
} else if (C1Log > C2Log) {
- V = Builder->CreateLShr(V, C1Log - C2Log);
- V = Builder->CreateZExtOrTrunc(V, Y->getType());
+ V = Builder.CreateLShr(V, C1Log - C2Log);
+ V = Builder.CreateZExtOrTrunc(V, Y->getType());
} else
- V = Builder->CreateZExtOrTrunc(V, Y->getType());
+ V = Builder.CreateZExtOrTrunc(V, Y->getType());
if (NeedXor)
- V = Builder->CreateXor(V, *C2);
+ V = Builder.CreateXor(V, *C2);
- return Builder->CreateOr(V, Y);
+ return Builder.CreateOr(V, Y);
}
/// Attempt to fold a cttz/ctlz followed by a icmp plus select into a single
@@ -414,7 +414,7 @@ static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
/// into:
/// %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
- InstCombiner::BuilderTy *Builder) {
+ InstCombiner::BuilderTy &Builder) {
ICmpInst::Predicate Pred = ICI->getPredicate();
Value *CmpLHS = ICI->getOperand(0);
Value *CmpRHS = ICI->getOperand(1);
@@ -449,8 +449,8 @@ static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
IntrinsicInst *NewI = cast<IntrinsicInst>(II->clone());
Type *Ty = NewI->getArgOperand(1)->getType();
NewI->setArgOperand(1, Constant::getNullValue(Ty));
- Builder->Insert(NewI);
- return Builder->CreateZExtOrTrunc(NewI, ValueOnZero->getType());
+ Builder.Insert(NewI);
+ return Builder.CreateZExtOrTrunc(NewI, ValueOnZero->getType());
}
return nullptr;
@@ -597,7 +597,7 @@ canonicalizeMinMaxWithConstant(SelectInst &Sel, ICmpInst &Cmp,
/// Visit a SelectInst that has an ICmpInst as its first operand.
Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
ICmpInst *ICI) {
- if (Instruction *NewSel = canonicalizeMinMaxWithConstant(SI, *ICI, *Builder))
+ if (Instruction *NewSel = canonicalizeMinMaxWithConstant(SI, *ICI, Builder))
return NewSel;
bool Changed = adjustMinMax(SI, *ICI);
@@ -617,23 +617,23 @@ Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
if (TrueVal->getType() == Ty) {
if (ConstantInt *Cmp = dyn_cast<ConstantInt>(CmpRHS)) {
ConstantInt *C1 = nullptr, *C2 = nullptr;
- if (Pred == ICmpInst::ICMP_SGT && Cmp->isAllOnesValue()) {
+ if (Pred == ICmpInst::ICMP_SGT && Cmp->isMinusOne()) {
C1 = dyn_cast<ConstantInt>(TrueVal);
C2 = dyn_cast<ConstantInt>(FalseVal);
- } else if (Pred == ICmpInst::ICMP_SLT && Cmp->isNullValue()) {
+ } else if (Pred == ICmpInst::ICMP_SLT && Cmp->isZero()) {
C1 = dyn_cast<ConstantInt>(FalseVal);
C2 = dyn_cast<ConstantInt>(TrueVal);
}
if (C1 && C2) {
// This shift results in either -1 or 0.
- Value *AShr = Builder->CreateAShr(CmpLHS, Ty->getBitWidth()-1);
+ Value *AShr = Builder.CreateAShr(CmpLHS, Ty->getBitWidth() - 1);
// Check if we can express the operation with a single or.
- if (C2->isAllOnesValue())
- return replaceInstUsesWith(SI, Builder->CreateOr(AShr, C1));
+ if (C2->isMinusOne())
+ return replaceInstUsesWith(SI, Builder.CreateOr(AShr, C1));
- Value *And = Builder->CreateAnd(AShr, C2->getValue()-C1->getValue());
- return replaceInstUsesWith(SI, Builder->CreateAdd(And, C1));
+ Value *And = Builder.CreateAnd(AShr, C2->getValue() - C1->getValue());
+ return replaceInstUsesWith(SI, Builder.CreateAdd(And, C1));
}
}
}
@@ -684,19 +684,19 @@ Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
// (X & Y) == 0 ? X : X ^ Y --> X & ~Y
if (TrueWhenUnset && TrueVal == X &&
match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
- V = Builder->CreateAnd(X, ~(*Y));
+ V = Builder.CreateAnd(X, ~(*Y));
// (X & Y) != 0 ? X ^ Y : X --> X & ~Y
else if (!TrueWhenUnset && FalseVal == X &&
match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
- V = Builder->CreateAnd(X, ~(*Y));
+ V = Builder.CreateAnd(X, ~(*Y));
// (X & Y) == 0 ? X ^ Y : X --> X | Y
else if (TrueWhenUnset && FalseVal == X &&
match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
- V = Builder->CreateOr(X, *Y);
+ V = Builder.CreateOr(X, *Y);
// (X & Y) != 0 ? X : X ^ Y --> X | Y
else if (!TrueWhenUnset && TrueVal == X &&
match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
- V = Builder->CreateOr(X, *Y);
+ V = Builder.CreateOr(X, *Y);
if (V)
return replaceInstUsesWith(SI, V);
@@ -809,8 +809,8 @@ Instruction *InstCombiner::foldSPFofSPF(Instruction *Inner,
(SPF1 == SPF_NABS && SPF2 == SPF_ABS)) {
SelectInst *SI = cast<SelectInst>(Inner);
Value *NewSI =
- Builder->CreateSelect(SI->getCondition(), SI->getFalseValue(),
- SI->getTrueValue(), SI->getName(), SI);
+ Builder.CreateSelect(SI->getCondition(), SI->getFalseValue(),
+ SI->getTrueValue(), SI->getName(), SI);
return replaceInstUsesWith(Outer, NewSI);
}
@@ -848,15 +848,15 @@ Instruction *InstCombiner::foldSPFofSPF(Instruction *Inner,
IsFreeOrProfitableToInvert(B, NotB, ElidesXor) &&
IsFreeOrProfitableToInvert(C, NotC, ElidesXor) && ElidesXor) {
if (!NotA)
- NotA = Builder->CreateNot(A);
+ NotA = Builder.CreateNot(A);
if (!NotB)
- NotB = Builder->CreateNot(B);
+ NotB = Builder.CreateNot(B);
if (!NotC)
- NotC = Builder->CreateNot(C);
+ NotC = Builder.CreateNot(C);
Value *NewInner = generateMinMaxSelectPattern(
Builder, getInverseMinMaxSelectPattern(SPF1), NotA, NotB);
- Value *NewOuter = Builder->CreateNot(generateMinMaxSelectPattern(
+ Value *NewOuter = Builder.CreateNot(generateMinMaxSelectPattern(
Builder, getInverseMinMaxSelectPattern(SPF2), NewInner, NotC));
return replaceInstUsesWith(Outer, NewOuter);
}
@@ -868,9 +868,9 @@ Instruction *InstCombiner::foldSPFofSPF(Instruction *Inner,
/// icmp instruction with zero, and we have an 'and' with the non-constant value
/// and a power of two we can turn the select into a shift on the result of the
/// 'and'.
-static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
- ConstantInt *FalseVal,
- InstCombiner::BuilderTy *Builder) {
+static Value *foldSelectICmpAnd(const SelectInst &SI, APInt TrueVal,
+ APInt FalseVal,
+ InstCombiner::BuilderTy &Builder) {
const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
if (!IC || !IC->isEquality() || !SI.getType()->isIntegerTy())
return nullptr;
@@ -886,56 +886,53 @@ static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
// If both select arms are non-zero see if we have a select of the form
// 'x ? 2^n + C : C'. Then we can offset both arms by C, use the logic
// for 'x ? 2^n : 0' and fix the thing up at the end.
- ConstantInt *Offset = nullptr;
- if (!TrueVal->isZero() && !FalseVal->isZero()) {
- if ((TrueVal->getValue() - FalseVal->getValue()).isPowerOf2())
+ APInt Offset(TrueVal.getBitWidth(), 0);
+ if (!TrueVal.isNullValue() && !FalseVal.isNullValue()) {
+ if ((TrueVal - FalseVal).isPowerOf2())
Offset = FalseVal;
- else if ((FalseVal->getValue() - TrueVal->getValue()).isPowerOf2())
+ else if ((FalseVal - TrueVal).isPowerOf2())
Offset = TrueVal;
else
return nullptr;
// Adjust TrueVal and FalseVal to the offset.
- TrueVal = ConstantInt::get(Builder->getContext(),
- TrueVal->getValue() - Offset->getValue());
- FalseVal = ConstantInt::get(Builder->getContext(),
- FalseVal->getValue() - Offset->getValue());
+ TrueVal -= Offset;
+ FalseVal -= Offset;
}
// Make sure the mask in the 'and' and one of the select arms is a power of 2.
if (!AndRHS->getValue().isPowerOf2() ||
- (!TrueVal->getValue().isPowerOf2() &&
- !FalseVal->getValue().isPowerOf2()))
+ (!TrueVal.isPowerOf2() && !FalseVal.isPowerOf2()))
return nullptr;
// Determine which shift is needed to transform result of the 'and' into the
// desired result.
- ConstantInt *ValC = !TrueVal->isZero() ? TrueVal : FalseVal;
- unsigned ValZeros = ValC->getValue().logBase2();
+ const APInt &ValC = !TrueVal.isNullValue() ? TrueVal : FalseVal;
+ unsigned ValZeros = ValC.logBase2();
unsigned AndZeros = AndRHS->getValue().logBase2();
// If types don't match we can still convert the select by introducing a zext
// or a trunc of the 'and'. The trunc case requires that all of the truncated
// bits are zero, we can figure that out by looking at the 'and' mask.
- if (AndZeros >= ValC->getBitWidth())
+ if (AndZeros >= ValC.getBitWidth())
return nullptr;
- Value *V = Builder->CreateZExtOrTrunc(LHS, SI.getType());
+ Value *V = Builder.CreateZExtOrTrunc(LHS, SI.getType());
if (ValZeros > AndZeros)
- V = Builder->CreateShl(V, ValZeros - AndZeros);
+ V = Builder.CreateShl(V, ValZeros - AndZeros);
else if (ValZeros < AndZeros)
- V = Builder->CreateLShr(V, AndZeros - ValZeros);
+ V = Builder.CreateLShr(V, AndZeros - ValZeros);
// Okay, now we know that everything is set up, we just don't know whether we
// have a icmp_ne or icmp_eq and whether the true or false val is the zero.
- bool ShouldNotVal = !TrueVal->isZero();
+ bool ShouldNotVal = !TrueVal.isNullValue();
ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
if (ShouldNotVal)
- V = Builder->CreateXor(V, ValC);
+ V = Builder.CreateXor(V, ValC);
// Apply an offset if needed.
- if (Offset)
- V = Builder->CreateAdd(V, Offset);
+ if (!Offset.isNullValue())
+ V = Builder.CreateAdd(V, ConstantInt::get(V->getType(), Offset));
return V;
}
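The fold above (now expressed on APInt rather than ConstantInt) turns a select on an "(X & 2^n) == 0" test with power-of-two arms into a shift of the masked value. One concrete instance — mask 4, non-zero arm 8 — checked in standalone C++ as an illustration:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X = 0; X < 64; ++X) {
    uint32_t SelectForm = ((X & 4) == 0) ? 0 : 8;  // original select
    uint32_t ShiftForm = (X & 4) << 1;             // folded: shift the 'and'
    assert(SelectForm == ShiftForm);
  }
  return 0;
}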
@@ -1024,7 +1021,7 @@ Instruction *InstCombiner::foldSelectExtConst(SelectInst &Sel) {
// TODO: Handle larger types? That requires adjusting FoldOpIntoSelect too.
Value *X = ExtInst->getOperand(0);
Type *SmallType = X->getType();
- if (!SmallType->getScalarType()->isIntegerTy(1))
+ if (!SmallType->isIntOrIntVectorTy(1))
return nullptr;
Constant *C;
@@ -1045,7 +1042,7 @@ Instruction *InstCombiner::foldSelectExtConst(SelectInst &Sel) {
// select Cond, (ext X), C --> ext(select Cond, X, C')
// select Cond, C, (ext X) --> ext(select Cond, C', X)
- Value *NewSel = Builder->CreateSelect(Cond, X, TruncCVal, "narrow", &Sel);
+ Value *NewSel = Builder.CreateSelect(Cond, X, TruncCVal, "narrow", &Sel);
return CastInst::Create(Instruction::CastOps(ExtOpcode), NewSel, SelType);
}
@@ -1184,7 +1181,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return &SI;
}
- if (SelType->getScalarType()->isIntegerTy(1) &&
+ if (SelType->isIntOrIntVectorTy(1) &&
TrueVal->getType() == CondVal->getType()) {
if (match(TrueVal, m_One())) {
// Change: A = select B, true, C --> A = or B, C
@@ -1192,7 +1189,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
if (match(TrueVal, m_Zero())) {
// Change: A = select B, false, C --> A = and !B, C
- Value *NotCond = Builder->CreateNot(CondVal, "not." + CondVal->getName());
+ Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
return BinaryOperator::CreateAnd(NotCond, FalseVal);
}
if (match(FalseVal, m_Zero())) {
@@ -1201,7 +1198,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
if (match(FalseVal, m_One())) {
// Change: A = select B, C, true --> A = or !B, C
- Value *NotCond = Builder->CreateNot(CondVal, "not." + CondVal->getName());
+ Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
return BinaryOperator::CreateOr(NotCond, TrueVal);
}
@@ -1226,7 +1223,8 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// select i1 %c, <2 x i8> <1, 1>, <2 x i8> <0, 0>
// because that may need 3 instructions to splat the condition value:
// extend, insertelement, shufflevector.
- if (CondVal->getType()->isVectorTy() == SelType->isVectorTy()) {
+ if (SelType->isIntOrIntVectorTy() &&
+ CondVal->getType()->isVectorTy() == SelType->isVectorTy()) {
// select C, 1, 0 -> zext C to int
if (match(TrueVal, m_One()) && match(FalseVal, m_Zero()))
return new ZExtInst(CondVal, SelType);
@@ -1237,20 +1235,21 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// select C, 0, 1 -> zext !C to int
if (match(TrueVal, m_Zero()) && match(FalseVal, m_One())) {
- Value *NotCond = Builder->CreateNot(CondVal, "not." + CondVal->getName());
+ Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
return new ZExtInst(NotCond, SelType);
}
// select C, 0, -1 -> sext !C to int
if (match(TrueVal, m_Zero()) && match(FalseVal, m_AllOnes())) {
- Value *NotCond = Builder->CreateNot(CondVal, "not." + CondVal->getName());
+ Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
return new SExtInst(NotCond, SelType);
}
}
if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal))
- if (Value *V = foldSelectICmpAnd(SI, TrueValC, FalseValC, Builder))
+ if (Value *V = foldSelectICmpAnd(SI, TrueValC->getValue(),
+ FalseValC->getValue(), Builder))
return replaceInstUsesWith(SI, V);
// See if we are selecting two values based on a comparison of the two values.
@@ -1288,10 +1287,10 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// (X ugt Y) ? X : Y -> (X ole Y) ? Y : X
if (FCI->hasOneUse() && FCmpInst::isUnordered(FCI->getPredicate())) {
FCmpInst::Predicate InvPred = FCI->getInversePredicate();
- IRBuilder<>::FastMathFlagGuard FMFG(*Builder);
- Builder->setFastMathFlags(FCI->getFastMathFlags());
- Value *NewCond = Builder->CreateFCmp(InvPred, TrueVal, FalseVal,
- FCI->getName() + ".inv");
+ IRBuilder<>::FastMathFlagGuard FMFG(Builder);
+ Builder.setFastMathFlags(FCI->getFastMathFlags());
+ Value *NewCond = Builder.CreateFCmp(InvPred, TrueVal, FalseVal,
+ FCI->getName() + ".inv");
return SelectInst::Create(NewCond, FalseVal, TrueVal,
SI.getName() + ".p");
@@ -1331,10 +1330,10 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// (X ugt Y) ? X : Y -> (X ole Y) ? X : Y
if (FCI->hasOneUse() && FCmpInst::isUnordered(FCI->getPredicate())) {
FCmpInst::Predicate InvPred = FCI->getInversePredicate();
- IRBuilder<>::FastMathFlagGuard FMFG(*Builder);
- Builder->setFastMathFlags(FCI->getFastMathFlags());
- Value *NewCond = Builder->CreateFCmp(InvPred, FalseVal, TrueVal,
- FCI->getName() + ".inv");
+ IRBuilder<>::FastMathFlagGuard FMFG(Builder);
+ Builder.setFastMathFlags(FCI->getFastMathFlags());
+ Value *NewCond = Builder.CreateFCmp(InvPred, FalseVal, TrueVal,
+ FCI->getName() + ".inv");
return SelectInst::Create(NewCond, FalseVal, TrueVal,
SI.getName() + ".p");
@@ -1350,7 +1349,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (Instruction *Result = foldSelectInstWithICmp(SI, ICI))
return Result;
- if (Instruction *Add = foldAddSubSelect(SI, *Builder))
+ if (Instruction *Add = foldAddSubSelect(SI, Builder))
return Add;
// Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
@@ -1381,16 +1380,16 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *Cmp;
if (CmpInst::isIntPredicate(Pred)) {
- Cmp = Builder->CreateICmp(Pred, LHS, RHS);
+ Cmp = Builder.CreateICmp(Pred, LHS, RHS);
} else {
- IRBuilder<>::FastMathFlagGuard FMFG(*Builder);
+ IRBuilder<>::FastMathFlagGuard FMFG(Builder);
auto FMF = cast<FPMathOperator>(SI.getCondition())->getFastMathFlags();
- Builder->setFastMathFlags(FMF);
- Cmp = Builder->CreateFCmp(Pred, LHS, RHS);
+ Builder.setFastMathFlags(FMF);
+ Cmp = Builder.CreateFCmp(Pred, LHS, RHS);
}
- Value *NewSI = Builder->CreateCast(
- CastOp, Builder->CreateSelect(Cmp, LHS, RHS, SI.getName(), &SI),
+ Value *NewSI = Builder.CreateCast(
+ CastOp, Builder.CreateSelect(Cmp, LHS, RHS, SI.getName(), &SI),
SelType);
return replaceInstUsesWith(SI, NewSI);
}
@@ -1425,13 +1424,12 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
(SI.hasOneUse() && match(*SI.user_begin(), m_Not(m_Value())));
if (NumberOfNots >= 2) {
- Value *NewLHS = Builder->CreateNot(LHS);
- Value *NewRHS = Builder->CreateNot(RHS);
- Value *NewCmp = SPF == SPF_SMAX
- ? Builder->CreateICmpSLT(NewLHS, NewRHS)
- : Builder->CreateICmpULT(NewLHS, NewRHS);
+ Value *NewLHS = Builder.CreateNot(LHS);
+ Value *NewRHS = Builder.CreateNot(RHS);
+ Value *NewCmp = SPF == SPF_SMAX ? Builder.CreateICmpSLT(NewLHS, NewRHS)
+ : Builder.CreateICmpULT(NewLHS, NewRHS);
Value *NewSI =
- Builder->CreateNot(Builder->CreateSelect(NewCmp, NewLHS, NewRHS));
+ Builder.CreateNot(Builder.CreateSelect(NewCmp, NewLHS, NewRHS));
return replaceInstUsesWith(SI, NewSI);
}
}
@@ -1461,7 +1459,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// We choose this as normal form to enable folding on the And and shortening
// paths for the values (this helps GetUnderlyingObjects() for example).
if (TrueSI->getFalseValue() == FalseVal && TrueSI->hasOneUse()) {
- Value *And = Builder->CreateAnd(CondVal, TrueSI->getCondition());
+ Value *And = Builder.CreateAnd(CondVal, TrueSI->getCondition());
SI.setOperand(0, And);
SI.setOperand(1, TrueSI->getTrueValue());
return &SI;
@@ -1479,7 +1477,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
// select(C0, a, select(C1, a, b)) -> select(C0|C1, a, b)
if (FalseSI->getTrueValue() == TrueVal && FalseSI->hasOneUse()) {
- Value *Or = Builder->CreateOr(CondVal, FalseSI->getCondition());
+ Value *Or = Builder.CreateOr(CondVal, FalseSI->getCondition());
SI.setOperand(0, Or);
SI.setOperand(2, FalseSI->getFalseValue());
return &SI;
@@ -1541,7 +1539,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return replaceInstUsesWith(SI, FalseVal);
}
- if (Instruction *BitCastSel = foldSelectCmpBitcasts(SI, *Builder))
+ if (Instruction *BitCastSel = foldSelectCmpBitcasts(SI, Builder))
return BitCastSel;
return nullptr;
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 1bb1a85367d1..7ed141c7fd79 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -47,7 +47,7 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
if (isKnownNonNegative(A, DL, 0, &AC, &I, &DT) &&
isKnownNonNegative(C, DL, 0, &AC, &I, &DT))
return BinaryOperator::Create(
- I.getOpcode(), Builder->CreateBinOp(I.getOpcode(), Op0, C), A);
+ I.getOpcode(), Builder.CreateBinOp(I.getOpcode(), Op0, C), A);
// X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2.
// Because shifts by negative values (which could occur if A were negative)
@@ -56,8 +56,8 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Power2(B)))) {
// FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
// demand the sign bit (and many others) here??
- Value *Rem = Builder->CreateAnd(A, ConstantInt::get(I.getType(), *B-1),
- Op1->getName());
+ Value *Rem = Builder.CreateAnd(A, ConstantInt::get(I.getType(), *B - 1),
+ Op1->getName());
I.setOperand(1, Rem);
return &I;
}
@@ -260,9 +260,9 @@ static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
// We can always evaluate constants shifted.
if (Constant *C = dyn_cast<Constant>(V)) {
if (isLeftShift)
- V = IC.Builder->CreateShl(C, NumBits);
+ V = IC.Builder.CreateShl(C, NumBits);
else
- V = IC.Builder->CreateLShr(C, NumBits);
+ V = IC.Builder.CreateLShr(C, NumBits);
// If we got a constantexpr back, try to simplify it with TD info.
if (auto *C = dyn_cast<Constant>(V))
if (auto *FoldedC =
@@ -289,7 +289,7 @@ static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
case Instruction::Shl:
case Instruction::LShr:
return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
- *(IC.Builder));
+ IC.Builder);
case Instruction::Select:
I->setOperand(
@@ -353,7 +353,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
Constant *ShAmt =
ConstantExpr::getZExt(cast<Constant>(Op1), TrOp->getType());
// (shift2 (shift1 & 0x00FF), c2)
- Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());
+ Value *NSh = Builder.CreateBinOp(I.getOpcode(), TrOp, ShAmt, I.getName());
// For logical shifts, the truncation has the effect of making the high
// part of the register be zeros. Emulate this by inserting an AND to
@@ -375,9 +375,9 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
}
// shift1 & 0x00FF
- Value *And = Builder->CreateAnd(NSh,
- ConstantInt::get(I.getContext(), MaskV),
- TI->getName());
+ Value *And = Builder.CreateAnd(NSh,
+ ConstantInt::get(I.getContext(), MaskV),
+ TI->getName());
// Return the value truncated to the interesting size.
return new TruncInst(And, I.getType());
@@ -401,10 +401,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
m_Specific(Op1)))) {
Value *YS = // (Y << C)
- Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
+ Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
// (X + (Y << C))
- Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
- Op0BO->getOperand(1)->getName());
+ Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), YS, V1,
+ Op0BO->getOperand(1)->getName());
unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
@@ -421,11 +421,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
m_And(m_OneUse(m_Shr(m_Value(V1), m_Specific(Op1))),
m_ConstantInt(CC)))) {
Value *YS = // (Y << C)
- Builder->CreateShl(Op0BO->getOperand(0), Op1,
- Op0BO->getName());
+ Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
// X & (CC << C)
- Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
- V1->getName()+".mask");
+ Value *XM = Builder.CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
+ V1->getName()+".mask");
return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
}
LLVM_FALLTHROUGH;
@@ -437,10 +436,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
m_Specific(Op1)))) {
Value *YS = // (Y << C)
- Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
+ Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
// (X + (Y << C))
- Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
- Op0BO->getOperand(0)->getName());
+ Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), V1, YS,
+ Op0BO->getOperand(0)->getName());
unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
@@ -456,10 +455,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
m_And(m_OneUse(m_Shr(m_Value(V1), m_Value(V2))),
m_ConstantInt(CC))) && V2 == Op1) {
Value *YS = // (Y << C)
- Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
+ Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
// X & (CC << C)
- Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
- V1->getName()+".mask");
+ Value *XM = Builder.CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
+ V1->getName()+".mask");
return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
}
@@ -502,7 +501,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);
Value *NewShift =
- Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
+ Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
NewShift->takeName(Op0BO);
return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
@@ -541,7 +540,7 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
unsigned SrcWidth = X->getType()->getScalarSizeInBits();
if (ShAmt < SrcWidth &&
MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmt), 0, &I))
- return new ZExtInst(Builder->CreateShl(X, ShAmt), Ty);
+ return new ZExtInst(Builder.CreateShl(X, ShAmt), Ty);
}
// (X >>u C) << C --> X & (-1 << C)
@@ -641,7 +640,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
// ctpop.i32(x)>>5 --> zext(x == -1)
bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
- Value *Cmp = Builder->CreateICmpEQ(II->getArgOperand(0), RHS);
+ Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
return new ZExtInst(Cmp, Ty);
}
@@ -658,7 +657,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
return NewLShr;
}
// (X << C1) >>u C2 --> (X >>u (C2 - C1)) & (-1 >> C2)
- Value *NewLShr = Builder->CreateLShr(X, ShiftDiff, "", I.isExact());
+ Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
}
@@ -671,7 +670,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
return NewShl;
}
// (X << C1) >>u C2 --> X << (C1 - C2) & (-1 >> C2)
- Value *NewShl = Builder->CreateShl(X, ShiftDiff);
+ Value *NewShl = Builder.CreateShl(X, ShiftDiff);
APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
}
@@ -692,7 +691,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
// lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
if (Op0->hasOneUse()) {
- Value *NewLShr = Builder->CreateLShr(X, SrcTyBitWidth - 1);
+ Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
return new ZExtInst(NewLShr, Ty);
}
}
@@ -701,7 +700,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
if (ShAmt == BitWidth - SrcTyBitWidth && Op0->hasOneUse()) {
// The new shift amount can't be more than the narrow source type.
unsigned NewShAmt = std::min(ShAmt, SrcTyBitWidth - 1);
- Value *AShr = Builder->CreateAShr(X, NewShAmt);
+ Value *AShr = Builder.CreateAShr(X, NewShAmt);
return new ZExtInst(AShr, Ty);
}
}
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 03841164b58d..5689c0604239 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -548,7 +548,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
// X % -1 demands all the bits because we don't want to introduce
// INT_MIN % -1 (== undef) by accident.
- if (Rem->isAllOnesValue())
+ if (Rem->isMinusOne())
break;
APInt RA = Rem->getValue().abs();
if (RA.isPowerOf2()) {
@@ -1627,10 +1627,10 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
Args.push_back(II->getArgOperand(I));
- IRBuilderBase::InsertPointGuard Guard(*Builder);
- Builder->SetInsertPoint(II);
+ IRBuilderBase::InsertPointGuard Guard(Builder);
+ Builder.SetInsertPoint(II);
- CallInst *NewCall = Builder->CreateCall(NewIntrin, Args);
+ CallInst *NewCall = Builder.CreateCall(NewIntrin, Args);
NewCall->takeName(II);
NewCall->copyMetadata(*II);
@@ -1657,15 +1657,15 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
if (NewNumElts == 1) {
- return Builder->CreateInsertElement(UndefValue::get(V->getType()),
- NewCall, static_cast<uint64_t>(0));
+ return Builder.CreateInsertElement(UndefValue::get(V->getType()),
+ NewCall, static_cast<uint64_t>(0));
}
SmallVector<uint32_t, 8> EltMask;
for (unsigned I = 0; I < VWidth; ++I)
EltMask.push_back(I);
- Value *Shuffle = Builder->CreateShuffleVector(
+ Value *Shuffle = Builder.CreateShuffleVector(
NewCall, UndefValue::get(NewTy), EltMask);
MadeChange = true;
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 926e46655eb8..dd71a31b644b 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -204,11 +204,11 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
if (I->hasOneUse() &&
cheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) {
Value *newEI0 =
- Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
- EI.getName()+".lhs");
+ Builder.CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
+ EI.getName()+".lhs");
Value *newEI1 =
- Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
- EI.getName()+".rhs");
+ Builder.CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
+ EI.getName()+".rhs");
return BinaryOperator::CreateWithCopiedFlags(BO->getOpcode(),
newEI0, newEI1, BO);
}
@@ -250,8 +250,8 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
// Bitcasts can change the number of vector elements, and they cost
// nothing.
if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) {
- Value *EE = Builder->CreateExtractElement(CI->getOperand(0),
- EI.getIndexOperand());
+ Value *EE = Builder.CreateExtractElement(CI->getOperand(0),
+ EI.getIndexOperand());
Worklist.AddValue(EE);
return CastInst::Create(CI->getOpcode(), EE, EI.getType());
}
@@ -269,20 +269,20 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
Value *Cond = SI->getCondition();
if (Cond->getType()->isVectorTy()) {
- Cond = Builder->CreateExtractElement(Cond,
- EI.getIndexOperand(),
- Cond->getName() + ".elt");
+ Cond = Builder.CreateExtractElement(Cond,
+ EI.getIndexOperand(),
+ Cond->getName() + ".elt");
}
Value *V1Elem
- = Builder->CreateExtractElement(TrueVal,
- EI.getIndexOperand(),
- TrueVal->getName() + ".elt");
+ = Builder.CreateExtractElement(TrueVal,
+ EI.getIndexOperand(),
+ TrueVal->getName() + ".elt");
Value *V2Elem
- = Builder->CreateExtractElement(FalseVal,
- EI.getIndexOperand(),
- FalseVal->getName() + ".elt");
+ = Builder.CreateExtractElement(FalseVal,
+ EI.getIndexOperand(),
+ FalseVal->getName() + ".elt");
return SelectInst::Create(Cond,
V1Elem,
V2Elem,
@@ -837,7 +837,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
if (Instruction *Shuf = foldConstantInsEltIntoShuffle(IE))
return Shuf;
- if (Instruction *NewInsElt = hoistInsEltConst(IE, *Builder))
+ if (Instruction *NewInsElt = hoistInsEltConst(IE, Builder))
return NewInsElt;
// Turn a sequence of inserts that broadcasts a scalar into a single
@@ -1020,9 +1020,9 @@ InstCombiner::EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask) {
SmallVector<Constant *, 16> MaskValues;
for (int i = 0, e = Mask.size(); i != e; ++i) {
if (Mask[i] == -1)
- MaskValues.push_back(UndefValue::get(Builder->getInt32Ty()));
+ MaskValues.push_back(UndefValue::get(Builder.getInt32Ty()));
else
- MaskValues.push_back(Builder->getInt32(Mask[i]));
+ MaskValues.push_back(Builder.getInt32(Mask[i]));
}
return ConstantExpr::getShuffleVector(C, UndefValue::get(C->getType()),
ConstantVector::get(MaskValues));
@@ -1095,7 +1095,7 @@ InstCombiner::EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask) {
Value *V = EvaluateInDifferentElementOrder(I->getOperand(0), Mask);
return InsertElementInst::Create(V, I->getOperand(1),
- Builder->getInt32(Index), "", I);
+ Builder.getInt32(Index), "", I);
}
}
llvm_unreachable("failed to reorder elements of vector instruction!");
@@ -1275,9 +1275,9 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
UndefValue::get(Int32Ty));
for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I)
ShuffleMask[I] = ConstantInt::get(Int32Ty, Idx);
- V = Builder->CreateShuffleVector(V, UndefValue::get(V->getType()),
- ConstantVector::get(ShuffleMask),
- SVI.getName() + ".extract");
+ V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()),
+ ConstantVector::get(ShuffleMask),
+ SVI.getName() + ".extract");
BegIdx = 0;
}
unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth;
@@ -1287,10 +1287,10 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
auto *NewBC =
BCAlreadyExists
? NewBCs[CastSrcTy]
- : Builder->CreateBitCast(V, CastSrcTy, SVI.getName() + ".bc");
+ : Builder.CreateBitCast(V, CastSrcTy, SVI.getName() + ".bc");
if (!BCAlreadyExists)
NewBCs[CastSrcTy] = NewBC;
- auto *Ext = Builder->CreateExtractElement(
+ auto *Ext = Builder.CreateExtractElement(
NewBC, ConstantInt::get(Int32Ty, BegIdx), SVI.getName() + ".extract");
// The shufflevector isn't being replaced: the bitcast that used it
// is. InstCombine will visit the newly-created instructions.
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 723414635d6f..90e232399155 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -88,7 +88,7 @@ MaxArraySize("instcombine-maxarray-size", cl::init(1024),
cl::desc("Maximum array size considered when doing a combine"));
Value *InstCombiner::EmitGEPOffset(User *GEP) {
- return llvm::EmitGEPOffset(Builder, DL, GEP);
+ return llvm::EmitGEPOffset(&Builder, DL, GEP);
}
/// Return true if it is desirable to convert an integer computation from a
@@ -498,8 +498,7 @@ getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode,
/// This tries to simplify binary operations by factorizing out common terms
/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
-Value *InstCombiner::tryFactorization(InstCombiner::BuilderTy *Builder,
- BinaryOperator &I,
+Value *InstCombiner::tryFactorization(BinaryOperator &I,
Instruction::BinaryOps InnerOpcode,
Value *A, Value *B, Value *C, Value *D) {
assert(A && B && C && D && "All values must be provided");
@@ -525,9 +524,9 @@ Value *InstCombiner::tryFactorization(InstCombiner::BuilderTy *Builder,
// If "B op D" doesn't simplify then only go on if both of the existing
// operations "A op' B" and "C op' D" will be zapped as no longer used.
if (!V && LHS->hasOneUse() && RHS->hasOneUse())
- V = Builder->CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
+ V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
if (V) {
- SimplifiedInst = Builder->CreateBinOp(InnerOpcode, A, V);
+ SimplifiedInst = Builder.CreateBinOp(InnerOpcode, A, V);
}
}
@@ -545,9 +544,9 @@ Value *InstCombiner::tryFactorization(InstCombiner::BuilderTy *Builder,
// If "A op C" doesn't simplify then only go on if both of the existing
// operations "A op' B" and "C op' D" will be zapped as no longer used.
if (!V && LHS->hasOneUse() && RHS->hasOneUse())
- V = Builder->CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
+ V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
if (V) {
- SimplifiedInst = Builder->CreateBinOp(InnerOpcode, V, B);
+ SimplifiedInst = Builder.CreateBinOp(InnerOpcode, V, B);
}
}
@@ -610,7 +609,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
// The instruction has the form "(A op' B) op (C op' D)". Try to factorize
// a common term.
if (Op0 && Op1 && LHSOpcode == RHSOpcode)
- if (Value *V = tryFactorization(Builder, I, LHSOpcode, A, B, C, D))
+ if (Value *V = tryFactorization(I, LHSOpcode, A, B, C, D))
return V;
// The instruction has the form "(A op' B) op (C)". Try to factorize common
@@ -618,7 +617,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
if (Op0)
if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
if (Value *V =
- tryFactorization(Builder, I, LHSOpcode, A, B, RHS, Ident))
+ tryFactorization(I, LHSOpcode, A, B, RHS, Ident))
return V;
// The instruction has the form "(B) op (C op' D)". Try to factorize common
@@ -626,7 +625,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
if (Op1)
if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
if (Value *V =
- tryFactorization(Builder, I, RHSOpcode, LHS, Ident, C, D))
+ tryFactorization(I, RHSOpcode, LHS, Ident, C, D))
return V;
}
@@ -644,7 +643,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
SimplifyBinOp(TopLevelOpcode, B, C, SQ.getWithInstruction(&I))) {
// They do! Return "L op' R".
++NumExpand;
- C = Builder->CreateBinOp(InnerOpcode, L, R);
+ C = Builder.CreateBinOp(InnerOpcode, L, R);
C->takeName(&I);
return C;
}
@@ -663,7 +662,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I))) {
// They do! Return "L op' R".
++NumExpand;
- A = Builder->CreateBinOp(InnerOpcode, L, R);
+ A = Builder.CreateBinOp(InnerOpcode, L, R);
A->takeName(&I);
return A;
}
@@ -678,18 +677,18 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
if (Value *V =
SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
SI1->getFalseValue(), SQ.getWithInstruction(&I)))
- SI = Builder->CreateSelect(SI0->getCondition(),
- Builder->CreateBinOp(TopLevelOpcode,
- SI0->getTrueValue(),
- SI1->getTrueValue()),
- V);
+ SI = Builder.CreateSelect(SI0->getCondition(),
+ Builder.CreateBinOp(TopLevelOpcode,
+ SI0->getTrueValue(),
+ SI1->getTrueValue()),
+ V);
if (Value *V =
SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
SI1->getTrueValue(), SQ.getWithInstruction(&I)))
- SI = Builder->CreateSelect(
+ SI = Builder.CreateSelect(
SI0->getCondition(), V,
- Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
- SI1->getFalseValue()));
+ Builder.CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
+ SI1->getFalseValue()));
if (SI) {
SI->takeName(&I);
return SI;
@@ -751,9 +750,9 @@ Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
}
static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
- InstCombiner *IC) {
+ InstCombiner::BuilderTy &Builder) {
if (auto *Cast = dyn_cast<CastInst>(&I))
- return IC->Builder->CreateCast(Cast->getOpcode(), SO, I.getType());
+ return Builder.CreateCast(Cast->getOpcode(), SO, I.getType());
assert(I.isBinaryOp() && "Unexpected opcode for select folding");
@@ -772,8 +771,8 @@ static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
std::swap(Op0, Op1);
auto *BO = cast<BinaryOperator>(&I);
- Value *RI = IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
- SO->getName() + ".op");
+ Value *RI = Builder.CreateBinOp(BO->getOpcode(), Op0, Op1,
+ SO->getName() + ".op");
auto *FPInst = dyn_cast<Instruction>(RI);
if (FPInst && isa<FPMathOperator>(FPInst))
FPInst->copyFastMathFlags(BO);
@@ -791,7 +790,7 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
return nullptr;
// Bool selects with constant operands can be folded to logical ops.
- if (SI->getType()->getScalarType()->isIntegerTy(1))
+ if (SI->getType()->isIntOrIntVectorTy(1))
return nullptr;
// If it's a bitcast involving vectors, make sure it has the same number of
@@ -825,13 +824,13 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
}
}
- Value *NewTV = foldOperationIntoSelectOperand(Op, TV, this);
- Value *NewFV = foldOperationIntoSelectOperand(Op, FV, this);
+ Value *NewTV = foldOperationIntoSelectOperand(Op, TV, Builder);
+ Value *NewFV = foldOperationIntoSelectOperand(Op, FV, Builder);
return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
}
static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV,
- InstCombiner *IC) {
+ InstCombiner::BuilderTy &Builder) {
bool ConstIsRHS = isa<Constant>(I->getOperand(1));
Constant *C = cast<Constant>(I->getOperand(ConstIsRHS));
@@ -845,7 +844,7 @@ static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV,
if (!ConstIsRHS)
std::swap(Op0, Op1);
- Value *RI = IC->Builder->CreateBinOp(I->getOpcode(), Op0, Op1, "phitmp");
+ Value *RI = Builder.CreateBinOp(I->getOpcode(), Op0, Op1, "phitmp");
auto *FPInst = dyn_cast<Instruction>(RI);
if (FPInst && isa<FPMathOperator>(FPInst))
FPInst->copyFastMathFlags(I);
@@ -916,7 +915,7 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
// If we are going to have to insert a new computation, do so right before the
// predecessor's terminator.
if (NonConstBB)
- Builder->SetInsertPoint(NonConstBB->getTerminator());
+ Builder.SetInsertPoint(NonConstBB->getTerminator());
// Next, add all of the operands to the PHI.
if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
@@ -948,9 +947,9 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
// folded to TrueVInPred or FalseVInPred as done for ConstantInt. For
// non-vector phis, this transformation was always profitable because
// the select would be generated exactly once in the NonConstBB.
- Builder->SetInsertPoint(ThisBB->getTerminator());
- InV = Builder->CreateSelect(PN->getIncomingValue(i),
- TrueVInPred, FalseVInPred, "phitmp");
+ Builder.SetInsertPoint(ThisBB->getTerminator());
+ InV = Builder.CreateSelect(PN->getIncomingValue(i), TrueVInPred,
+ FalseVInPred, "phitmp");
}
NewPN->addIncoming(InV, ThisBB);
}
@@ -961,16 +960,17 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
else if (isa<ICmpInst>(CI))
- InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
- C, "phitmp");
+ InV = Builder.CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
+ C, "phitmp");
else
- InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
- C, "phitmp");
+ InV = Builder.CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
+ C, "phitmp");
NewPN->addIncoming(InV, PN->getIncomingBlock(i));
}
} else if (auto *BO = dyn_cast<BinaryOperator>(&I)) {
for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV = foldOperationIntoPhiValue(BO, PN->getIncomingValue(i), this);
+ Value *InV = foldOperationIntoPhiValue(BO, PN->getIncomingValue(i),
+ Builder);
NewPN->addIncoming(InV, PN->getIncomingBlock(i));
}
} else {
@@ -981,8 +981,8 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
else
- InV = Builder->CreateCast(CI->getOpcode(),
- PN->getIncomingValue(i), I.getType(), "phitmp");
+ InV = Builder.CreateCast(CI->getOpcode(), PN->getIncomingValue(i),
+ I.getType(), "phitmp");
NewPN->addIncoming(InV, PN->getIncomingBlock(i));
}
}
@@ -1328,8 +1328,8 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
/// \brief Creates node of binary operation with the same attributes as the
/// specified one but with other operands.
static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
- InstCombiner::BuilderTy *B) {
- Value *BO = B->CreateBinOp(Inst.getOpcode(), LHS, RHS);
+ InstCombiner::BuilderTy &B) {
+ Value *BO = B.CreateBinOp(Inst.getOpcode(), LHS, RHS);
// If LHS and RHS are constant, BO won't be a binary operator.
if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BO))
NewBO->copyIRFlags(&Inst);
@@ -1365,7 +1365,7 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType()) {
Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
RShuf->getOperand(0), Builder);
- return Builder->CreateShuffleVector(
+ return Builder.CreateShuffleVector(
NewBO, UndefValue::get(NewBO->getType()), LShuf->getMask());
}
@@ -1404,7 +1404,7 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
Value *NewLHS = isa<Constant>(LHS) ? C2 : Shuffle->getOperand(0);
Value *NewRHS = isa<Constant>(LHS) ? Shuffle->getOperand(0) : C2;
Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
- return Builder->CreateShuffleVector(NewBO,
+ return Builder.CreateShuffleVector(NewBO,
UndefValue::get(Inst.getType()), Shuffle->getMask());
}
}
@@ -1452,7 +1452,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If we are using a wider index than needed for this platform, shrink
// it to what we need. If narrower, sign-extend it to what we need.
// This explicit cast can make subsequent optimizations more obvious.
- *I = Builder->CreateIntCast(*I, NewIndexType, true);
+ *I = Builder.CreateIntCast(*I, NewIndexType, true);
MadeChange = true;
}
}
@@ -1546,10 +1546,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// set that index.
PHINode *NewPN;
{
- IRBuilderBase::InsertPointGuard Guard(*Builder);
- Builder->SetInsertPoint(PN);
- NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(),
- PN->getNumOperands());
+ IRBuilderBase::InsertPointGuard Guard(Builder);
+ Builder.SetInsertPoint(PN);
+ NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
+ PN->getNumOperands());
}
for (auto &I : PN->operands())
@@ -1669,8 +1669,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// pointer arithmetic.
if (match(V, m_Neg(m_PtrToInt(m_Value())))) {
Operator *Index = cast<Operator>(V);
- Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
- Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
+ Value *PtrToInt = Builder.CreatePtrToInt(PtrOp, Index->getType());
+ Value *NewSub = Builder.CreateSub(PtrToInt, Index->getOperand(1));
return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
}
// Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X))
@@ -1723,7 +1723,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// ->
// %0 = GEP i8 addrspace(1)* X, ...
// addrspacecast i8 addrspace(1)* %0 to i8*
- return new AddrSpaceCastInst(Builder->Insert(Res), GEP.getType());
+ return new AddrSpaceCastInst(Builder.Insert(Res), GEP.getType());
}
if (ArrayType *XATy =
@@ -1751,10 +1751,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// addrspacecast i8 addrspace(1)* %0 to i8*
SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
Value *NewGEP = GEP.isInBounds()
- ? Builder->CreateInBoundsGEP(
+ ? Builder.CreateInBoundsGEP(
nullptr, StrippedPtr, Idx, GEP.getName())
- : Builder->CreateGEP(nullptr, StrippedPtr, Idx,
- GEP.getName());
+ : Builder.CreateGEP(nullptr, StrippedPtr, Idx,
+ GEP.getName());
return new AddrSpaceCastInst(NewGEP, GEP.getType());
}
}
@@ -1772,9 +1772,9 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
Value *NewGEP =
GEP.isInBounds()
- ? Builder->CreateInBoundsGEP(nullptr, StrippedPtr, Idx,
- GEP.getName())
- : Builder->CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName());
+ ? Builder.CreateInBoundsGEP(nullptr, StrippedPtr, Idx,
+ GEP.getName())
+ : Builder.CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName());
// V and GEP are both pointer types --> BitCast
return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
@@ -1807,10 +1807,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// GEP may not be "inbounds".
Value *NewGEP =
GEP.isInBounds() && NSW
- ? Builder->CreateInBoundsGEP(nullptr, StrippedPtr, NewIdx,
- GEP.getName())
- : Builder->CreateGEP(nullptr, StrippedPtr, NewIdx,
- GEP.getName());
+ ? Builder.CreateInBoundsGEP(nullptr, StrippedPtr, NewIdx,
+ GEP.getName())
+ : Builder.CreateGEP(nullptr, StrippedPtr, NewIdx,
+ GEP.getName());
// The NewGEP must be pointer typed, so must the old one -> BitCast
return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
@@ -1849,10 +1849,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
NewIdx};
Value *NewGEP = GEP.isInBounds() && NSW
- ? Builder->CreateInBoundsGEP(
+ ? Builder.CreateInBoundsGEP(
SrcElTy, StrippedPtr, Off, GEP.getName())
- : Builder->CreateGEP(SrcElTy, StrippedPtr, Off,
- GEP.getName());
+ : Builder.CreateGEP(SrcElTy, StrippedPtr, Off,
+ GEP.getName());
// The NewGEP must be pointer typed, so must the old one -> BitCast
return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
GEP.getType());
@@ -1916,8 +1916,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) {
Value *NGEP =
GEP.isInBounds()
- ? Builder->CreateInBoundsGEP(nullptr, Operand, NewIndices)
- : Builder->CreateGEP(nullptr, Operand, NewIndices);
+ ? Builder.CreateInBoundsGEP(nullptr, Operand, NewIndices)
+ : Builder.CreateGEP(nullptr, Operand, NewIndices);
if (NGEP->getType() == GEP.getType())
return replaceInstUsesWith(GEP, NGEP);
@@ -2166,8 +2166,8 @@ Instruction *InstCombiner::visitFree(CallInst &FI) {
// free undef -> unreachable.
if (isa<UndefValue>(Op)) {
// Insert a new store to null because we cannot modify the CFG here.
- Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
- UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
+ Builder.CreateStore(ConstantInt::getTrue(FI.getContext()),
+ UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
return eraseInstFromFunction(FI);
}
@@ -2281,8 +2281,8 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
// the backend should extend back to a legal type for the target.
if (NewWidth > 0 && NewWidth < Known.getBitWidth()) {
IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
- Builder->SetInsertPoint(&SI);
- Value *NewCond = Builder->CreateTrunc(Cond, Ty, "trunc");
+ Builder.SetInsertPoint(&SI);
+ Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
SI.setCondition(NewCond);
for (auto Case : SI.cases()) {
@@ -2339,8 +2339,8 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// %E = insertvalue { i32 } %X, i32 42, 0
// by switching the order of the insert and extract (though the
// insertvalue should be left in, since it may have other uses).
- Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
- EV.getIndices());
+ Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
+ EV.getIndices());
return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
makeArrayRef(insi, inse));
}
@@ -2415,17 +2415,17 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// extractvalue has integer indices, getelementptr has Value*s. Convert.
SmallVector<Value*, 4> Indices;
// Prefix an i32 0 since we need the first element.
- Indices.push_back(Builder->getInt32(0));
+ Indices.push_back(Builder.getInt32(0));
for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
I != E; ++I)
- Indices.push_back(Builder->getInt32(*I));
+ Indices.push_back(Builder.getInt32(*I));
// We need to insert these at the location of the old load, not at that of
// the extractvalue.
- Builder->SetInsertPoint(L);
- Value *GEP = Builder->CreateInBoundsGEP(L->getType(),
- L->getPointerOperand(), Indices);
- Instruction *NL = Builder->CreateLoad(GEP);
+ Builder.SetInsertPoint(L);
+ Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
+ L->getPointerOperand(), Indices);
+ Instruction *NL = Builder.CreateLoad(GEP);
  // Whatever aliasing information we had for the original load must also
// hold for the smaller load, so propagate the annotations.
AAMDNodes Nodes;
@@ -2922,8 +2922,8 @@ bool InstCombiner::run() {
}
// Now that we have an instruction, try combining it to simplify it.
- Builder->SetInsertPoint(I);
- Builder->SetCurrentDebugLocation(I->getDebugLoc());
+ Builder.SetInsertPoint(I);
+ Builder.SetCurrentDebugLocation(I->getDebugLoc());
#ifndef NDEBUG
std::string OrigI;
@@ -3160,7 +3160,7 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
- InstCombiner IC(Worklist, &Builder, F.optForMinSize(), ExpensiveCombines,
+ InstCombiner IC(Worklist, Builder, F.optForMinSize(), ExpensiveCombines,
AA, AC, TLI, DT, DL, LI);
IC.MaxArraySizeForCombine = MaxArraySize;
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 7eea44d6aca0..184940b7ea58 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1230,7 +1230,7 @@ static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
// dyn_cast as we might get UndefValue
if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
- if (Masked->isNullValue())
+ if (Masked->isZero())
// Mask is constant false, so no instrumentation needed.
continue;
// If we have a true or undef value, fall through to doInstrumentAddress
diff --git a/lib/Transforms/Instrumentation/CFGMST.h b/lib/Transforms/Instrumentation/CFGMST.h
index 3802f9fbf7db..16e2e6b4e730 100644
--- a/lib/Transforms/Instrumentation/CFGMST.h
+++ b/lib/Transforms/Instrumentation/CFGMST.h
@@ -12,6 +12,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIB_TRANSFORMS_INSTRUMENTATION_CFGMST_H
+#define LLVM_LIB_TRANSFORMS_INSTRUMENTATION_CFGMST_H
+
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
@@ -24,10 +27,10 @@
#include <utility>
#include <vector>
-namespace llvm {
-
#define DEBUG_TYPE "cfgmst"
+namespace llvm {
+
/// \brief A union-find based Minimum Spanning Tree for CFG
///
/// Implements a Union-find algorithm to compute Minimum Spanning Tree
@@ -220,5 +223,8 @@ public:
}
};
-#undef DEBUG_TYPE // "cfgmst"
} // end namespace llvm
+
+#undef DEBUG_TYPE // "cfgmst"
+
+#endif // LLVM_LIB_TRANSFORMS_INSTRUMENTATION_CFGMST_H
diff --git a/lib/Transforms/Instrumentation/InstrProfiling.cpp b/lib/Transforms/Instrumentation/InstrProfiling.cpp
index 9c14b0149fdc..db8fa8977947 100644
--- a/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -112,7 +112,7 @@ cl::opt<bool> DoCounterPromotion("do-counter-promotion", cl::ZeroOrMore,
cl::desc("Do counter register promotion"),
cl::init(false));
cl::opt<unsigned> MaxNumOfPromotionsPerLoop(
- cl::ZeroOrMore, "max-counter-promotions-per-loop", cl::init(10),
+ cl::ZeroOrMore, "max-counter-promotions-per-loop", cl::init(20),
cl::desc("Max number of counter promotions per loop to avoid"
" increasing register pressure too much"));
@@ -121,10 +121,21 @@ cl::opt<int>
MaxNumOfPromotions(cl::ZeroOrMore, "max-counter-promotions", cl::init(-1),
cl::desc("Max number of allowed counter promotions"));
-cl::opt<bool> SpeculativeCounterPromotion(
- cl::ZeroOrMore, "speculative-counter-promotion", cl::init(false),
- cl::desc("Allow counter promotion for loops with multiple exiting blocks "
- " or top-tested loops. "));
+cl::opt<unsigned> SpeculativeCounterPromotionMaxExiting(
+ cl::ZeroOrMore, "speculative-counter-promotion-max-exiting", cl::init(3),
+ cl::desc("The max number of exiting blocks of a loop to allow "
+ " speculative counter promotion"));
+
+cl::opt<bool> SpeculativeCounterPromotionToLoop(
+ cl::ZeroOrMore, "speculative-counter-promotion-to-loop", cl::init(false),
+ cl::desc("When the option is false, if the target block is in a loop, "
+ "the promotion will be disallowed unless the promoted counter "
+ " update can be further/iteratively promoted into an acyclic "
+ " region."));
+
+cl::opt<bool> IterativeCounterPromotion(
+ cl::ZeroOrMore, "iterative-counter-promotion", cl::init(true),
+ cl::desc("Allow counter promotion across the whole loop nest."));
class InstrProfilingLegacyPass : public ModulePass {
InstrProfiling InstrProf;
@@ -150,6 +161,7 @@ public:
}
};
+///
/// A helper class to promote one counter RMW operation in the loop
/// into register update.
///
@@ -158,16 +170,19 @@ public:
///
class PGOCounterPromoterHelper : public LoadAndStorePromoter {
public:
- PGOCounterPromoterHelper(Instruction *L, Instruction *S, SSAUpdater &SSA,
- Value *Init, BasicBlock *PH,
- ArrayRef<BasicBlock *> ExitBlocks,
- ArrayRef<Instruction *> InsertPts)
+ PGOCounterPromoterHelper(
+ Instruction *L, Instruction *S, SSAUpdater &SSA, Value *Init,
+ BasicBlock *PH, ArrayRef<BasicBlock *> ExitBlocks,
+ ArrayRef<Instruction *> InsertPts,
+ DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
+ LoopInfo &LI)
: LoadAndStorePromoter({L, S}, SSA), Store(S), ExitBlocks(ExitBlocks),
- InsertPts(InsertPts) {
+ InsertPts(InsertPts), LoopToCandidates(LoopToCands), LI(LI) {
assert(isa<LoadInst>(L));
assert(isa<StoreInst>(S));
SSA.AddAvailableValue(PH, Init);
}
+
void doExtraRewritesBeforeFinalDeletion() const override {
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
BasicBlock *ExitBlock = ExitBlocks[i];
@@ -179,12 +194,21 @@ public:
Value *Addr = cast<StoreInst>(Store)->getPointerOperand();
IRBuilder<> Builder(InsertPos);
if (AtomicCounterUpdatePromoted)
+        // Atomic update currently can only be promoted across the current
+ // loop, not the whole loop nest.
Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, LiveInValue,
AtomicOrdering::SequentiallyConsistent);
else {
LoadInst *OldVal = Builder.CreateLoad(Addr, "pgocount.promoted");
auto *NewVal = Builder.CreateAdd(OldVal, LiveInValue);
- Builder.CreateStore(NewVal, Addr);
+ auto *NewStore = Builder.CreateStore(NewVal, Addr);
+
+ // Now update the parent loop's candidate list:
+ if (IterativeCounterPromotion) {
+ auto *TargetLoop = LI.getLoopFor(ExitBlock);
+ if (TargetLoop)
+ LoopToCandidates[TargetLoop].emplace_back(OldVal, NewStore);
+ }
}
}
}
@@ -193,6 +217,8 @@ private:
Instruction *Store;
ArrayRef<BasicBlock *> ExitBlocks;
ArrayRef<Instruction *> InsertPts;
+ DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
+ LoopInfo &LI;
};
/// A helper class to do register promotion for all profile counter
@@ -200,12 +226,15 @@ private:
///
class PGOCounterPromoter {
public:
- PGOCounterPromoter(ArrayRef<LoadStorePair> Cands, Loop &Loop)
- : Candidates(Cands), ExitBlocks(), InsertPts(), ParentLoop(Loop) {
+ PGOCounterPromoter(
+ DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
+ Loop &CurLoop, LoopInfo &LI)
+ : LoopToCandidates(LoopToCands), ExitBlocks(), InsertPts(), L(CurLoop),
+ LI(LI) {
SmallVector<BasicBlock *, 8> LoopExitBlocks;
SmallPtrSet<BasicBlock *, 8> BlockSet;
- ParentLoop.getExitBlocks(LoopExitBlocks);
+ L.getExitBlocks(LoopExitBlocks);
for (BasicBlock *ExitBlock : LoopExitBlocks) {
if (BlockSet.insert(ExitBlock).second) {
@@ -216,55 +245,97 @@ public:
}
bool run(int64_t *NumPromoted) {
- // We can't insert into a catchswitch.
- bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
- return isa<CatchSwitchInst>(Exit->getTerminator());
- });
-
- if (HasCatchSwitch)
- return false;
-
- if (!ParentLoop.hasDedicatedExits())
- return false;
-
- BasicBlock *PH = ParentLoop.getLoopPreheader();
- if (!PH)
- return false;
-
- BasicBlock *H = ParentLoop.getHeader();
- bool TopTested =
- ((ParentLoop.getBlocks().size() > 1) && ParentLoop.isLoopExiting(H));
- if (!SpeculativeCounterPromotion &&
- (TopTested || ParentLoop.getExitingBlock() == nullptr))
+ unsigned MaxProm = getMaxNumOfPromotionsInLoop(&L);
+ if (MaxProm == 0)
return false;
unsigned Promoted = 0;
- for (auto &Cand : Candidates) {
+ for (auto &Cand : LoopToCandidates[&L]) {
SmallVector<PHINode *, 4> NewPHIs;
SSAUpdater SSA(&NewPHIs);
Value *InitVal = ConstantInt::get(Cand.first->getType(), 0);
+
PGOCounterPromoterHelper Promoter(Cand.first, Cand.second, SSA, InitVal,
- PH, ExitBlocks, InsertPts);
+ L.getLoopPreheader(), ExitBlocks,
+ InsertPts, LoopToCandidates, LI);
Promoter.run(SmallVector<Instruction *, 2>({Cand.first, Cand.second}));
Promoted++;
- if (Promoted >= MaxNumOfPromotionsPerLoop)
+ if (Promoted >= MaxProm)
break;
+
(*NumPromoted)++;
if (MaxNumOfPromotions != -1 && *NumPromoted >= MaxNumOfPromotions)
break;
}
DEBUG(dbgs() << Promoted << " counters promoted for loop (depth="
- << ParentLoop.getLoopDepth() << ")\n");
+ << L.getLoopDepth() << ")\n");
return Promoted != 0;
}
private:
- ArrayRef<LoadStorePair> Candidates;
+ bool allowSpeculativeCounterPromotion(Loop *LP) {
+ SmallVector<BasicBlock *, 8> ExitingBlocks;
+ L.getExitingBlocks(ExitingBlocks);
+    // Not considered speculative.
+ if (ExitingBlocks.size() == 1)
+ return true;
+ if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
+ return false;
+ return true;
+ }
+
+ // Returns the max number of Counter Promotions for LP.
+ unsigned getMaxNumOfPromotionsInLoop(Loop *LP) {
+ // We can't insert into a catchswitch.
+ SmallVector<BasicBlock *, 8> LoopExitBlocks;
+ LP->getExitBlocks(LoopExitBlocks);
+ if (llvm::any_of(LoopExitBlocks, [](BasicBlock *Exit) {
+ return isa<CatchSwitchInst>(Exit->getTerminator());
+ }))
+ return 0;
+
+ if (!LP->hasDedicatedExits())
+ return 0;
+
+ BasicBlock *PH = LP->getLoopPreheader();
+ if (!PH)
+ return 0;
+
+ SmallVector<BasicBlock *, 8> ExitingBlocks;
+ LP->getExitingBlocks(ExitingBlocks);
+    // Not considered speculative.
+ if (ExitingBlocks.size() == 1)
+ return MaxNumOfPromotionsPerLoop;
+
+ if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
+ return 0;
+
+ // Whether the target block is in a loop does not matter:
+ if (SpeculativeCounterPromotionToLoop)
+ return MaxNumOfPromotionsPerLoop;
+
+ // Now check the target block:
+ unsigned MaxProm = MaxNumOfPromotionsPerLoop;
+ for (auto *TargetBlock : LoopExitBlocks) {
+ auto *TargetLoop = LI.getLoopFor(TargetBlock);
+ if (!TargetLoop)
+ continue;
+ unsigned MaxPromForTarget = getMaxNumOfPromotionsInLoop(TargetLoop);
+ unsigned PendingCandsInTarget = LoopToCandidates[TargetLoop].size();
+ MaxProm =
+ std::min(MaxProm, std::max(MaxPromForTarget, PendingCandsInTarget) -
+ PendingCandsInTarget);
+ }
+ return MaxProm;
+ }
+
+ DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
SmallVector<BasicBlock *, 8> ExitBlocks;
SmallVector<Instruction *, 8> InsertPts;
- Loop &ParentLoop;
+ Loop &L;
+ LoopInfo &LI;
};
} // end anonymous namespace
@@ -349,8 +420,10 @@ void InstrProfiling::promoteCounterLoadStores(Function *F) {
SmallVector<Loop *, 4> Loops = LI.getLoopsInPreorder();
- for (auto *Loop : Loops) {
- PGOCounterPromoter Promoter(LoopPromotionCandidates[Loop], *Loop);
+ // Do a post-order traversal of the loops so that counter updates can be
+ // iteratively hoisted outside the loop nest.
+ for (auto *Loop : llvm::reverse(Loops)) {
+ PGOCounterPromoter Promoter(LoopPromotionCandidates, *Loop, LI);
Promoter.run(&TotalCountersPromoted);
}
}
diff --git a/lib/Transforms/Instrumentation/MaximumSpanningTree.h b/lib/Transforms/Instrumentation/MaximumSpanningTree.h
index 363539b2886f..4eb758c69c58 100644
--- a/lib/Transforms/Instrumentation/MaximumSpanningTree.h
+++ b/lib/Transforms/Instrumentation/MaximumSpanningTree.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_ANALYSIS_MAXIMUMSPANNINGTREE_H
-#define LLVM_ANALYSIS_MAXIMUMSPANNINGTREE_H
+#ifndef LLVM_LIB_TRANSFORMS_INSTRUMENTATION_MAXIMUMSPANNINGTREE_H
+#define LLVM_LIB_TRANSFORMS_INSTRUMENTATION_MAXIMUMSPANNINGTREE_H
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/IR/BasicBlock.h"
@@ -108,4 +108,4 @@ namespace llvm {
} // End llvm namespace
-#endif
+#endif // LLVM_LIB_TRANSFORMS_INSTRUMENTATION_MAXIMUMSPANNINGTREE_H
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index df4ee9969c02..1348e0ed0ed0 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2918,8 +2918,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (ClDumpStrictInstructions)
dumpInst(I);
DEBUG(dbgs() << "DEFAULT: " << I << "\n");
- for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
- insertShadowCheck(I.getOperand(i), &I);
+ for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
+ Value *Operand = I.getOperand(i);
+ if (Operand->getType()->isSized())
+ insertShadowCheck(Operand, &I);
+ }
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
diff --git a/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index 0e7d11c55397..8e4bfc0b91bc 100644
--- a/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -224,7 +224,7 @@ std::string getBranchCondString(Instruction *TI) {
OS << "_Zero";
else if (CV->isOne())
OS << "_One";
- else if (CV->isAllOnesValue())
+ else if (CV->isMinusOne())
OS << "_MinusOne";
else
OS << "_Const";
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index a991792bf5a3..ec6904486e10 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -379,10 +379,11 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
}
static bool isAtomic(Instruction *I) {
+ // TODO: Ask TTI whether synchronization scope is between threads.
if (LoadInst *LI = dyn_cast<LoadInst>(I))
- return LI->isAtomic() && LI->getSynchScope() == CrossThread;
+ return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return SI->isAtomic() && SI->getSynchScope() == CrossThread;
+ return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
if (isa<AtomicRMWInst>(I))
return true;
if (isa<AtomicCmpXchgInst>(I))
@@ -676,7 +677,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
I->eraseFromParent();
} else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
- Function *F = FI->getSynchScope() == SingleThread ?
+ Function *F = FI->getSyncScopeID() == SyncScope::SingleThread ?
TsanAtomicSignalFence : TsanAtomicThreadFence;
CallInst *C = CallInst::Create(F, Args);
ReplaceInstWithInst(I, C);
diff --git a/lib/Transforms/Scalar/ConstantHoisting.cpp b/lib/Transforms/Scalar/ConstantHoisting.cpp
index a49c9b68c97d..122c9314e022 100644
--- a/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -44,6 +44,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
#include <tuple>
using namespace llvm;
@@ -55,7 +56,7 @@ STATISTIC(NumConstantsHoisted, "Number of constants hoisted");
STATISTIC(NumConstantsRebased, "Number of constants rebased");
static cl::opt<bool> ConstHoistWithBlockFrequency(
- "consthoist-with-block-frequency", cl::init(false), cl::Hidden,
+ "consthoist-with-block-frequency", cl::init(true), cl::Hidden,
cl::desc("Enable the use of the block frequency analysis to reduce the "
"chance to execute const materialization more frequently than "
"without hoisting."));
@@ -231,7 +232,8 @@ static void findBestInsertionSet(DominatorTree &DT, BlockFrequencyInfo &BFI,
// Return the optimal insert points in BBs.
if (Node == Entry) {
BBs.clear();
- if (InsertPtsFreq > BFI.getBlockFreq(Node))
+ if (InsertPtsFreq > BFI.getBlockFreq(Node) ||
+ (InsertPtsFreq == BFI.getBlockFreq(Node) && InsertPts.size() > 1))
BBs.insert(Entry);
else
BBs.insert(InsertPts.begin(), InsertPts.end());
@@ -244,7 +246,15 @@ static void findBestInsertionSet(DominatorTree &DT, BlockFrequencyInfo &BFI,
SmallPtrSet<BasicBlock *, 16> &ParentInsertPts = InsertPtsMap[Parent].first;
BlockFrequency &ParentPtsFreq = InsertPtsMap[Parent].second;
// Choose to insert in Node or in subtree of Node.
- if (InsertPtsFreq > BFI.getBlockFreq(Node) || NodeInBBs) {
+ // Don't hoist to EHPad because we may not find a proper place to insert
+ // in EHPad.
+ // If the total frequency of InsertPts is the same as the frequency of the
+ // target Node, and InsertPts contains more than one nodes, choose hoisting
+ // to reduce code size.
+ if (NodeInBBs ||
+ (!Node->isEHPad() &&
+ (InsertPtsFreq > BFI.getBlockFreq(Node) ||
+ (InsertPtsFreq == BFI.getBlockFreq(Node) && InsertPts.size() > 1)))) {
ParentInsertPts.insert(Node);
ParentPtsFreq += BFI.getBlockFreq(Node);
} else {
@@ -392,42 +402,15 @@ void ConstantHoistingPass::collectConstantCandidates(
if (Inst->isCast())
return;
- // Can't handle inline asm. Skip it.
- if (auto Call = dyn_cast<CallInst>(Inst))
- if (isa<InlineAsm>(Call->getCalledValue()))
- return;
-
- // Switch cases must remain constant, and if the value being tested is
- // constant the entire thing should disappear.
- if (isa<SwitchInst>(Inst))
- return;
-
- // Static allocas (constant size in the entry block) are handled by
- // prologue/epilogue insertion so they're free anyway. We definitely don't
- // want to make them non-constant.
- auto AI = dyn_cast<AllocaInst>(Inst);
- if (AI && AI->isStaticAlloca())
- return;
-
- // Constants in GEPs that index into a struct type should not be hoisted.
- if (isa<GetElementPtrInst>(Inst)) {
- gep_type_iterator GTI = gep_type_begin(Inst);
-
- // Collect constant for first operand.
- collectConstantCandidates(ConstCandMap, Inst, 0);
- // Scan rest operands.
- for (unsigned Idx = 1, E = Inst->getNumOperands(); Idx != E; ++Idx, ++GTI) {
- // Only collect constants that index into a non struct type.
- if (!GTI.isStruct()) {
- collectConstantCandidates(ConstCandMap, Inst, Idx);
- }
- }
- return;
- }
-
// Scan all operands.
for (unsigned Idx = 0, E = Inst->getNumOperands(); Idx != E; ++Idx) {
- collectConstantCandidates(ConstCandMap, Inst, Idx);
+ // The cost of materializing the constants (defined in
+ // `TargetTransformInfo::getIntImmCost`) for instructions which only take
+ // constant variables is lower than `TargetTransformInfo::TCC_Basic`. So
+ // it's safe for us to collect constant candidates from all IntrinsicInsts.
+ if (canReplaceOperandWithVariable(Inst, Idx) || isa<IntrinsicInst>(Inst)) {
+ collectConstantCandidates(ConstCandMap, Inst, Idx);
+ }
} // end of for all operands
}
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index 0f92760a874b..7fd77a082b82 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -670,7 +670,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (auto *KnownCond = AvailableValues.lookup(CondI)) {
// Is the condition known to be true?
if (isa<ConstantInt>(KnownCond) &&
- cast<ConstantInt>(KnownCond)->isOneValue()) {
+ cast<ConstantInt>(KnownCond)->isOne()) {
DEBUG(dbgs() << "EarlyCSE removing guard: " << *Inst << '\n');
removeMSSA(Inst);
Inst->eraseFromParent();
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index c0f628eb61e6..0fe72f3f7331 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -80,10 +80,9 @@ MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore,
struct llvm::GVN::Expression {
uint32_t opcode;
Type *type;
- bool commutative;
SmallVector<uint32_t, 4> varargs;
- Expression(uint32_t o = ~2U) : opcode(o), commutative(false) {}
+ Expression(uint32_t o = ~2U) : opcode(o) {}
bool operator==(const Expression &other) const {
if (opcode != other.opcode)
@@ -247,7 +246,6 @@ GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
if (e.varargs[0] > e.varargs[1])
std::swap(e.varargs[0], e.varargs[1]);
- e.commutative = true;
}
if (CmpInst *C = dyn_cast<CmpInst>(I)) {
@@ -258,7 +256,6 @@ GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
Predicate = CmpInst::getSwappedPredicate(Predicate);
}
e.opcode = (C->getOpcode() << 8) | Predicate;
- e.commutative = true;
} else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
II != IE; ++II)
@@ -284,7 +281,6 @@ GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
Predicate = CmpInst::getSwappedPredicate(Predicate);
}
e.opcode = (Opcode << 8) | Predicate;
- e.commutative = true;
return e;
}
@@ -352,25 +348,25 @@ GVN::ValueTable::~ValueTable() = default;
/// add - Insert a value into the table with a specified value number.
void GVN::ValueTable::add(Value *V, uint32_t num) {
valueNumbering.insert(std::make_pair(V, num));
- if (PHINode *PN = dyn_cast<PHINode>(V))
- NumberingPhi[num] = PN;
}
uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
if (AA->doesNotAccessMemory(C)) {
Expression exp = createExpr(C);
- uint32_t e = assignExpNewValueNum(exp).first;
+ uint32_t &e = expressionNumbering[exp];
+ if (!e) e = nextValueNumber++;
valueNumbering[C] = e;
return e;
} else if (AA->onlyReadsMemory(C)) {
Expression exp = createExpr(C);
- auto ValNum = assignExpNewValueNum(exp);
- if (ValNum.second) {
- valueNumbering[C] = ValNum.first;
- return ValNum.first;
+ uint32_t &e = expressionNumbering[exp];
+ if (!e) {
+ e = nextValueNumber++;
+ valueNumbering[C] = e;
+ return e;
}
if (!MD) {
- uint32_t e = assignExpNewValueNum(exp).first;
+ e = nextValueNumber++;
valueNumbering[C] = e;
return e;
}
@@ -526,29 +522,23 @@ uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
case Instruction::ExtractValue:
exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
break;
- case Instruction::PHI:
- valueNumbering[V] = nextValueNumber;
- NumberingPhi[nextValueNumber] = cast<PHINode>(V);
- return nextValueNumber++;
default:
valueNumbering[V] = nextValueNumber;
return nextValueNumber++;
}
- uint32_t e = assignExpNewValueNum(exp).first;
+ uint32_t& e = expressionNumbering[exp];
+ if (!e) e = nextValueNumber++;
valueNumbering[V] = e;
return e;
}
/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
-uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
+uint32_t GVN::ValueTable::lookup(Value *V) const {
DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
- if (Verify) {
- assert(VI != valueNumbering.end() && "Value not numbered?");
- return VI->second;
- }
- return (VI != valueNumbering.end()) ? VI->second : 0;
+ assert(VI != valueNumbering.end() && "Value not numbered?");
+ return VI->second;
}
/// Returns the value number of the given comparison,
@@ -559,28 +549,21 @@ uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
CmpInst::Predicate Predicate,
Value *LHS, Value *RHS) {
Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
- return assignExpNewValueNum(exp).first;
+ uint32_t& e = expressionNumbering[exp];
+ if (!e) e = nextValueNumber++;
+ return e;
}
/// Remove all entries from the ValueTable.
void GVN::ValueTable::clear() {
valueNumbering.clear();
expressionNumbering.clear();
- NumberingPhi.clear();
- PhiTranslateTable.clear();
nextValueNumber = 1;
- Expressions.clear();
- ExprIdx.clear();
- nextExprNumber = 0;
}
/// Remove a value from the value numbering.
void GVN::ValueTable::erase(Value *V) {
- uint32_t Num = valueNumbering.lookup(V);
valueNumbering.erase(V);
- // If V is PHINode, V <--> value number is an one-to-one mapping.
- if (isa<PHINode>(V))
- NumberingPhi.erase(Num);
}
/// verifyRemoved - Verify that the value is removed from all internal data
@@ -1183,7 +1166,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
auto *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre",
LI->isVolatile(), LI->getAlignment(),
- LI->getOrdering(), LI->getSynchScope(),
+ LI->getOrdering(), LI->getSyncScopeID(),
UnavailablePred->getTerminator());
// Transfer the old load's AA tags to the new load.
@@ -1219,7 +1202,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
V->takeName(LI);
if (Instruction *I = dyn_cast<Instruction>(V))
I->setDebugLoc(LI->getDebugLoc());
- if (V->getType()->getScalarType()->isPointerTy())
+ if (V->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
ORE->emit(OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
@@ -1306,7 +1289,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
// to propagate LI's DebugLoc because LI may not post-dominate I.
if (LI->getDebugLoc() && LI->getParent() == I->getParent())
I->setDebugLoc(LI->getDebugLoc());
- if (V->getType()->getScalarType()->isPointerTy())
+ if (V->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
++NumGVNLoad;
@@ -1460,7 +1443,7 @@ bool GVN::processLoad(LoadInst *L) {
reportLoadElim(L, AvailableValue, ORE);
// Tell MDA to reexamine the reused pointer since we might have more
// information after forwarding it.
- if (MD && AvailableValue->getType()->getScalarType()->isPointerTy())
+ if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(AvailableValue);
return true;
}
@@ -1468,95 +1451,6 @@ bool GVN::processLoad(LoadInst *L) {
return false;
}
-/// Return a pair the first field showing the value number of \p Exp and the
-/// second field showing whether it is a value number newly created.
-std::pair<uint32_t, bool>
-GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
- uint32_t &e = expressionNumbering[Exp];
- bool CreateNewValNum = !e;
- if (CreateNewValNum) {
- Expressions.push_back(Exp);
- if (ExprIdx.size() < nextValueNumber + 1)
- ExprIdx.resize(nextValueNumber * 2);
- e = nextValueNumber;
- ExprIdx[nextValueNumber++] = nextExprNumber++;
- }
- return {e, CreateNewValNum};
-}
-
-/// Return whether all the values related with the same \p num are
-/// defined in \p BB.
-bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
- GVN &Gvn) {
- LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
- while (Vals && Vals->BB == BB)
- Vals = Vals->Next;
- return !Vals;
-}
-
-/// Wrap phiTranslateImpl to provide caching functionality.
-uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
- const BasicBlock *PhiBlock, uint32_t Num,
- GVN &Gvn) {
- auto FindRes = PhiTranslateTable.find({Num, Pred});
- if (FindRes != PhiTranslateTable.end())
- return FindRes->second;
- uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
- PhiTranslateTable.insert({{Num, Pred}, NewNum});
- return NewNum;
-}
-
-/// Translate value number \p Num using phis, so that it has the values of
-/// the phis in BB.
-uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
- const BasicBlock *PhiBlock,
- uint32_t Num, GVN &Gvn) {
- if (PHINode *PN = NumberingPhi[Num]) {
- for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
- if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
- if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
- return TransVal;
- }
- return Num;
- }
-
- // If there is any value related with Num is defined in a BB other than
- // PhiBlock, it cannot depend on a phi in PhiBlock without going through
- // a backedge. We can do an early exit in that case to save compile time.
- if (!areAllValsInBB(Num, PhiBlock, Gvn))
- return Num;
-
- if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
- return Num;
- Expression Exp = Expressions[ExprIdx[Num]];
-
- for (unsigned i = 0; i < Exp.varargs.size(); i++) {
- // For InsertValue and ExtractValue, some varargs are index numbers
- // instead of value numbers. Those index numbers should not be
- // translated.
- if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
- (i > 0 && Exp.opcode == Instruction::ExtractValue))
- continue;
- Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
- }
-
- if (Exp.commutative) {
- assert(Exp.varargs.size() == 2 && "Unsupported commutative expression!");
- if (Exp.varargs[0] > Exp.varargs[1]) {
- std::swap(Exp.varargs[0], Exp.varargs[1]);
- uint32_t Opcode = Exp.opcode >> 8;
- if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
- Exp.opcode = (Opcode << 8) |
- CmpInst::getSwappedPredicate(
- static_cast<CmpInst::Predicate>(Exp.opcode & 255));
- }
- }
-
- if (uint32_t NewNum = expressionNumbering[Exp])
- return NewNum;
- return Num;
-}
-
// In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
// and then scan the list to find one whose block dominates the block in
@@ -1601,15 +1495,6 @@ static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
return Pred != nullptr;
}
-
-void GVN::assignBlockRPONumber(Function &F) {
- uint32_t NextBlockNumber = 1;
- ReversePostOrderTraversal<Function *> RPOT(&F);
- for (BasicBlock *BB : RPOT)
- BlockRPONumber[BB] = NextBlockNumber++;
-}
-
-
// Tries to replace instruction with const, using information from
// ReplaceWithConstMap.
bool GVN::replaceOperandsWithConsts(Instruction *Instr) const {
@@ -1713,7 +1598,7 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
// RHS neither 'true' nor 'false' - bail out.
continue;
// Whether RHS equals 'true'. Otherwise it equals 'false'.
- bool isKnownTrue = CI->isAllOnesValue();
+ bool isKnownTrue = CI->isMinusOne();
bool isKnownFalse = !isKnownTrue;
// If "A && B" is known true then both A and B are known true. If "A || B"
@@ -1813,7 +1698,7 @@ bool GVN::processInstruction(Instruction *I) {
Changed = true;
}
if (Changed) {
- if (MD && V->getType()->getScalarType()->isPointerTy())
+ if (MD && V->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(V);
++NumGVNSimpl;
return true;
@@ -1924,7 +1809,7 @@ bool GVN::processInstruction(Instruction *I) {
// Remove it!
patchAndReplaceAllUsesWith(I, Repl);
- if (MD && Repl->getType()->getScalarType()->isPointerTy())
+ if (MD && Repl->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(Repl);
markInstructionForDeletion(I);
return true;
@@ -1971,7 +1856,6 @@ bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
// Fabricate val-num for dead-code in order to suppress assertion in
// performPRE().
assignValNumForDeadCode();
- assignBlockRPONumber(F);
bool PREChanged = true;
while (PREChanged) {
PREChanged = performPRE(F);
@@ -2043,7 +1927,7 @@ bool GVN::processBlock(BasicBlock *BB) {
// Instantiate an expression in a predecessor that lacked it.
bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
- BasicBlock *Curr, unsigned int ValNo) {
+ unsigned int ValNo) {
// Because we are going top-down through the block, all value numbers
// will be available in the predecessor by the time we need them. Any
// that weren't originally present will have been instantiated earlier
@@ -2061,9 +1945,7 @@ bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
success = false;
break;
}
- uint32_t TValNo =
- VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
- if (Value *V = findLeader(Pred, TValNo)) {
+ if (Value *V = findLeader(Pred, VN.lookup(Op))) {
Instr->setOperand(i, V);
} else {
success = false;
@@ -2080,12 +1962,10 @@ bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
Instr->insertBefore(Pred->getTerminator());
Instr->setName(Instr->getName() + ".pre");
Instr->setDebugLoc(Instr->getDebugLoc());
-
- unsigned Num = VN.lookupOrAdd(Instr);
- VN.add(Instr, Num);
+ VN.add(Instr, ValNo);
// Update the availability map to include the new instruction.
- addToLeaderTable(Num, Instr, Pred);
+ addToLeaderTable(ValNo, Instr, Pred);
return true;
}
@@ -2123,27 +2003,18 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
for (BasicBlock *P : predecessors(CurrentBlock)) {
- // We're not interested in PRE where blocks with predecessors that are
- // not reachable.
- if (!DT->isReachableFromEntry(P)) {
+ // We're not interested in PRE where the block is its
+ // own predecessor, or in blocks with predecessors
+ // that are not reachable.
+ if (P == CurrentBlock) {
NumWithout = 2;
break;
- }
- // It is not safe to do PRE when P->CurrentBlock is a loop backedge, and
- // when CurInst has operand defined in CurrentBlock (so it may be defined
- // by phi in the loop header).
- if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
- any_of(CurInst->operands(), [&](const Use &U) {
- if (auto *Inst = dyn_cast<Instruction>(U.get()))
- return Inst->getParent() == CurrentBlock;
- return false;
- })) {
+ } else if (!DT->isReachableFromEntry(P)) {
NumWithout = 2;
break;
}
- uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
- Value *predV = findLeader(P, TValNo);
+ Value *predV = findLeader(P, ValNo);
if (!predV) {
predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
PREPred = P;
@@ -2183,7 +2054,7 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
}
// We need to insert somewhere, so let's give it a shot
PREInstr = CurInst->clone();
- if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
+ if (!performScalarPREInsertion(PREInstr, PREPred, ValNo)) {
// If we failed insertion, make sure we remove the instruction.
DEBUG(verifyRemoved(PREInstr));
PREInstr->deleteValue();
@@ -2212,7 +2083,7 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
addToLeaderTable(ValNo, Phi, CurrentBlock);
Phi->setDebugLoc(CurInst->getDebugLoc());
CurInst->replaceAllUsesWith(Phi);
- if (MD && Phi->getType()->getScalarType()->isPointerTy())
+ if (MD && Phi->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(Phi);
VN.erase(CurInst);
removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
@@ -2297,7 +2168,6 @@ bool GVN::iterateOnFunction(Function &F) {
void GVN::cleanupGlobalSets() {
VN.clear();
LeaderTable.clear();
- BlockRPONumber.clear();
TableAllocator.Reset();
}
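The GVN hunks above revert to the original lazy numbering idiom: indexing expressionNumbering with operator[] default-constructs a zero entry, and zero is reserved to mean "not yet numbered", so a single map access serves as lookup-or-add. A small standalone analogy using the standard library (illustration only; GVN itself uses DenseMap keyed by its Expression type):

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>

// Value number 0 means "no number assigned yet", so operator[]'s
// zero-initialization of a missing entry doubles as the lookup-or-add.
static std::unordered_map<std::string, uint32_t> ExprNumbering;
static uint32_t NextValueNumber = 1;

static uint32_t lookupOrAdd(const std::string &Expr) {
  uint32_t &N = ExprNumbering[Expr];
  if (!N)
    N = NextValueNumber++;
  return N;
}

int main() {
  uint32_t A = lookupOrAdd("add a b");
  uint32_t B = lookupOrAdd("add a b"); // same expression -> same number
  uint32_t C = lookupOrAdd("mul a b"); // new expression  -> new number
  assert(A == B && A != C);
  return 0;
}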
diff --git a/lib/Transforms/Scalar/InferAddressSpaces.cpp b/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 3c8fbd35bf8c..89b28f0aeee6 100644
--- a/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -232,7 +232,7 @@ bool InferAddressSpaces::rewriteIntrinsicOperands(IntrinsicInst *II,
case Intrinsic::amdgcn_atomic_inc:
case Intrinsic::amdgcn_atomic_dec:{
const ConstantInt *IsVolatile = dyn_cast<ConstantInt>(II->getArgOperand(4));
- if (!IsVolatile || !IsVolatile->isNullValue())
+ if (!IsVolatile || !IsVolatile->isZero())
return false;
LLVM_FALLTHROUGH;
@@ -358,7 +358,8 @@ InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
// If the operands of the expression on the top are already explored,
// adds that expression to the resultant postorder.
if (PostorderStack.back().second) {
- Postorder.push_back(TopVal);
+ if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
+ Postorder.push_back(TopVal);
PostorderStack.pop_back();
continue;
}
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 05293eb0079f..ee3de51b1360 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1212,7 +1212,7 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
LoadInst *NewVal = new LoadInst(
LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
LI->getName() + ".pr", false, LI->getAlignment(), LI->getOrdering(),
- LI->getSynchScope(), UnavailablePred->getTerminator());
+ LI->getSyncScopeID(), UnavailablePred->getTerminator());
NewVal->setDebugLoc(LI->getDebugLoc());
if (AATags)
NewVal->setAAMetadata(AATags);
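This hunk, like the SROA, GVN and FunctionComparator hunks elsewhere in the import, tracks the IR rename from getSynchScope() to getSyncScopeID(). A hedged sketch of the same pattern in isolation, assuming an existing LoadInst *LI and an insertion point; the helper name is invented for illustration, and the constructor arguments mirror the call visible in the diff above.

#include "llvm/IR/Instructions.h"

// Copies LI at InsertPt while keeping the original atomic ordering and
// synchronization scope (the value formerly queried via getSynchScope()).
static llvm::LoadInst *cloneLoadPreservingAtomicity(llvm::LoadInst *LI,
                                                    llvm::Instruction *InsertPt) {
  return new llvm::LoadInst(LI->getPointerOperand(), LI->getName() + ".copy",
                            LI->isVolatile(), LI->getAlignment(),
                            LI->getOrdering(), LI->getSyncScopeID(), InsertPt);
}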
diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp
index c41cc42db5e2..ac4dd44a0e90 100644
--- a/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -148,25 +148,27 @@ static bool deleteLoopIfDead(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
LoopInfo &LI, LPMUpdater *Updater = nullptr) {
assert(L->isLCSSAForm(DT) && "Expected LCSSA!");
- // We can only remove the loop if there is a preheader that we can
- // branch from after removing it.
+ // We can only remove the loop if there is a preheader that we can branch from
+ // after removing it. Also, if LoopSimplify form is not available, stay out
+ // of trouble.
BasicBlock *Preheader = L->getLoopPreheader();
- if (!Preheader)
+ if (!Preheader || !L->hasDedicatedExits()) {
+ DEBUG(dbgs()
+ << "Deletion requires Loop with preheader and dedicated exits.\n");
return false;
-
- // If LoopSimplify form is not available, stay out of trouble.
- if (!L->hasDedicatedExits())
- return false;
-
+ }
// We can't remove loops that contain subloops. If the subloops were dead,
// they would already have been removed in earlier executions of this pass.
- if (L->begin() != L->end())
+ if (L->begin() != L->end()) {
+ DEBUG(dbgs() << "Loop contains subloops.\n");
return false;
+ }
BasicBlock *ExitBlock = L->getUniqueExitBlock();
if (ExitBlock && isLoopNeverExecuted(L)) {
+ DEBUG(dbgs() << "Loop is proven to never execute, delete it!");
// Set incoming value to undef for phi nodes in the exit block.
BasicBlock::iterator BI = ExitBlock->begin();
while (PHINode *P = dyn_cast<PHINode>(BI)) {
@@ -188,20 +190,26 @@ static bool deleteLoopIfDead(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
// be in the situation of needing to be able to solve statically which exit
// block will be branched to, or trying to preserve the branching logic in
// a loop invariant manner.
- if (!ExitBlock)
+ if (!ExitBlock) {
+ DEBUG(dbgs() << "Deletion requires single exit block\n");
return false;
-
+ }
// Finally, we have to check that the loop really is dead.
bool Changed = false;
- if (!isLoopDead(L, SE, ExitingBlocks, ExitBlock, Changed, Preheader))
+ if (!isLoopDead(L, SE, ExitingBlocks, ExitBlock, Changed, Preheader)) {
+ DEBUG(dbgs() << "Loop is not invariant, cannot delete.\n");
return Changed;
+ }
// Don't remove loops for which we can't solve the trip count.
// They could be infinite, in which case we'd be changing program behavior.
const SCEV *S = SE.getMaxBackedgeTakenCount(L);
- if (isa<SCEVCouldNotCompute>(S))
+ if (isa<SCEVCouldNotCompute>(S)) {
+ DEBUG(dbgs() << "Could not compute SCEV MaxBackedgeTakenCount.\n");
return Changed;
+ }
+ DEBUG(dbgs() << "Loop is invariant, delete it!");
deleteDeadLoop(L, DT, SE, LI, Updater);
++NumDeleted;
@@ -311,6 +319,9 @@ static void deleteDeadLoop(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
PreservedAnalyses LoopDeletionPass::run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR,
LPMUpdater &Updater) {
+
+ DEBUG(dbgs() << "Analyzing Loop for deletion: ");
+ DEBUG(L.dump());
if (!deleteLoopIfDead(&L, AR.DT, AR.SE, AR.LI, &Updater))
return PreservedAnalyses::all();
@@ -350,5 +361,7 @@ bool LoopDeletionLegacyPass::runOnLoop(Loop *L, LPPassManager &) {
ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ DEBUG(dbgs() << "Analyzing Loop for deletion: ");
+ DEBUG(L->dump());
return deleteLoopIfDead(L, DT, SE, LI);
}
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 8b435050ac76..4a6a35c0ab1b 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1160,7 +1160,7 @@ static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
if (!Dec ||
!((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
(SubInst->getOpcode() == Instruction::Add &&
- Dec->isAllOnesValue()))) {
+ Dec->isMinusOne()))) {
return false;
}
}
diff --git a/lib/Transforms/Scalar/LoopInterchange.cpp b/lib/Transforms/Scalar/LoopInterchange.cpp
index 9f3875a3027f..606136dc31a4 100644
--- a/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -757,8 +757,11 @@ bool LoopInterchangeLegality::currentLimitations() {
PHINode *InnerInductionVar;
SmallVector<PHINode *, 8> Inductions;
SmallVector<PHINode *, 8> Reductions;
- if (!findInductionAndReductions(InnerLoop, Inductions, Reductions))
+ if (!findInductionAndReductions(InnerLoop, Inductions, Reductions)) {
+ DEBUG(dbgs() << "Only inner loops with induction or reduction PHI nodes "
+ << "are supported currently.\n");
return true;
+ }
// TODO: Currently we handle only loops with 1 induction variable.
if (Inductions.size() != 1) {
@@ -771,16 +774,25 @@ bool LoopInterchangeLegality::currentLimitations() {
InnerInductionVar = Inductions.pop_back_val();
Reductions.clear();
- if (!findInductionAndReductions(OuterLoop, Inductions, Reductions))
+ if (!findInductionAndReductions(OuterLoop, Inductions, Reductions)) {
+ DEBUG(dbgs() << "Only outer loops with induction or reduction PHI nodes "
+ << "are supported currently.\n");
return true;
+ }
// Outer loop cannot have reduction because then loops will not be tightly
// nested.
- if (!Reductions.empty())
+ if (!Reductions.empty()) {
+ DEBUG(dbgs() << "Outer loops with reductions are not supported "
+ << "currently.\n");
return true;
+ }
// TODO: Currently we handle only loops with 1 induction variable.
- if (Inductions.size() != 1)
+ if (Inductions.size() != 1) {
+ DEBUG(dbgs() << "Loops with more than 1 induction variables are not "
+ << "supported currently.\n");
return true;
+ }
// TODO: Triangular loops are not handled for now.
if (!isLoopStructureUnderstood(InnerInductionVar)) {
@@ -791,12 +803,16 @@ bool LoopInterchangeLegality::currentLimitations() {
// TODO: We only handle LCSSA PHI's corresponding to reduction for now.
BasicBlock *LoopExitBlock =
getLoopLatchExitBlock(OuterLoopLatch, OuterLoopHeader);
- if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, true))
+ if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, true)) {
+ DEBUG(dbgs() << "Can only handle LCSSA PHIs in outer loops currently.\n");
return true;
+ }
LoopExitBlock = getLoopLatchExitBlock(InnerLoopLatch, InnerLoopHeader);
- if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, false))
+ if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, false)) {
+ DEBUG(dbgs() << "Can only handle LCSSA PHIs in inner loops currently.\n");
return true;
+ }
// TODO: Current limitation: Since we split the inner loop latch at the point
// where the induction variable is incremented (induction.next); we cannot have
@@ -816,8 +832,11 @@ bool LoopInterchangeLegality::currentLimitations() {
InnerIndexVarInc =
dyn_cast<Instruction>(InnerInductionVar->getIncomingValue(0));
- if (!InnerIndexVarInc)
+ if (!InnerIndexVarInc) {
+ DEBUG(dbgs() << "Did not find an instruction to increment the induction "
+ << "variable.\n");
return true;
+ }
// Since we split the inner loop latch on this induction variable, make sure
// we do not have any instruction between the induction variable and branch
@@ -827,19 +846,24 @@ bool LoopInterchangeLegality::currentLimitations() {
for (const Instruction &I : reverse(*InnerLoopLatch)) {
if (isa<BranchInst>(I) || isa<CmpInst>(I) || isa<TruncInst>(I))
continue;
+
// We found an instruction. If this is not the induction variable then it is not
// safe to split this loop latch.
- if (!I.isIdenticalTo(InnerIndexVarInc))
+ if (!I.isIdenticalTo(InnerIndexVarInc)) {
+ DEBUG(dbgs() << "Found unsupported instructions between induction "
+ << "variable increment and branch.\n");
return true;
+ }
FoundInduction = true;
break;
}
// The loop latch ended and we didn't find the induction variable; return as
// current limitation.
- if (!FoundInduction)
+ if (!FoundInduction) {
+ DEBUG(dbgs() << "Did not find the induction variable.\n");
return true;
-
+ }
return false;
}
diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp
index 7312d97f8efe..3506ac343d59 100644
--- a/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/lib/Transforms/Scalar/LoopRotation.cpp
@@ -485,10 +485,22 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
DomTreeNode *Node = HeaderChildren[I];
BasicBlock *BB = Node->getBlock();
- pred_iterator PI = pred_begin(BB);
- BasicBlock *NearestDom = *PI;
- for (pred_iterator PE = pred_end(BB); PI != PE; ++PI)
- NearestDom = DT->findNearestCommonDominator(NearestDom, *PI);
+ BasicBlock *NearestDom = nullptr;
+ for (BasicBlock *Pred : predecessors(BB)) {
+ // Consider only reachable basic blocks.
+ if (!DT->getNode(Pred))
+ continue;
+
+ if (!NearestDom) {
+ NearestDom = Pred;
+ continue;
+ }
+
+ NearestDom = DT->findNearestCommonDominator(NearestDom, Pred);
+ assert(NearestDom && "No NearestCommonDominator found");
+ }
+
+ assert(NearestDom && "Nearest dominator not found");
// Remember if this changes the DomTree.
if (Node->getIDom()->getBlock() != NearestDom) {
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 73436f13c94e..3638da118cb7 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -140,6 +140,13 @@ static cl::opt<bool> LSRExpNarrow(
cl::desc("Narrow LSR complex solution using"
" expectation of registers number"));
+// Flag to narrow search space by filtering non-optimal formulae with
+// the same ScaledReg and Scale.
+static cl::opt<bool> FilterSameScaledReg(
+ "lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true),
+ cl::desc("Narrow LSR search space by filtering non-optimal formulae"
+ " with the same ScaledReg and Scale"));
+
#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
@@ -1902,6 +1909,7 @@ class LSRInstance {
void NarrowSearchSpaceByDetectingSupersets();
void NarrowSearchSpaceByCollapsingUnrolledCode();
void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
+ void NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
void NarrowSearchSpaceByDeletingCostlyFormulas();
void NarrowSearchSpaceByPickingWinnerRegs();
void NarrowSearchSpaceUsingHeuristics();
@@ -2318,7 +2326,7 @@ LSRInstance::OptimizeLoopTermCond() {
dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
const ConstantInt *C = D->getValue();
// Stride of one or negative one can have reuse with non-addresses.
- if (C->isOne() || C->isAllOnesValue())
+ if (C->isOne() || C->isMinusOne())
goto decline_post_inc;
// Avoid weird situations.
if (C->getValue().getMinSignedBits() >= 64 ||
@@ -4306,6 +4314,104 @@ void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
}
}
+/// If an LSRUse has multiple formulae with the same ScaledReg and Scale,
+/// pick the best one and delete the others.
+/// This narrowing heuristic is to keep as many formulae with different
+/// Scale and ScaledReg pairs as possible while narrowing the search space.
+/// The benefit is that it is more likely to find a better solution from a
+/// formulae set with more Scale and ScaledReg variations than from a
+/// formulae set with the same Scale and ScaledReg. The picking-winner-regs
+/// heuristic will often keep the formulae with the same Scale and ScaledReg
+/// and filter out the others, and we want to avoid that if possible.
+void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
+ if (EstimateSearchSpaceComplexity() < ComplexityLimit)
+ return;
+
+ DEBUG(dbgs() << "The search space is too complex.\n"
+ "Narrowing the search space by choosing the best Formula "
+ "from the Formulae with the same Scale and ScaledReg.\n");
+
+ // Map the "Scale * ScaledReg" pair to the best formula of current LSRUse.
+ typedef DenseMap<std::pair<const SCEV *, int64_t>, size_t> BestFormulaeTy;
+ BestFormulaeTy BestFormulae;
+#ifndef NDEBUG
+ bool ChangedFormulae = false;
+#endif
+ DenseSet<const SCEV *> VisitedRegs;
+ SmallPtrSet<const SCEV *, 16> Regs;
+
+ for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+ LSRUse &LU = Uses[LUIdx];
+ DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');
+
+ // Return true if Formula FA is better than Formula FB.
+ auto IsBetterThan = [&](Formula &FA, Formula &FB) {
+ // First we will try to choose the Formula with fewer new registers.
+ // For a register used by current Formula, the more the register is
+ // shared among LSRUses, the less we increase the register number
+ // counter of the formula.
+ size_t FARegNum = 0;
+ for (const SCEV *Reg : FA.BaseRegs) {
+ const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
+ FARegNum += (NumUses - UsedByIndices.count() + 1);
+ }
+ size_t FBRegNum = 0;
+ for (const SCEV *Reg : FB.BaseRegs) {
+ const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
+ FBRegNum += (NumUses - UsedByIndices.count() + 1);
+ }
+ if (FARegNum != FBRegNum)
+ return FARegNum < FBRegNum;
+
+ // If the new register numbers are the same, choose the Formula with
+ // less Cost.
+ Cost CostFA, CostFB;
+ Regs.clear();
+ CostFA.RateFormula(TTI, FA, Regs, VisitedRegs, L, SE, DT, LU);
+ Regs.clear();
+ CostFB.RateFormula(TTI, FB, Regs, VisitedRegs, L, SE, DT, LU);
+ return CostFA.isLess(CostFB, TTI);
+ };
+
+ bool Any = false;
+ for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
+ ++FIdx) {
+ Formula &F = LU.Formulae[FIdx];
+ if (!F.ScaledReg)
+ continue;
+ auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx});
+ if (P.second)
+ continue;
+
+ Formula &Best = LU.Formulae[P.first->second];
+ if (IsBetterThan(F, Best))
+ std::swap(F, Best);
+ DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
+ dbgs() << "\n"
+ " in favor of formula ";
+ Best.print(dbgs()); dbgs() << '\n');
+#ifndef NDEBUG
+ ChangedFormulae = true;
+#endif
+ LU.DeleteFormula(F);
+ --FIdx;
+ --NumForms;
+ Any = true;
+ }
+ if (Any)
+ LU.RecomputeRegs(LUIdx, RegUses);
+
+ // Reset this to prepare for the next use.
+ BestFormulae.clear();
+ }
+
+ DEBUG(if (ChangedFormulae) {
+ dbgs() << "\n"
+ "After filtering out undesirable candidates:\n";
+ print_uses(dbgs());
+ });
+}
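Stripped of the LSR specifics, the filtering above is a keep-one-winner-per-key pass: try to insert the key, and if it is already present compare against the incumbent and keep the better entry. A standalone analogy with the standard library (illustration only; the pass keys on the (ScaledReg, Scale) pair and compares register counts and Cost):

#include <cassert>
#include <map>
#include <string>
#include <vector>

struct Formula {
  std::string Key; // stands in for the (ScaledReg, Scale) pair
  int Cost;        // stands in for the register/cost comparison
};

// Keeps only the cheapest formula per key, mirroring the
// "insert, and if the key exists compare against the incumbent" idiom.
static void filterByKey(std::vector<Formula> &Formulae) {
  std::map<std::string, size_t> Best; // key -> index of current winner
  std::vector<Formula> Kept;
  for (const Formula &F : Formulae) {
    auto P = Best.insert({F.Key, Kept.size()});
    if (P.second) { // first formula with this key: keep it
      Kept.push_back(F);
      continue;
    }
    Formula &Incumbent = Kept[P.first->second];
    if (F.Cost < Incumbent.Cost) // better formula: replace the incumbent
      Incumbent = F;
  }
  Formulae = Kept;
}

int main() {
  std::vector<Formula> Fs = {{"r1*4", 3}, {"r1*4", 2}, {"r2*8", 5}};
  filterByKey(Fs);
  assert(Fs.size() == 2 && Fs[0].Cost == 2);
  return 0;
}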
+
/// The function deletes formulas with high register number expectation.
/// Assuming we don't know the value of each formula (already delete
/// all inefficient), generate probability of not selecting for each
@@ -4516,6 +4622,8 @@ void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
NarrowSearchSpaceByDetectingSupersets();
NarrowSearchSpaceByCollapsingUnrolledCode();
NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
+ if (FilterSameScaledReg)
+ NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
if (LSRExpNarrow)
NarrowSearchSpaceByDeletingCostlyFormulas();
else
diff --git a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index acd3ef6791be..6727cf0179c1 100644
--- a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -238,7 +238,7 @@ PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0,
&BB->front());
NewPN->addIncoming(Opd1, S0->getParent());
NewPN->addIncoming(Opd2, S1->getParent());
- if (MD && NewPN->getType()->getScalarType()->isPointerTy())
+ if (MD && NewPN->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(NewPN);
return NewPN;
}
diff --git a/lib/Transforms/Scalar/NewGVN.cpp b/lib/Transforms/Scalar/NewGVN.cpp
index 9cf01c6582b5..9d018563618e 100644
--- a/lib/Transforms/Scalar/NewGVN.cpp
+++ b/lib/Transforms/Scalar/NewGVN.cpp
@@ -866,9 +866,7 @@ PHIExpression *NewGVN::createPHIExpression(Instruction *I, bool &HasBackedge,
// Things in TOPClass are equivalent to everything.
if (ValueToClass.lookup(*U) == TOPClass)
return false;
- if (lookupOperandLeader(*U) == PN)
- return false;
- return true;
+ return lookupOperandLeader(*U) != PN;
});
std::transform(Filtered.begin(), Filtered.end(), op_inserter(E),
[&](const Use *U) -> Value * {
@@ -2063,9 +2061,10 @@ Value *NewGVN::getNextValueLeader(CongruenceClass *CC) const {
//
// The invariants of this function are:
//
-// I must be moving to NewClass from OldClass The StoreCount of OldClass and
-// NewClass is expected to have been updated for I already if it is is a store.
-// The OldClass memory leader has not been updated yet if I was the leader.
+// - I must be moving to NewClass from OldClass
+// - The StoreCount of OldClass and NewClass is expected to have been updated
+// for I already if it is a store.
+// - The OldClass memory leader has not been updated yet if I was the leader.
void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I,
MemoryAccess *InstMA,
CongruenceClass *OldClass,
@@ -2074,7 +2073,8 @@ void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I,
// be the MemoryAccess of OldClass.
assert((!InstMA || !OldClass->getMemoryLeader() ||
OldClass->getLeader() != I ||
- OldClass->getMemoryLeader() == InstMA) &&
+ MemoryAccessToClass.lookup(OldClass->getMemoryLeader()) ==
+ MemoryAccessToClass.lookup(InstMA)) &&
"Representative MemoryAccess mismatch");
// First, see what happens to the new class
if (!NewClass->getMemoryLeader()) {
@@ -2136,7 +2136,7 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E,
<< NewClass->getID() << " from " << *NewClass->getLeader()
<< " to " << *SI << " because store joined class\n");
// If we changed the leader, we have to mark it changed because we don't
- // know what it will do to symbolic evlauation.
+ // know what it will do to symbolic evaluation.
NewClass->setLeader(SI);
}
// We rely on the code below handling the MemoryAccess change.
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index cdba0062953f..29d1ba406ae4 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -2148,7 +2148,7 @@ void ReassociatePass::ReassociateExpression(BinaryOperator *I) {
if (I->getOpcode() == Instruction::Mul &&
cast<Instruction>(I->user_back())->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Ops.back().Op) &&
- cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) {
+ cast<ConstantInt>(Ops.back().Op)->isMinusOne()) {
ValueEntry Tmp = Ops.pop_back_val();
Ops.insert(Ops.begin(), Tmp);
} else if (I->getOpcode() == Instruction::FMul &&
diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index a73e9aec0617..f19d45329d23 100644
--- a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1994,7 +1994,7 @@ static void rematerializeLiveValues(CallSite CS,
Instruction *LastClonedValue = nullptr;
Instruction *LastValue = nullptr;
for (Instruction *Instr: ChainToBase) {
- // Only GEP's and casts are suported as we need to be careful to not
+ // Only GEP's and casts are supported as we need to be careful to not
// introduce any new uses of pointers not in the liveset.
// Note that it's fine to introduce new uses of pointers which were
// otherwise not used after this statepoint.
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 7a6fa1711411..a738ebb4607e 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -963,7 +963,7 @@ void SCCPSolver::visitBinaryOperator(Instruction &I) {
} else {
// X or -1 = -1
if (ConstantInt *CI = NonOverdefVal->getConstantInt())
- if (CI->isAllOnesValue())
+ if (CI->isMinusOne())
return markConstant(IV, &I, NonOverdefVal->getConstant());
}
}
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 4729f4ef5956..b9cee5b2ba95 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -1673,8 +1673,7 @@ static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
// See if we need inttoptr for this type pair. A cast involving both scalars
// and vectors requires an additional bitcast.
- if (OldTy->getScalarType()->isIntegerTy() &&
- NewTy->getScalarType()->isPointerTy()) {
+ if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
// Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
if (OldTy->isVectorTy() && !NewTy->isVectorTy())
return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
@@ -1690,8 +1689,7 @@ static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
// See if we need ptrtoint for this type pair. A cast involving both scalars
// and vectors requires an additional bitcast.
- if (OldTy->getScalarType()->isPointerTy() &&
- NewTy->getScalarType()->isIntegerTy()) {
+ if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
// Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
if (OldTy->isVectorTy() && !NewTy->isVectorTy())
return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
@@ -2400,7 +2398,7 @@ private:
LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
LI.isVolatile(), LI.getName());
if (LI.isVolatile())
- NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
// Any !nonnull metadata or !range metadata on the old load is also valid
// on the new load. This is even true in some cases even when the loads
@@ -2435,7 +2433,7 @@ private:
getSliceAlign(TargetTy),
LI.isVolatile(), LI.getName());
if (LI.isVolatile())
- NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
V = NewLI;
IsPtrAdjusted = true;
@@ -2578,7 +2576,7 @@ private:
}
NewSI->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access);
if (SI.isVolatile())
- NewSI->setAtomic(SI.getOrdering(), SI.getSynchScope());
+ NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
Pass.DeadInsts.insert(&SI);
deleteIfTriviallyDead(OldOp);
diff --git a/lib/Transforms/Scalar/StructurizeCFG.cpp b/lib/Transforms/Scalar/StructurizeCFG.cpp
index 486f3e5a43d4..0cccb415efdb 100644
--- a/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -329,7 +329,7 @@ void StructurizeCFG::analyzeLoops(RegionNode *N) {
Loops[Exit] = N->getEntry();
} else {
- // Test for sucessors as back edge
+ // Test for successors as back edge
BasicBlock *BB = N->getNodeAs<BasicBlock>();
BranchInst *Term = cast<BranchInst>(BB->getTerminator());
diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp
index 314c990293cc..7e75e8847785 100644
--- a/lib/Transforms/Utils/CloneFunction.cpp
+++ b/lib/Transforms/Utils/CloneFunction.cpp
@@ -46,13 +46,21 @@ BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
if (BB->hasName()) NewBB->setName(BB->getName()+NameSuffix);
bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
-
+ Module *TheModule = F ? F->getParent() : nullptr;
+
// Loop over all instructions, and copy them over.
for (BasicBlock::const_iterator II = BB->begin(), IE = BB->end();
II != IE; ++II) {
- if (DIFinder && F->getParent() && II->getDebugLoc())
- DIFinder->processLocation(*F->getParent(), II->getDebugLoc().get());
+ if (DIFinder && TheModule) {
+ if (auto *DDI = dyn_cast<DbgDeclareInst>(II))
+ DIFinder->processDeclare(*TheModule, DDI);
+ else if (auto *DVI = dyn_cast<DbgValueInst>(II))
+ DIFinder->processValue(*TheModule, DVI);
+
+ if (auto DbgLoc = II->getDebugLoc())
+ DIFinder->processLocation(*TheModule, DbgLoc.get());
+ }
Instruction *NewInst = II->clone();
if (II->hasName())
@@ -153,6 +161,8 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
// When we remap instructions, we want to avoid duplicating inlined
// DISubprograms, so record all subprograms we find as we duplicate
// instructions and then freeze them in the MD map.
+ // We also record information about dbg.value and dbg.declare to avoid
+ // duplicating the types.
DebugInfoFinder DIFinder;
// Loop over all of the basic blocks in the function, cloning them as
@@ -193,6 +203,10 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
}
}
+ for (auto *Type : DIFinder.types()) {
+ VMap.MD()[Type].reset(Type);
+ }
+
// Loop over all of the instructions in the function, fixing up operand
// references as we go. This uses VMap to do all the hard work.
for (Function::iterator BB =
diff --git a/lib/Transforms/Utils/CmpInstAnalysis.cpp b/lib/Transforms/Utils/CmpInstAnalysis.cpp
index 9f4d9c7e3981..d9294c499309 100644
--- a/lib/Transforms/Utils/CmpInstAnalysis.cpp
+++ b/lib/Transforms/Utils/CmpInstAnalysis.cpp
@@ -81,7 +81,7 @@ bool llvm::decomposeBitTestICmp(const ICmpInst *I, CmpInst::Predicate &Pred,
break;
case ICmpInst::ICMP_SGT:
// X > -1 is equivalent to (X & SignMask) == 0.
- if (!C->isAllOnesValue())
+ if (!C->isMinusOne())
return false;
Y = ConstantInt::get(I->getContext(), APInt::getSignMask(C->getBitWidth()));
Pred = ICmpInst::ICMP_EQ;
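The case above rests on the identity that the signed comparison X > -1 holds exactly when the sign bit of X is clear. A small standalone check of that equivalence over all 8-bit values, using the same APInt::getSignMask helper the patch uses (illustration only, not part of the patch):

#include "llvm/ADT/APInt.h"
#include <cassert>
#include <cstdint>

int main() {
  const unsigned BitWidth = 8;
  const llvm::APInt SignMask = llvm::APInt::getSignMask(BitWidth);
  for (uint64_t V = 0; V < 256; ++V) {
    llvm::APInt X(BitWidth, V);
    // X > -1 (signed) means "X is non-negative", i.e. the sign bit is unset.
    bool SGtMinusOne = X.isNonNegative();
    bool SignBitClear = (X & SignMask) == 0;
    assert(SGtMinusOne == SignBitClear);
  }
  return 0;
}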
diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp
index 30d8856cfbef..1189714dfab1 100644
--- a/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/lib/Transforms/Utils/CodeExtractor.cpp
@@ -1116,12 +1116,6 @@ Function *CodeExtractor::extractCodeRegion() {
}
}
- //cerr << "NEW FUNCTION: " << *newFunction;
- // verifyFunction(*newFunction);
-
- // cerr << "OLD FUNCTION: " << *oldFunction;
- // verifyFunction(*oldFunction);
-
DEBUG(if (verifyFunction(*newFunction))
report_fatal_error("verifyFunction failed!"));
return newFunction;
diff --git a/lib/Transforms/Utils/Evaluator.cpp b/lib/Transforms/Utils/Evaluator.cpp
index c97e544e620a..1328f2f3ec01 100644
--- a/lib/Transforms/Utils/Evaluator.cpp
+++ b/lib/Transforms/Utils/Evaluator.cpp
@@ -402,7 +402,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Value *Ptr = PtrArg->stripPointerCasts();
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
Type *ElemTy = GV->getValueType();
- if (!Size->isAllOnesValue() &&
+ if (!Size->isMinusOne() &&
Size->getValue().getLimitedValue() >=
DL.getTypeStoreSize(ElemTy)) {
Invariants.insert(GV);
diff --git a/lib/Transforms/Utils/FunctionComparator.cpp b/lib/Transforms/Utils/FunctionComparator.cpp
index 0457294361b5..4a2be3a53176 100644
--- a/lib/Transforms/Utils/FunctionComparator.cpp
+++ b/lib/Transforms/Utils/FunctionComparator.cpp
@@ -513,8 +513,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res =
cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
return Res;
- if (int Res =
- cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
+ if (int Res = cmpNumbers(LI->getSyncScopeID(),
+ cast<LoadInst>(R)->getSyncScopeID()))
return Res;
return cmpRangeMetadata(LI->getMetadata(LLVMContext::MD_range),
cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
@@ -529,7 +529,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res =
cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
return Res;
- return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
+ return cmpNumbers(SI->getSyncScopeID(),
+ cast<StoreInst>(R)->getSyncScopeID());
}
if (const CmpInst *CI = dyn_cast<CmpInst>(L))
return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
@@ -584,7 +585,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res =
cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
return Res;
- return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
+ return cmpNumbers(FI->getSyncScopeID(),
+ cast<FenceInst>(R)->getSyncScopeID());
}
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
if (int Res = cmpNumbers(CXI->isVolatile(),
@@ -601,8 +603,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
cmpOrderings(CXI->getFailureOrdering(),
cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
return Res;
- return cmpNumbers(CXI->getSynchScope(),
- cast<AtomicCmpXchgInst>(R)->getSynchScope());
+ return cmpNumbers(CXI->getSyncScopeID(),
+ cast<AtomicCmpXchgInst>(R)->getSyncScopeID());
}
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
if (int Res = cmpNumbers(RMWI->getOperation(),
@@ -614,8 +616,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res = cmpOrderings(RMWI->getOrdering(),
cast<AtomicRMWInst>(R)->getOrdering()))
return Res;
- return cmpNumbers(RMWI->getSynchScope(),
- cast<AtomicRMWInst>(R)->getSynchScope());
+ return cmpNumbers(RMWI->getSyncScopeID(),
+ cast<AtomicRMWInst>(R)->getSyncScopeID());
}
if (const PHINode *PNL = dyn_cast<PHINode>(L)) {
const PHINode *PNR = cast<PHINode>(R);
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index 5127eba3f9ae..74610613001c 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -1662,9 +1662,10 @@ void llvm::removeUnwindEdge(BasicBlock *BB) {
TI->eraseFromParent();
}
-/// removeUnreachableBlocksFromFn - Remove blocks that are not reachable, even
+/// removeUnreachableBlocks - Remove blocks that are not reachable, even
/// if they are in a dead cycle. Return true if a change was made, false
-/// otherwise.
+/// otherwise. If `LVI` is passed, this function preserves LazyValueInfo
+/// after modifying the CFG.
bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI) {
SmallPtrSet<BasicBlock*, 16> Reachable;
bool Changed = markAliveBlocks(F, Reachable);
@@ -2168,6 +2169,9 @@ bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
return true;
case Instruction::Call:
case Instruction::Invoke:
+ // Can't handle inline asm. Skip it.
+ if (isa<InlineAsm>(ImmutableCallSite(I).getCalledValue()))
+ return false;
// Many arithmetic intrinsics have no issue taking a
// variable, however it's hard to distinguish these from
// specials such as @llvm.frameaddress that require a constant.
@@ -2182,12 +2186,18 @@ bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
case Instruction::ShuffleVector:
// Shufflevector masks are constant.
return OpIdx != 2;
+ case Instruction::Switch:
case Instruction::ExtractValue:
- case Instruction::InsertValue:
// All operands apart from the first are constant.
return OpIdx == 0;
+ case Instruction::InsertValue:
+ // All operands apart from the first and the second are constant.
+ return OpIdx < 2;
case Instruction::Alloca:
- return false;
+ // Static allocas (constant size in the entry block) are handled by
+ // prologue/epilogue insertion so they're free anyway. We definitely don't
+ // want to make them non-constant.
+ return !dyn_cast<AllocaInst>(I)->isStaticAlloca();
case Instruction::GetElementPtr:
if (OpIdx == 0)
return true;
diff --git a/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index 9ad2b707e6b2..5170c68e2915 100644
--- a/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -65,9 +65,11 @@ static cl::opt<bool> UnrollRuntimeMultiExit(
/// than the unroll factor.
///
static void ConnectProlog(Loop *L, Value *BECount, unsigned Count,
- BasicBlock *PrologExit, BasicBlock *PreHeader,
- BasicBlock *NewPreHeader, ValueToValueMapTy &VMap,
- DominatorTree *DT, LoopInfo *LI, bool PreserveLCSSA) {
+ BasicBlock *PrologExit,
+ BasicBlock *OriginalLoopLatchExit,
+ BasicBlock *PreHeader, BasicBlock *NewPreHeader,
+ ValueToValueMapTy &VMap, DominatorTree *DT,
+ LoopInfo *LI, bool PreserveLCSSA) {
BasicBlock *Latch = L->getLoopLatch();
assert(Latch && "Loop must have a latch");
BasicBlock *PrologLatch = cast<BasicBlock>(VMap[Latch]);
@@ -142,17 +144,15 @@ static void ConnectProlog(Loop *L, Value *BECount, unsigned Count,
// then (BECount + 1) cannot unsigned-overflow.
Value *BrLoopExit =
B.CreateICmpULT(BECount, ConstantInt::get(BECount->getType(), Count - 1));
- BasicBlock *Exit = L->getUniqueExitBlock();
- assert(Exit && "Loop must have a single exit block only");
// Split the exit to maintain loop canonicalization guarantees
- SmallVector<BasicBlock*, 4> Preds(predecessors(Exit));
- SplitBlockPredecessors(Exit, Preds, ".unr-lcssa", DT, LI,
+ SmallVector<BasicBlock *, 4> Preds(predecessors(OriginalLoopLatchExit));
+ SplitBlockPredecessors(OriginalLoopLatchExit, Preds, ".unr-lcssa", DT, LI,
PreserveLCSSA);
// Add the branch to the exit block (around the unrolled loop)
- B.CreateCondBr(BrLoopExit, Exit, NewPreHeader);
+ B.CreateCondBr(BrLoopExit, OriginalLoopLatchExit, NewPreHeader);
InsertPt->eraseFromParent();
if (DT)
- DT->changeImmediateDominator(Exit, PrologExit);
+ DT->changeImmediateDominator(OriginalLoopLatchExit, PrologExit);
}
/// Connect the unrolling epilog code to the original loop.
@@ -427,6 +427,50 @@ CloneLoopBlocks(Loop *L, Value *NewIter, const bool CreateRemainderLoop,
return nullptr;
}
+/// Returns true if we can safely unroll a multi-exit/exiting loop. OtherExits
+/// is populated with all the loop exit blocks other than the LatchExit block.
+static bool
+canSafelyUnrollMultiExitLoop(Loop *L, SmallVectorImpl<BasicBlock *> &OtherExits,
+ BasicBlock *LatchExit, bool PreserveLCSSA,
+ bool UseEpilogRemainder) {
+
+ // Support runtime unrolling for multiple exit blocks and multiple exiting
+ // blocks.
+ if (!UnrollRuntimeMultiExit)
+ return false;
+ // Even if runtime multi exit is enabled, we currently have some correctness
+ // constrains in unrolling a multi-exit loop.
+ // We rely on LCSSA form being preserved when the exit blocks are transformed.
+ if (!PreserveLCSSA)
+ return false;
+ SmallVector<BasicBlock *, 4> Exits;
+ L->getUniqueExitBlocks(Exits);
+ for (auto *BB : Exits)
+ if (BB != LatchExit)
+ OtherExits.push_back(BB);
+
+ // TODO: Support multiple exiting blocks jumping to the `LatchExit` when
+ // UnrollRuntimeMultiExit is true. This will need updating the logic in
+ // connectEpilog/connectProlog.
+ if (!LatchExit->getSinglePredecessor()) {
+ DEBUG(dbgs() << "Bailout for multi-exit handling when latch exit has >1 "
+ "predecessor.\n");
+ return false;
+ }
+ // FIXME: We bail out of multi-exit unrolling when epilog loop is generated
+ // and L is an inner loop. This is because in presence of multiple exits, the
+ // outer loop is incorrect: we do not add the EpilogPreheader and exit to the
+ // outer loop. This is automatically handled in the prolog case, so we do not
+ // have that bug in prolog generation.
+ if (UseEpilogRemainder && L->getParentLoop())
+ return false;
+
+ // All constraints have been satisfied.
+ return true;
+}
+
+
+
/// Insert code in the prolog/epilog code when unrolling a loop with a
/// run-time trip-count.
///
@@ -470,53 +514,40 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
bool UseEpilogRemainder,
LoopInfo *LI, ScalarEvolution *SE,
DominatorTree *DT, bool PreserveLCSSA) {
- // for now, only unroll loops that contain a single exit
- if (!UnrollRuntimeMultiExit && !L->getExitingBlock())
- return false;
+ DEBUG(dbgs() << "Trying runtime unrolling on Loop: \n");
+ DEBUG(L->dump());
// Make sure the loop is in canonical form.
- if (!L->isLoopSimplifyForm())
+ if (!L->isLoopSimplifyForm()) {
+ DEBUG(dbgs() << "Not in simplify form!\n");
return false;
+ }
// Guaranteed by LoopSimplifyForm.
BasicBlock *Latch = L->getLoopLatch();
BasicBlock *Header = L->getHeader();
- BasicBlock *LatchExit = L->getUniqueExitBlock(); // successor out of loop
- if (!LatchExit && !UnrollRuntimeMultiExit)
- return false;
- // These are exit blocks other than the target of the latch exiting block.
- SmallVector<BasicBlock *, 4> OtherExits;
BranchInst *LatchBR = cast<BranchInst>(Latch->getTerminator());
- unsigned int ExitIndex = LatchBR->getSuccessor(0) == Header ? 1 : 0;
+ unsigned ExitIndex = LatchBR->getSuccessor(0) == Header ? 1 : 0;
+ BasicBlock *LatchExit = LatchBR->getSuccessor(ExitIndex);
// Cloning the loop basic blocks (`CloneLoopBlocks`) requires that one of the
// targets of the Latch be an exit block out of the loop. This needs
// to be guaranteed by the callers of UnrollRuntimeLoopRemainder.
- assert(!L->contains(LatchBR->getSuccessor(ExitIndex)) &&
+ assert(!L->contains(LatchExit) &&
"one of the loop latch successors should be the exit block!");
- // Support runtime unrolling for multiple exit blocks and multiple exiting
- // blocks.
- if (!LatchExit) {
- assert(UseEpilogRemainder && "Multi exit unrolling is currently supported "
- "unrolling with epilog remainder only!");
- LatchExit = LatchBR->getSuccessor(ExitIndex);
- // We rely on LCSSA form being preserved when the exit blocks are
- // transformed.
- if (!PreserveLCSSA)
- return false;
- // TODO: Support multiple exiting blocks jumping to the `LatchExit`. This
- // will need updating the logic in connectEpilog.
- if (!LatchExit->getSinglePredecessor())
- return false;
- SmallVector<BasicBlock *, 4> Exits;
- L->getUniqueExitBlocks(Exits);
- for (auto *BB : Exits)
- if (BB != LatchExit)
- OtherExits.push_back(BB);
+ // These are exit blocks other than the target of the latch exiting block.
+ SmallVector<BasicBlock *, 4> OtherExits;
+ bool isMultiExitUnrollingEnabled = canSafelyUnrollMultiExitLoop(
+ L, OtherExits, LatchExit, PreserveLCSSA, UseEpilogRemainder);
+ // Support only single exit and exiting block unless multi-exit loop unrolling is enabled.
+ if (!isMultiExitUnrollingEnabled &&
+ (!L->getExitingBlock() || OtherExits.size())) {
+ DEBUG(
+ dbgs()
+ << "Multiple exit/exiting blocks in loop and multi-exit unrolling not "
+ "enabled!\n");
+ return false;
}
-
- assert(LatchExit && "Latch Exit should exist!");
-
// Use Scalar Evolution to compute the trip count. This allows more loops to
// be unrolled than relying on induction var simplification.
if (!SE)
@@ -530,29 +561,38 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
// exiting blocks).
const SCEV *BECountSC = SE->getExitCount(L, Latch);
if (isa<SCEVCouldNotCompute>(BECountSC) ||
- !BECountSC->getType()->isIntegerTy())
+ !BECountSC->getType()->isIntegerTy()) {
+ DEBUG(dbgs() << "Could not compute exit block SCEV\n");
return false;
+ }
unsigned BEWidth = cast<IntegerType>(BECountSC->getType())->getBitWidth();
// Add 1 since the backedge count doesn't include the first loop iteration.
const SCEV *TripCountSC =
SE->getAddExpr(BECountSC, SE->getConstant(BECountSC->getType(), 1));
- if (isa<SCEVCouldNotCompute>(TripCountSC))
+ if (isa<SCEVCouldNotCompute>(TripCountSC)) {
+ DEBUG(dbgs() << "Could not compute trip count SCEV.\n");
return false;
+ }
BasicBlock *PreHeader = L->getLoopPreheader();
BranchInst *PreHeaderBR = cast<BranchInst>(PreHeader->getTerminator());
const DataLayout &DL = Header->getModule()->getDataLayout();
SCEVExpander Expander(*SE, DL, "loop-unroll");
if (!AllowExpensiveTripCount &&
- Expander.isHighCostExpansion(TripCountSC, L, PreHeaderBR))
+ Expander.isHighCostExpansion(TripCountSC, L, PreHeaderBR)) {
+ DEBUG(dbgs() << "High cost for expanding trip count scev!\n");
return false;
+ }
// This constraint lets us deal with an overflowing trip count easily; see the
// comment on ModVal below.
- if (Log2_32(Count) > BEWidth)
+ if (Log2_32(Count) > BEWidth) {
+ DEBUG(dbgs()
+ << "Count failed constraint on overflow trip count calculation.\n");
return false;
+ }
// Loop structure is the following:
//
@@ -711,11 +751,10 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
// node.
for (unsigned i =0; i < oldNumOperands; i++){
Value *newVal = VMap[Phi->getIncomingValue(i)];
- if (!newVal) {
- assert(isa<Constant>(Phi->getIncomingValue(i)) &&
- "VMap should exist for all values except constants!");
+ // newVal can be a constant or derived from values outside the loop, and
+ // hence need not have a VMap value.
+ if (!newVal)
newVal = Phi->getIncomingValue(i);
- }
Phi->addIncoming(newVal,
cast<BasicBlock>(VMap[Phi->getIncomingBlock(i)]));
}
@@ -781,8 +820,8 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
} else {
// Connect the prolog code to the original loop and update the
// PHI functions.
- ConnectProlog(L, BECount, Count, PrologExit, PreHeader, NewPreHeader,
- VMap, DT, LI, PreserveLCSSA);
+ ConnectProlog(L, BECount, Count, PrologExit, LatchExit, PreHeader,
+ NewPreHeader, VMap, DT, LI, PreserveLCSSA);
}
// If this loop is nested, then the loop unroller changes the code in the
diff --git a/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index 1c2a60a6b8b2..900450b40061 100644
--- a/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -8,12 +8,256 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
+static unsigned getLoopOperandSizeInBytes(Type *Type) {
+ if (VectorType *VTy = dyn_cast<VectorType>(Type)) {
+ return VTy->getBitWidth() / 8;
+ }
+
+ return Type->getPrimitiveSizeInBits() / 8;
+}
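The lowering defined next splits a known-size copy into a loop over whole loop-operand-sized chunks plus a residual tail for the leftover bytes. A tiny standalone sketch of that arithmetic with made-up numbers (a 37-byte copy with an i32 loop operand gives nine 4-byte iterations and one residual byte); it only restates the divisions the function performs.

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t CopyLenBytes = 37; // hypothetical memcpy length
  const unsigned LoopOpSize = 4;    // e.g. an i32 loop operand type

  uint64_t LoopEndCount = CopyLenBytes / LoopOpSize;     // full loop iterations
  uint64_t BytesCopied = LoopEndCount * LoopOpSize;      // covered by the loop
  uint64_t RemainingBytes = CopyLenBytes - BytesCopied;  // left for the residual

  assert(LoopEndCount == 9 && BytesCopied == 36 && RemainingBytes == 1);
  return 0;
}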
+
+void llvm::createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
+ Value *DstAddr, ConstantInt *CopyLen,
+ unsigned SrcAlign, unsigned DestAlign,
+ bool SrcIsVolatile, bool DstIsVolatile,
+ const TargetTransformInfo &TTI) {
+ // No need to expand zero length copies.
+ if (CopyLen->isZero())
+ return;
+
+ BasicBlock *PreLoopBB = InsertBefore->getParent();
+ BasicBlock *PostLoopBB = nullptr;
+ Function *ParentFunc = PreLoopBB->getParent();
+ LLVMContext &Ctx = PreLoopBB->getContext();
+
+ Type *TypeOfCopyLen = CopyLen->getType();
+ Type *LoopOpType =
+ TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAlign, DestAlign);
+
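+  // Copy as many full loop-width elements as fit into CopyLen; any leftover
+  // bytes are handled by the residual loads/stores emitted after the loop.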
+ unsigned LoopOpSize = getLoopOperandSizeInBytes(LoopOpType);
+ uint64_t LoopEndCount = CopyLen->getZExtValue() / LoopOpSize;
+
+ unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
+ unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();
+
+ if (LoopEndCount != 0) {
+    // Split the block at InsertBefore so the copy loop can be inserted
+    // between the two halves.
+ PostLoopBB = PreLoopBB->splitBasicBlock(InsertBefore, "memcpy-split");
+ BasicBlock *LoopBB =
+ BasicBlock::Create(Ctx, "load-store-loop", ParentFunc, PostLoopBB);
+ PreLoopBB->getTerminator()->setSuccessor(0, LoopBB);
+
+ IRBuilder<> PLBuilder(PreLoopBB->getTerminator());
+
+ // Cast the Src and Dst pointers to pointers to the loop operand type (if
+ // needed).
+ PointerType *SrcOpType = PointerType::get(LoopOpType, SrcAS);
+ PointerType *DstOpType = PointerType::get(LoopOpType, DstAS);
+ if (SrcAddr->getType() != SrcOpType) {
+ SrcAddr = PLBuilder.CreateBitCast(SrcAddr, SrcOpType);
+ }
+ if (DstAddr->getType() != DstOpType) {
+ DstAddr = PLBuilder.CreateBitCast(DstAddr, DstOpType);
+ }
+
+ IRBuilder<> LoopBuilder(LoopBB);
+ PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 2, "loop-index");
+ LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0U), PreLoopBB);
+ // Loop Body
+ Value *SrcGEP =
+ LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
+ Value *Load = LoopBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
+ Value *DstGEP =
+ LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
+ LoopBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
+
+ Value *NewIndex =
+ LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1U));
+ LoopIndex->addIncoming(NewIndex, LoopBB);
+
+ // Create the loop branch condition.
+ Constant *LoopEndCI = ConstantInt::get(TypeOfCopyLen, LoopEndCount);
+ LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, LoopEndCI),
+ LoopBB, PostLoopBB);
+ }
+
+ uint64_t BytesCopied = LoopEndCount * LoopOpSize;
+ uint64_t RemainingBytes = CopyLen->getZExtValue() - BytesCopied;
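+  // Copy whatever is left over after the loop with a short straight-line
+  // sequence of loads and stores, using the operand types the target suggests.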
+ if (RemainingBytes) {
+ IRBuilder<> RBuilder(PostLoopBB ? PostLoopBB->getFirstNonPHI()
+ : InsertBefore);
+
+ // Update the alignment based on the copy size used in the loop body.
+ SrcAlign = std::min(SrcAlign, LoopOpSize);
+ DestAlign = std::min(DestAlign, LoopOpSize);
+
+ SmallVector<Type *, 5> RemainingOps;
+ TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
+ SrcAlign, DestAlign);
+
+ for (auto OpTy : RemainingOps) {
+      // Calculate the new index
+ unsigned OperandSize = getLoopOperandSizeInBytes(OpTy);
+ uint64_t GepIndex = BytesCopied / OperandSize;
+ assert(GepIndex * OperandSize == BytesCopied &&
+ "Division should have no Remainder!");
+ // Cast source to operand type and load
+ PointerType *SrcPtrType = PointerType::get(OpTy, SrcAS);
+ Value *CastedSrc = SrcAddr->getType() == SrcPtrType
+ ? SrcAddr
+ : RBuilder.CreateBitCast(SrcAddr, SrcPtrType);
+ Value *SrcGEP = RBuilder.CreateInBoundsGEP(
+ OpTy, CastedSrc, ConstantInt::get(TypeOfCopyLen, GepIndex));
+ Value *Load = RBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
+
+ // Cast destination to operand type and store.
+ PointerType *DstPtrType = PointerType::get(OpTy, DstAS);
+ Value *CastedDst = DstAddr->getType() == DstPtrType
+ ? DstAddr
+ : RBuilder.CreateBitCast(DstAddr, DstPtrType);
+ Value *DstGEP = RBuilder.CreateInBoundsGEP(
+ OpTy, CastedDst, ConstantInt::get(TypeOfCopyLen, GepIndex));
+ RBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
+
+ BytesCopied += OperandSize;
+ }
+ }
+ assert(BytesCopied == CopyLen->getZExtValue() &&
+ "Bytes copied should match size in the call!");
+}
+
+void llvm::createMemCpyLoopUnknownSize(Instruction *InsertBefore,
+ Value *SrcAddr, Value *DstAddr,
+ Value *CopyLen, unsigned SrcAlign,
+ unsigned DestAlign, bool SrcIsVolatile,
+ bool DstIsVolatile,
+ const TargetTransformInfo &TTI) {
+ BasicBlock *PreLoopBB = InsertBefore->getParent();
+ BasicBlock *PostLoopBB =
+ PreLoopBB->splitBasicBlock(InsertBefore, "post-loop-memcpy-expansion");
+
+ Function *ParentFunc = PreLoopBB->getParent();
+ LLVMContext &Ctx = PreLoopBB->getContext();
+
+ Type *LoopOpType =
+ TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAlign, DestAlign);
+ unsigned LoopOpSize = getLoopOperandSizeInBytes(LoopOpType);
+
+ IRBuilder<> PLBuilder(PreLoopBB->getTerminator());
+
+ unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
+ unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();
+ PointerType *SrcOpType = PointerType::get(LoopOpType, SrcAS);
+ PointerType *DstOpType = PointerType::get(LoopOpType, DstAS);
+ if (SrcAddr->getType() != SrcOpType) {
+ SrcAddr = PLBuilder.CreateBitCast(SrcAddr, SrcOpType);
+ }
+ if (DstAddr->getType() != DstOpType) {
+ DstAddr = PLBuilder.CreateBitCast(DstAddr, DstOpType);
+ }
+
+ // Calculate the loop trip count, and remaining bytes to copy after the loop.
+ Type *CopyLenType = CopyLen->getType();
+ IntegerType *ILengthType = dyn_cast<IntegerType>(CopyLenType);
+ assert(ILengthType &&
+ "expected size argument to memcpy to be an integer type!");
+ ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
+ Value *RuntimeLoopCount = PLBuilder.CreateUDiv(CopyLen, CILoopOpSize);
+ Value *RuntimeResidual = PLBuilder.CreateURem(CopyLen, CILoopOpSize);
+ Value *RuntimeBytesCopied = PLBuilder.CreateSub(CopyLen, RuntimeResidual);
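+  // RuntimeBytesCopied is the byte offset at which the residual copy (if any)
+  // resumes once the main loop has finished.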
+
+ BasicBlock *LoopBB =
+ BasicBlock::Create(Ctx, "loop-memcpy-expansion", ParentFunc, nullptr);
+ IRBuilder<> LoopBuilder(LoopBB);
+
+ PHINode *LoopIndex = LoopBuilder.CreatePHI(CopyLenType, 2, "loop-index");
+ LoopIndex->addIncoming(ConstantInt::get(CopyLenType, 0U), PreLoopBB);
+
+ Value *SrcGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
+ Value *Load = LoopBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
+ Value *DstGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
+ LoopBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
+
+ Value *NewIndex =
+ LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(CopyLenType, 1U));
+ LoopIndex->addIncoming(NewIndex, LoopBB);
+
+ Type *Int8Type = Type::getInt8Ty(Ctx);
+ if (LoopOpType != Int8Type) {
+ // Loop body for the residual copy.
+ BasicBlock *ResLoopBB = BasicBlock::Create(Ctx, "loop-memcpy-residual",
+ PreLoopBB->getParent(), nullptr);
+ // Residual loop header.
+ BasicBlock *ResHeaderBB = BasicBlock::Create(
+ Ctx, "loop-memcpy-residual-header", PreLoopBB->getParent(), nullptr);
+
+    // Update the pre-loop basic block to branch to the correct place:
+    // branch to the main loop if the count is non-zero, branch to the residual
+    // loop if the copy size is smaller than one iteration of the main loop but
+    // non-zero, and finally branch past the residual loop if the memcpy size
+    // is zero.
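+    // The resulting control flow is:
+    //   PreLoopBB   -> LoopBB     if RuntimeLoopCount != 0, else ResHeaderBB
+    //   LoopBB      -> LoopBB     while NewIndex < RuntimeLoopCount, else ResHeaderBB
+    //   ResHeaderBB -> ResLoopBB  if RuntimeResidual != 0, else PostLoopBB
+    //   ResLoopBB   -> ResLoopBB  while ResNewIndex < RuntimeResidual, else PostLoopBB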
+ ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
+ PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
+ LoopBB, ResHeaderBB);
+ PreLoopBB->getTerminator()->eraseFromParent();
+
+ LoopBuilder.CreateCondBr(
+ LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
+ ResHeaderBB);
+
+ // Determine if we need to branch to the residual loop or bypass it.
+ IRBuilder<> RHBuilder(ResHeaderBB);
+ RHBuilder.CreateCondBr(RHBuilder.CreateICmpNE(RuntimeResidual, Zero),
+ ResLoopBB, PostLoopBB);
+
+ // Copy the residual with single byte load/store loop.
+ IRBuilder<> ResBuilder(ResLoopBB);
+ PHINode *ResidualIndex =
+ ResBuilder.CreatePHI(CopyLenType, 2, "residual-loop-index");
+ ResidualIndex->addIncoming(Zero, ResHeaderBB);
+
+ Value *SrcAsInt8 =
+ ResBuilder.CreateBitCast(SrcAddr, PointerType::get(Int8Type, SrcAS));
+ Value *DstAsInt8 =
+ ResBuilder.CreateBitCast(DstAddr, PointerType::get(Int8Type, DstAS));
+ Value *FullOffset = ResBuilder.CreateAdd(RuntimeBytesCopied, ResidualIndex);
+ Value *SrcGEP =
+ ResBuilder.CreateInBoundsGEP(Int8Type, SrcAsInt8, FullOffset);
+ Value *Load = ResBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
+ Value *DstGEP =
+ ResBuilder.CreateInBoundsGEP(Int8Type, DstAsInt8, FullOffset);
+ ResBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
+
+ Value *ResNewIndex =
+ ResBuilder.CreateAdd(ResidualIndex, ConstantInt::get(CopyLenType, 1U));
+ ResidualIndex->addIncoming(ResNewIndex, ResLoopBB);
+
+ // Create the loop branch condition.
+ ResBuilder.CreateCondBr(
+ ResBuilder.CreateICmpULT(ResNewIndex, RuntimeResidual), ResLoopBB,
+ PostLoopBB);
+ } else {
+ // In this case the loop operand type was a byte, and there is no need for a
+ // residual loop to copy the remaining memory after the main loop.
+    // We do, however, need to patch up the control flow by creating the
+    // terminators for the pre-loop block and the memcpy loop.
+ ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
+ PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
+ LoopBB, PostLoopBB);
+ PreLoopBB->getTerminator()->eraseFromParent();
+ LoopBuilder.CreateCondBr(
+ LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
+ PostLoopBB);
+ }
+}
+
void llvm::createMemCpyLoop(Instruction *InsertBefore,
Value *SrcAddr, Value *DstAddr, Value *CopyLen,
unsigned SrcAlign, unsigned DestAlign,
@@ -208,15 +452,41 @@ static void createMemSetLoop(Instruction *InsertBefore,
NewBB);
}
-void llvm::expandMemCpyAsLoop(MemCpyInst *Memcpy) {
- createMemCpyLoop(/* InsertBefore */ Memcpy,
- /* SrcAddr */ Memcpy->getRawSource(),
- /* DstAddr */ Memcpy->getRawDest(),
- /* CopyLen */ Memcpy->getLength(),
- /* SrcAlign */ Memcpy->getAlignment(),
- /* DestAlign */ Memcpy->getAlignment(),
- /* SrcIsVolatile */ Memcpy->isVolatile(),
- /* DstIsVolatile */ Memcpy->isVolatile());
+void llvm::expandMemCpyAsLoop(MemCpyInst *Memcpy,
+ const TargetTransformInfo &TTI) {
+ // Original implementation
+ if (!TTI.useWideIRMemcpyLoopLowering()) {
+ createMemCpyLoop(/* InsertBefore */ Memcpy,
+ /* SrcAddr */ Memcpy->getRawSource(),
+ /* DstAddr */ Memcpy->getRawDest(),
+ /* CopyLen */ Memcpy->getLength(),
+ /* SrcAlign */ Memcpy->getAlignment(),
+ /* DestAlign */ Memcpy->getAlignment(),
+ /* SrcIsVolatile */ Memcpy->isVolatile(),
+ /* DstIsVolatile */ Memcpy->isVolatile());
+ } else {
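+    // Wide lowering: use the known-size expansion when the copy length is a
+    // compile-time constant, and the runtime-sized loop expansion otherwise.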
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Memcpy->getLength())) {
+ createMemCpyLoopKnownSize(/* InsertBefore */ Memcpy,
+ /* SrcAddr */ Memcpy->getRawSource(),
+ /* DstAddr */ Memcpy->getRawDest(),
+ /* CopyLen */ CI,
+ /* SrcAlign */ Memcpy->getAlignment(),
+ /* DestAlign */ Memcpy->getAlignment(),
+ /* SrcIsVolatile */ Memcpy->isVolatile(),
+ /* DstIsVolatile */ Memcpy->isVolatile(),
+ /* TargetTransformInfo */ TTI);
+ } else {
+ createMemCpyLoopUnknownSize(/* InsertBefore */ Memcpy,
+ /* SrcAddr */ Memcpy->getRawSource(),
+ /* DstAddr */ Memcpy->getRawDest(),
+ /* CopyLen */ Memcpy->getLength(),
+ /* SrcAlign */ Memcpy->getAlignment(),
+ /* DestAlign */ Memcpy->getAlignment(),
+ /* SrcIsVolatile */ Memcpy->isVolatile(),
+ /* DstIsVolatile */ Memcpy->isVolatile(),
+                                  /* TargetTransformInfo */ TTI);
+ }
+ }
}
void llvm::expandMemMoveAsLoop(MemMoveInst *Memmove) {
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index e724b0a28c32..dee658f98393 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -5754,8 +5754,8 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (BasicBlock *Dom = BB->getSinglePredecessor()) {
auto *PBI = dyn_cast_or_null<BranchInst>(Dom->getTerminator());
if (PBI && PBI->isConditional() &&
- PBI->getSuccessor(0) != PBI->getSuccessor(1) &&
- (PBI->getSuccessor(0) == BB || PBI->getSuccessor(1) == BB)) {
+ PBI->getSuccessor(0) != PBI->getSuccessor(1)) {
+ assert(PBI->getSuccessor(0) == BB || PBI->getSuccessor(1) == BB);
bool CondIsFalse = PBI->getSuccessor(1) == BB;
Optional<bool> Implication = isImpliedCondition(
PBI->getCondition(), BI->getCondition(), DL, CondIsFalse);
diff --git a/lib/Transforms/Utils/SimplifyIndVar.cpp b/lib/Transforms/Utils/SimplifyIndVar.cpp
index ec8b0d426265..6d90e6b48358 100644
--- a/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -25,6 +25,7 @@
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -80,6 +81,7 @@ namespace {
bool IsSigned);
bool eliminateSDiv(BinaryOperator *SDiv);
bool strengthenOverflowingOperation(BinaryOperator *OBO, Value *IVOperand);
+ bool strengthenRightShift(BinaryOperator *BO, Value *IVOperand);
};
}
@@ -154,6 +156,7 @@ Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand)
void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) {
unsigned IVOperIdx = 0;
ICmpInst::Predicate Pred = ICmp->getPredicate();
+ ICmpInst::Predicate OriginalPred = Pred;
if (IVOperand != ICmp->getOperand(0)) {
// Swapped
assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
@@ -262,6 +265,16 @@ void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) {
ICmp->setPredicate(InvariantPredicate);
ICmp->setOperand(0, NewLHS);
ICmp->setOperand(1, NewRHS);
+ } else if (ICmpInst::isSigned(OriginalPred) &&
+ SE->isKnownNonNegative(S) && SE->isKnownNonNegative(X)) {
+    // If we were unable to do anything above, all we can do is canonicalize
+    // the comparison in the hope that it will open the door for other
+    // optimizations. If we find that we are comparing two non-negative values,
+    // we turn the instruction's predicate into its unsigned version. Note that
+    // we cannot rely on Pred here unless we check whether we have swapped it.
+ assert(ICmp->getPredicate() == OriginalPred && "Predicate changed?");
+ DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp << '\n');
+ ICmp->setPredicate(ICmpInst::getUnsignedPredicate(OriginalPred));
} else
return;
@@ -583,6 +596,35 @@ bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
return Changed;
}
+/// Annotate the Shr in (X << IVOperand) >> C as exact using the
+/// information from the IV's range. Returns true if anything changed, false
+/// otherwise.
+bool SimplifyIndvar::strengthenRightShift(BinaryOperator *BO,
+ Value *IVOperand) {
+ using namespace llvm::PatternMatch;
+
+ if (BO->getOpcode() == Instruction::Shl) {
+ bool Changed = false;
+ ConstantRange IVRange = SE->getUnsignedRange(SE->getSCEV(IVOperand));
+ for (auto *U : BO->users()) {
+ const APInt *C;
+ if (match(U,
+ m_AShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C))) ||
+ match(U,
+ m_LShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C)))) {
+ BinaryOperator *Shr = cast<BinaryOperator>(U);
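+        // (X << IVOperand) has at least IVOperand trailing zero bits, so if
+        // IVOperand is always >= C the right shift by C drops only zeros and
+        // can be marked exact.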
+ if (!Shr->isExact() && IVRange.getUnsignedMin().uge(*C)) {
+ Shr->setIsExact(true);
+ Changed = true;
+ }
+ }
+ }
+ return Changed;
+ }
+
+ return false;
+}
+
/// Add all uses of Def to the current IV's worklist.
static void pushIVUsers(
Instruction *Def,
@@ -675,8 +717,9 @@ void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
}
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(UseOper.first)) {
- if (isa<OverflowingBinaryOperator>(BO) &&
- strengthenOverflowingOperation(BO, IVOperand)) {
+ if ((isa<OverflowingBinaryOperator>(BO) &&
+ strengthenOverflowingOperation(BO, IVOperand)) ||
+ (isa<ShlOperator>(BO) && strengthenRightShift(BO, IVOperand))) {
// re-queue uses of the now modified binary operator and fall
// through to the checks that remain.
pushIVUsers(IVOperand, Simplified, SimpleIVUsers);
diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp
index b723b65f35e5..77c0a41929ac 100644
--- a/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -656,7 +656,7 @@ Value *LibCallSimplifier::optimizeMemChr(CallInst *CI, IRBuilder<> &B) {
ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
// memchr(x, y, 0) -> null
- if (LenC && LenC->isNullValue())
+ if (LenC && LenC->isZero())
return Constant::getNullValue(CI->getType());
// From now on we need at least constant length and string.
@@ -2280,7 +2280,7 @@ bool FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
return true;
if (ConstantInt *ObjSizeCI =
dyn_cast<ConstantInt>(CI->getArgOperand(ObjSizeOp))) {
- if (ObjSizeCI->isAllOnesValue())
+ if (ObjSizeCI->isMinusOne())
return true;
// If the object size wasn't -1 (unknown), bail out if we were asked to.
if (OnlyLowerUnknownSize)
diff --git a/lib/Transforms/Utils/VNCoercion.cpp b/lib/Transforms/Utils/VNCoercion.cpp
index 60d9ede2c487..c3feea6a0a41 100644
--- a/lib/Transforms/Utils/VNCoercion.cpp
+++ b/lib/Transforms/Utils/VNCoercion.cpp
@@ -51,25 +51,24 @@ static T *coerceAvailableValueToLoadTypeHelper(T *StoredVal, Type *LoadedTy,
// If the store and reload are the same size, we can always reuse it.
if (StoredValSize == LoadedValSize) {
// Pointer to Pointer -> use bitcast.
- if (StoredValTy->getScalarType()->isPointerTy() &&
- LoadedTy->getScalarType()->isPointerTy()) {
+ if (StoredValTy->isPtrOrPtrVectorTy() && LoadedTy->isPtrOrPtrVectorTy()) {
StoredVal = Helper.CreateBitCast(StoredVal, LoadedTy);
} else {
// Convert source pointers to integers, which can be bitcast.
- if (StoredValTy->getScalarType()->isPointerTy()) {
+ if (StoredValTy->isPtrOrPtrVectorTy()) {
StoredValTy = DL.getIntPtrType(StoredValTy);
StoredVal = Helper.CreatePtrToInt(StoredVal, StoredValTy);
}
Type *TypeToCastTo = LoadedTy;
- if (TypeToCastTo->getScalarType()->isPointerTy())
+ if (TypeToCastTo->isPtrOrPtrVectorTy())
TypeToCastTo = DL.getIntPtrType(TypeToCastTo);
if (StoredValTy != TypeToCastTo)
StoredVal = Helper.CreateBitCast(StoredVal, TypeToCastTo);
// Cast to pointer if the load needs a pointer type.
- if (LoadedTy->getScalarType()->isPointerTy())
+ if (LoadedTy->isPtrOrPtrVectorTy())
StoredVal = Helper.CreateIntToPtr(StoredVal, LoadedTy);
}
@@ -86,7 +85,7 @@ static T *coerceAvailableValueToLoadTypeHelper(T *StoredVal, Type *LoadedTy,
"canCoerceMustAliasedValueToLoad fail");
// Convert source pointers to integers, which can be manipulated.
- if (StoredValTy->getScalarType()->isPointerTy()) {
+ if (StoredValTy->isPtrOrPtrVectorTy()) {
StoredValTy = DL.getIntPtrType(StoredValTy);
StoredVal = Helper.CreatePtrToInt(StoredVal, StoredValTy);
}
@@ -112,7 +111,7 @@ static T *coerceAvailableValueToLoadTypeHelper(T *StoredVal, Type *LoadedTy,
if (LoadedTy != NewIntTy) {
// If the result is a pointer, inttoptr.
- if (LoadedTy->getScalarType()->isPointerTy())
+ if (LoadedTy->isPtrOrPtrVectorTy())
StoredVal = Helper.CreateIntToPtr(StoredVal, LoadedTy);
else
// Otherwise, bitcast.
@@ -316,7 +315,7 @@ static T *getStoreValueForLoadHelper(T *SrcVal, unsigned Offset, Type *LoadTy,
uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
- if (SrcVal->getType()->getScalarType()->isPointerTy())
+ if (SrcVal->getType()->isPtrOrPtrVectorTy())
SrcVal = Helper.CreatePtrToInt(SrcVal, DL.getIntPtrType(SrcVal->getType()));
if (!SrcVal->getType()->isIntegerTy())
SrcVal = Helper.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize * 8));
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 193cc4d13787..eb82ee283d44 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5315,8 +5315,13 @@ void LoopVectorizationLegality::addInductionPhi(
// Both the PHI node itself, and the "post-increment" value feeding
// back into the PHI node may have external users.
- AllowedExit.insert(Phi);
- AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
+ // We can allow those uses, except if the SCEVs we have for them rely
+ // on predicates that only hold within the loop, since allowing the exit
+ // currently means re-using this SCEV outside the loop.
+ if (PSE.getUnionPredicate().isAlwaysTrue()) {
+ AllowedExit.insert(Phi);
+ AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
+ }
DEBUG(dbgs() << "LV: Found an induction variable.\n");
return;
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index b494526369d6..4425043ad39a 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -860,7 +860,7 @@ private:
bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);
/// Un-bundles a group of instructions.
- void cancelScheduling(ArrayRef<Value *> VL);
+ void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
/// Extends the scheduling region so that V is inside the region.
/// \returns true if the region size is within the limit.
@@ -1258,7 +1258,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
if (Term) {
DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1284,7 +1284,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (Reuse) {
DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
} else {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
}
newTreeEntry(VL, Reuse, UserTreeIdx);
return;
@@ -1301,7 +1301,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (DL->getTypeSizeInBits(ScalarTy) !=
DL->getTypeAllocSizeInBits(ScalarTy)) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
return;
@@ -1312,7 +1312,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
LoadInst *L = cast<LoadInst>(VL[i]);
if (!L->isSimple()) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
return;
@@ -1349,7 +1349,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
break;
}
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
if (ReverseConsecutive) {
@@ -1376,7 +1376,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
for (unsigned i = 0; i < VL.size(); ++i) {
Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
if (Ty != SrcTy || !isValidElementType(Ty)) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
return;
@@ -1404,7 +1404,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
CmpInst *Cmp = cast<CmpInst>(VL[i]);
if (Cmp->getPredicate() != P0 ||
Cmp->getOperand(0)->getType() != ComparedTy) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
return;
@@ -1471,7 +1471,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
for (unsigned j = 0; j < VL.size(); ++j) {
if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1484,7 +1484,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
if (Ty0 != CurTy) {
DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1496,7 +1496,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (!isa<ConstantInt>(Op)) {
DEBUG(
dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1518,7 +1518,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       // Check if the stores are consecutive or if we need to swizzle them.
for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
return;
@@ -1541,7 +1541,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
// represented by an intrinsic call
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
if (!isTriviallyVectorizable(ID)) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
return;
@@ -1555,7 +1555,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (!CI2 || CI2->getCalledFunction() != Int ||
getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
!CI->hasIdenticalOperandBundleSchema(*CI2)) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
<< "\n");
@@ -1566,7 +1566,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (hasVectorInstrinsicScalarOpd(ID, 1)) {
Value *A1J = CI2->getArgOperand(1);
if (A1I != A1J) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
<< " argument "<< A1I<<"!=" << A1J
@@ -1579,7 +1579,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
!std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
CI->op_begin() + CI->getBundleOperandsEndIndex(),
CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!="
<< *VL[i] << '\n');
@@ -1603,7 +1603,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
// If this is not an alternate sequence of opcode like add-sub
// then do not vectorize this instruction.
if (!isAltShuffle) {
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
return;
@@ -1631,7 +1631,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
return;
}
default:
- BS.cancelScheduling(VL);
+ BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx);
DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
return;
@@ -3177,17 +3177,18 @@ bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
}
}
if (!Bundle->isReady()) {
- cancelScheduling(VL);
+ cancelScheduling(VL, VL[0]);
return false;
}
return true;
}
-void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
- if (isa<PHINode>(VL[0]))
+void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
+ Value *OpValue) {
+ if (isa<PHINode>(OpValue))
return;
- ScheduleData *Bundle = getScheduleData(VL[0]);
+ ScheduleData *Bundle = getScheduleData(OpValue);
DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
assert(!Bundle->IsScheduled &&
"Can't cancel bundle which is already scheduled");
diff --git a/runtimes/CMakeLists.txt b/runtimes/CMakeLists.txt
index 6793a49a2ddc..3e3eff39d637 100644
--- a/runtimes/CMakeLists.txt
+++ b/runtimes/CMakeLists.txt
@@ -62,7 +62,7 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
set(LLVM_MAIN_SRC_DIR ${LLVM_BUILD_MAIN_SRC_DIR})
if(APPLE)
- set(LLVM_ENABLE_LIBCXX ON CACHE BOOL "")
+ set(LLVM_ENABLE_LIBCXX ON CACHE BOOL "")
endif()
set(SAFE_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
@@ -96,15 +96,33 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
# The subdirectories need to treat this as standalone builds
set(${canon_name}_STANDALONE_BUILD On)
+ if(LLVM_RUNTIMES_TARGET)
+ if(NOT "${entry}" MATCHES "compiler-rt")
+ set(${canon_name}_INSTALL_PREFIX "lib/${LLVM_RUNTIMES_TARGET}/" CACHE STRING "" FORCE)
+ endif()
+ endif()
+
# Setting a variable to let sub-projects detect which other projects
# will be included under here.
set(HAVE_${canon_name} On)
endforeach()
+ set(SAFE_LLVM_BINARY_DIR ${LLVM_BINARY_DIR})
+ set(SAFE_LLVM_LIBRARY_OUTPUT_INTDIR ${LLVM_LIBRARY_OUTPUT_INTDIR})
+  set(SAFE_LLVM_RUNTIME_OUTPUT_INTDIR ${LLVM_RUNTIME_OUTPUT_INTDIR})
+
# We do this in two loops so that HAVE_* is set for each runtime before the
# other runtimes are added.
foreach(entry ${runtimes})
get_filename_component(projName ${entry} NAME)
+
+ if(LLVM_RUNTIMES_TARGET)
+ if(NOT "${entry}" MATCHES "compiler-rt")
+ set(LLVM_BINARY_DIR "${LLVM_LIBRARY_DIR}/${LLVM_RUNTIMES_TARGET}")
+ set(LLVM_LIBRARY_OUTPUT_INTDIR "${LLVM_LIBRARY_DIR}/${LLVM_RUNTIMES_TARGET}/lib")
+ set(LLVM_RUNTIME_OUTPUT_INTDIR "${LLVM_TOOLS_BINARY_DIR}/${LLVM_RUNTIMES_TARGET}")
+ endif()
+ endif()
# Between each sub-project we want to cache and clear the LIT properties
set_property(GLOBAL PROPERTY LLVM_LIT_TESTSUITES)
@@ -123,6 +141,14 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
list(APPEND RUNTIMES_LIT_PARAMS ${LLVM_LIT_PARAMS})
list(APPEND RUNTIMES_LIT_DEPENDS ${LLVM_LIT_DEPENDS})
list(APPEND RUNTIMES_LIT_EXTRA_ARGS ${LLVM_LIT_EXTRA_ARGS})
+
+ if(LLVM_RUNTIMES_TARGET)
+ if(NOT "${entry}" MATCHES "compiler-rt")
+ set(LLVM_BINARY_DIR "${SAFE_LLVM_BINARY_DIR}")
+ set(LLVM_LIBRARY_OUTPUT_INTDIR "${SAFE_LLVM_LIBRARY_OUTPUT_INTDIR}")
+ set(LLVM_RUNTIME_OUTPUT_INTDIR "${SAFE_LLVM_RUNTIME_OUTPUT_INTDIR}")
+ endif()
+ endif()
endforeach()
if(LLVM_INCLUDE_TESTS)
@@ -147,9 +173,9 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
message(SEND_ERROR "Missing target for runtime component ${component}!")
continue()
endif()
- if(LLVM_INCLUDE_TESTS AND NOT TARGET check-${component})
- message(SEND_ERROR "Missing check target for runtime component ${component}!")
- continue()
+
+ if(TARGET check-${component})
+ list(APPEND SUB_CHECK_TARGETS check-${component})
endif()
if(TARGET install-${component})
@@ -157,14 +183,18 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
endif()
endforeach()
- configure_file(
- ${CMAKE_CURRENT_SOURCE_DIR}/Components.cmake.in
- ${LLVM_BINARY_DIR}/runtimes/Components.cmake)
+ if(LLVM_RUNTIMES_TARGET)
+ configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/Components.cmake.in
+ ${LLVM_BINARY_DIR}/runtimes/${LLVM_RUNTIMES_TARGET}/Components.cmake)
+ else()
+ configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/Components.cmake.in
+ ${LLVM_BINARY_DIR}/runtimes/Components.cmake)
+ endif()
endif()
else() # if this is included from LLVM's CMake
- include(${LLVM_BINARY_DIR}/runtimes/Components.cmake OPTIONAL)
- set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${LLVM_BINARY_DIR}/runtimes/Components.cmake)
include(LLVMExternalProjectUtils)
if(NOT LLVM_BUILD_RUNTIMES)
@@ -190,6 +220,10 @@ else() # if this is included from LLVM's CMake
add_custom_target(builtins)
add_custom_target(install-builtins)
foreach(target ${LLVM_BUILTIN_TARGETS})
+ if(target STREQUAL "default")
+ set(target ${LLVM_DEFAULT_TARGET_TRIPLE})
+ endif()
+
string(REPLACE "-" ";" builtin_target_list ${target})
foreach(item ${builtin_target_list})
string(TOLOWER "${item}" item_lower)
@@ -246,40 +280,137 @@ else() # if this is included from LLVM's CMake
list(APPEND runtime_names ${projName})
endforeach()
- if(runtimes)
+ # runtime_register_target(target)
+ # Utility function to register external runtime target.
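+  # For each runtime it creates <runtime>-<target>, install-<runtime>-<target>
+  # and (when tests are enabled) check-<runtime>-<target> targets, all backed
+  # by a single external sub-build for that target triple.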
+ function(runtime_register_target target)
+ if(target STREQUAL LLVM_DEFAULT_TARGET_TRIPLE)
+ include(${LLVM_BINARY_DIR}/runtimes/Components.cmake OPTIONAL)
+ set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${LLVM_BINARY_DIR}/runtimes/Components.cmake)
+ else()
+ include(${LLVM_BINARY_DIR}/runtimes/${target}/Components.cmake OPTIONAL)
+ set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${LLVM_BINARY_DIR}/runtimes/${target}/Components.cmake)
+ endif()
foreach(runtime_name ${runtime_names})
- list(APPEND extra_targets
- ${runtime_name}
- install-${runtime_name}
- check-${runtime_name})
+ list(APPEND ${target}_extra_targets
+ ${runtime_name}-${target}
+ install-${runtime_name}-${target})
+ if(LLVM_INCLUDE_TESTS)
+ list(APPEND ${target}_test_targets check-${runtime_name}-${target})
+ endif()
+ endforeach()
+
+ foreach(name IN LISTS SUB_COMPONENTS SUB_INSTALL_TARGETS)
+ list(APPEND ${target}_extra_targets "${name}:${name}-${target}")
endforeach()
if(LLVM_INCLUDE_TESTS)
- set(test_targets runtimes-test-depends check-runtimes)
- foreach(component ${SUB_COMPONENTS})
- list(APPEND SUB_COMPONENT_CHECK_TARGETS check-${component})
+ list(APPEND ${target}_test_targets runtimes-test-depends-${target} check-runtimes-${target})
+ foreach(name IN LISTS SUB_CHECK_TARGETS)
+ list(APPEND ${target}_test_targets "${name}:${name}-${target}")
+ list(APPEND test_targets ${name}-${target})
endforeach()
+ set(test_targets "${test_targets}" PARENT_SCOPE)
endif()
- # Create a runtimes target that uses this file as its top-level CMake file.
- # The runtimes target is a configuration of all the runtime libraries
- # together in a single CMake invocaiton.
- llvm_ExternalProject_Add(runtimes
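+    # Forward any RUNTIMES_<target>_<name> cache variables to the sub-build as
+    # -D<name>=<value> arguments.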
+ get_cmake_property(variableNames VARIABLES)
+ foreach(variableName ${variableNames})
+ if(variableName MATCHES "^RUNTIMES_${target}")
+ string(REPLACE "RUNTIMES_${target}_" "" new_name ${variableName})
+ list(APPEND ${target}_extra_args "-D${new_name}=${${variableName}}")
+ endif()
+ endforeach()
+
+ if(NOT target STREQUAL LLVM_DEFAULT_TARGET_TRIPLE)
+ list(APPEND ${target}_extra_args "-DLLVM_RUNTIMES_TARGET=${target}")
+ endif()
+
+ llvm_ExternalProject_Add(runtimes-${target}
${CMAKE_CURRENT_SOURCE_DIR}
DEPENDS ${deps}
# Builtins were built separately above
CMAKE_ARGS -DCOMPILER_RT_BUILD_BUILTINS=Off
-DLLVM_INCLUDE_TESTS=${LLVM_INCLUDE_TESTS}
+ -DCMAKE_C_COMPILER_TARGET=${target}
+ -DCMAKE_CXX_COMPILER_TARGET=${target}
+ -DCMAKE_ASM_COMPILER_TARGET=${target}
+ -DCMAKE_C_COMPILER_WORKS=ON
+ -DCMAKE_CXX_COMPILER_WORKS=ON
+ -DCMAKE_ASM_COMPILER_WORKS=ON
+ -DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON
+ ${${target}_extra_args}
PASSTHROUGH_PREFIXES ${prefixes}
- EXTRA_TARGETS ${extra_targets}
- ${test_targets}
- ${SUB_COMPONENTS}
- ${SUB_COMPONENT_CHECK_TARGETS}
- ${SUB_INSTALL_TARGETS}
+ EXTRA_TARGETS ${${target}_extra_targets}
+ ${${target}_test_targets}
USE_TOOLCHAIN
${EXTRA_ARGS})
-
+ endfunction()
+
+ if(runtimes)
+ # Create a runtimes target that uses this file as its top-level CMake file.
+ # The runtimes target is a configuration of all the runtime libraries
+    # together in a single CMake invocation.
+ if(NOT LLVM_RUNTIME_TARGETS)
+ include(${LLVM_BINARY_DIR}/runtimes/Components.cmake OPTIONAL)
+ set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${LLVM_BINARY_DIR}/runtimes/Components.cmake)
+
+ foreach(runtime_name ${runtime_names})
+ list(APPEND extra_targets
+ ${runtime_name}
+ install-${runtime_name})
+ if(LLVM_INCLUDE_TESTS)
+ list(APPEND test_targets check-${runtime_name})
+ endif()
+ endforeach()
+
+ if(LLVM_INCLUDE_TESTS)
+ list(APPEND test_targets runtimes-test-depends check-runtimes)
+ foreach(component ${SUB_COMPONENTS})
+ list(APPEND SUB_CHECK_TARGETS check-${component})
+ endforeach()
+ endif()
+
+ llvm_ExternalProject_Add(runtimes
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ DEPENDS ${deps}
+ # Builtins were built separately above
+ CMAKE_ARGS -DCOMPILER_RT_BUILD_BUILTINS=Off
+ -DLLVM_INCLUDE_TESTS=${LLVM_INCLUDE_TESTS}
+ PASSTHROUGH_PREFIXES ${prefixes}
+ EXTRA_TARGETS ${extra_targets}
+ ${test_targets}
+ ${SUB_COMPONENTS}
+ ${SUB_CHECK_TARGETS}
+ ${SUB_INSTALL_TARGETS}
+ USE_TOOLCHAIN
+ ${EXTRA_ARGS})
+ else()
+ add_custom_target(runtimes)
+ add_custom_target(runtimes-configure)
+ add_custom_target(install-runtimes)
+ if(LLVM_INCLUDE_TESTS)
+ add_custom_target(check-runtimes)
+ add_custom_target(runtimes-test-depends)
+ set(test_targets "")
+ endif()
+
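+      # Register one sub-build per requested runtime target and hang it off
+      # the umbrella targets created above.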
+ foreach(target ${LLVM_RUNTIME_TARGETS})
+ if(target STREQUAL "default")
+ set(target ${LLVM_DEFAULT_TARGET_TRIPLE})
+ endif()
+
+ runtime_register_target(${target})
+
+ add_dependencies(runtimes runtimes-${target})
+ add_dependencies(runtimes-configure runtimes-${target}-configure)
+ add_dependencies(install-runtimes install-runtimes-${target})
+ if(LLVM_INCLUDE_TESTS)
+ add_dependencies(check-runtimes check-runtimes-${target})
+ add_dependencies(runtimes-test-depends runtimes-test-depends-${target})
+ endif()
+ endforeach()
+ endif()
+
# TODO: This is a hack needed because the libcxx headers are copied into the
# build directory during configuration. Without that step the clang in the
# build directory cannot find the C++ headers in certain configurations.
@@ -292,6 +423,21 @@ else() # if this is included from LLVM's CMake
if(LLVM_INCLUDE_TESTS)
set_property(GLOBAL APPEND PROPERTY LLVM_ADDITIONAL_TEST_DEPENDS runtimes-test-depends)
set_property(GLOBAL APPEND PROPERTY LLVM_ADDITIONAL_TEST_TARGETS check-runtimes)
+
+ set(RUNTIMES_TEST_DEPENDS
+ FileCheck
+ count
+ llvm-nm
+ llvm-objdump
+ llvm-xray
+ not
+ obj2yaml
+ sancov
+ sanstats
+ )
+ foreach(target ${test_targets} ${SUB_CHECK_TARGETS})
+ add_dependencies(${target} ${RUNTIMES_TEST_DEPENDS})
+ endforeach()
endif()
endif()
endif()
diff --git a/runtimes/Components.cmake.in b/runtimes/Components.cmake.in
index 6e24ac380d18..1d8fb7ab174c 100644
--- a/runtimes/Components.cmake.in
+++ b/runtimes/Components.cmake.in
@@ -1,2 +1,3 @@
set(SUB_COMPONENTS @SUB_COMPONENTS@)
+set(SUB_CHECK_TARGETS @SUB_CHECK_TARGETS@)
set(SUB_INSTALL_TARGETS @SUB_INSTALL_TARGETS@)
diff --git a/test/Analysis/BasicAA/unreachable-block.ll b/test/Analysis/BasicAA/unreachable-block.ll
index 551d18e3e0fb..d6c149f81661 100644
--- a/test/Analysis/BasicAA/unreachable-block.ll
+++ b/test/Analysis/BasicAA/unreachable-block.ll
@@ -1,4 +1,4 @@
-; RUN: opt -basicaa -aa-eval -disable-output < %s >& /dev/null
+; RUN: opt -basicaa -aa-eval -disable-output < %s > /dev/null 2>&1
; BasicAA shouldn't infinitely recurse on the use-def cycles in
; unreachable code.
diff --git a/test/Analysis/CostModel/X86/slm-arith-costs.ll b/test/Analysis/CostModel/X86/slm-arith-costs.ll
index 3673a5d9e067..a767aa30b8ed 100644
--- a/test/Analysis/CostModel/X86/slm-arith-costs.ll
+++ b/test/Analysis/CostModel/X86/slm-arith-costs.ll
@@ -3,6 +3,20 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
+define <2 x i64> @slm-costs_64_vector_add(<2 x i64> %a, <2 x i64> %b) {
+entry:
+; SLM: cost of 4 {{.*}} add <2 x i64>
+ %res = add <2 x i64> %a, %b
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @slm-costs_64_vector_sub(<2 x i64> %a, <2 x i64> %b) {
+entry:
+; SLM: cost of 4 {{.*}} sub <2 x i64>
+ %res = sub <2 x i64> %a, %b
+ ret <2 x i64> %res
+}
+
; 8bit mul
define i8 @slm-costs_8_scalar_mul(i8 %a, i8 %b) {
entry:
@@ -13,7 +27,7 @@ entry:
define <2 x i8> @slm-costs_8_v2_mul(<2 x i8> %a, <2 x i8> %b) {
entry:
-; SLM: cost of 11 {{.*}} mul nsw <2 x i8>
+; SLM: cost of 17 {{.*}} mul nsw <2 x i8>
%res = mul nsw <2 x i8> %a, %b
ret <2 x i8> %res
}
@@ -97,7 +111,7 @@ entry:
define <2 x i16> @slm-costs_16_v2_mul(<2 x i16> %a, <2 x i16> %b) {
entry:
-; SLM: cost of 11 {{.*}} mul nsw <2 x i16>
+; SLM: cost of 17 {{.*}} mul nsw <2 x i16>
%res = mul nsw <2 x i16> %a, %b
ret <2 x i16> %res
}
@@ -181,7 +195,7 @@ entry:
define <2 x i32> @slm-costs_32_v2_mul(<2 x i32> %a, <2 x i32> %b) {
entry:
-; SLM: cost of 11 {{.*}} mul nsw <2 x i32>
+; SLM: cost of 17 {{.*}} mul nsw <2 x i32>
%res = mul nsw <2 x i32> %a, %b
ret <2 x i32> %res
}
@@ -217,28 +231,28 @@ entry:
define <2 x i64> @slm-costs_64_v2_mul(<2 x i64> %a, <2 x i64> %b) {
entry:
-; SLM: cost of 11 {{.*}} mul nsw <2 x i64>
+; SLM: cost of 17 {{.*}} mul nsw <2 x i64>
%res = mul nsw <2 x i64> %a, %b
ret <2 x i64> %res
}
define <4 x i64> @slm-costs_64_v4_mul(<4 x i64> %a, <4 x i64> %b) {
entry:
-; SLM: cost of 22 {{.*}} mul nsw <4 x i64>
+; SLM: cost of 34 {{.*}} mul nsw <4 x i64>
%res = mul nsw <4 x i64> %a, %b
ret <4 x i64> %res
}
define <8 x i64> @slm-costs_64_v8_mul(<8 x i64> %a, <8 x i64> %b) {
entry:
-; SLM: cost of 44 {{.*}} mul nsw <8 x i64>
+; SLM: cost of 68 {{.*}} mul nsw <8 x i64>
%res = mul nsw <8 x i64> %a, %b
ret <8 x i64> %res
}
define <16 x i64> @slm-costs_64_v16_mul(<16 x i64> %a, <16 x i64> %b) {
entry:
-; SLM: cost of 88 {{.*}} mul nsw <16 x i64>
+; SLM: cost of 136 {{.*}} mul nsw <16 x i64>
%res = mul nsw <16 x i64> %a, %b
ret <16 x i64> %res
}
diff --git a/test/Analysis/DependenceAnalysis/BasePtrBug.ll b/test/Analysis/DependenceAnalysis/BasePtrBug.ll
new file mode 100644
index 000000000000..8de75df7dbdd
--- /dev/null
+++ b/test/Analysis/DependenceAnalysis/BasePtrBug.ll
@@ -0,0 +1,80 @@
+; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
+
+; Test that the dependence analysis generates the correct results when using
+; an aliased object that points to a different element in the same array.
+; PR33567 - https://bugs.llvm.org/show_bug.cgi?id=33567
+
+; void test1(int *A, int *B, int N) {
+; int *top = A;
+; int *bot = A + N/2;
+; for (int i = 0; i < N; i++)
+; B[i] = top[i] + bot[i];
+; }
+
+; CHECK-LABEL: test1
+; CHECK: da analyze - input [*|<]!
+
+define void @test1(i32* nocapture %A, i32* nocapture %B, i32 %N) #0 {
+entry:
+ %cmp9 = icmp sgt i32 %N, 0
+ br i1 %cmp9, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph:
+ %div = sdiv i32 %N, 2
+ %bot.gep = getelementptr i32, i32* %A, i32 %div
+ br label %for.body
+
+for.body:
+ %i = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %gep.0 = getelementptr i32, i32* %A, i32 %i
+ %gep.1 = getelementptr i32, i32* %bot.gep, i32 %i
+ %gep.B = getelementptr i32, i32* %B, i32 %i
+ %0 = load i32, i32* %gep.0, align 4
+ %1 = load i32, i32* %gep.1, align 4
+ %add = add nsw i32 %1, %0
+ store i32 %add, i32* %gep.B, align 4
+ %inc = add nsw i32 %i, 1
+ %exitcond = icmp eq i32 %inc, %N
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+
+; void test2(int *A, unsigned n) {
+; int *B = A + 1;
+; for (unsigned i = 0; i < n; ++i) {
+; A[i] = B[i];
+; }
+; }
+
+; CHECK-LABEL: test2
+; CHECK: da analyze - consistent anti [1]!
+
+define void @test2(i32*, i32) #3 {
+ %3 = getelementptr inbounds i32, i32* %0, i64 1
+ br label %4
+
+; <label>:4:
+ %.0 = phi i32 [ 0, %2 ], [ %14, %13 ]
+ %5 = sub i32 %1, 1
+ %6 = icmp ult i32 %.0, %5
+ br i1 %6, label %7, label %15
+
+; <label>:7:
+ %8 = zext i32 %.0 to i64
+ %9 = getelementptr inbounds i32, i32* %3, i64 %8
+ %10 = load i32, i32* %9, align 4
+ %11 = zext i32 %.0 to i64
+ %12 = getelementptr inbounds i32, i32* %0, i64 %11
+ store i32 %10, i32* %12, align 4
+ br label %13
+
+; <label>:13:
+ %14 = add i32 %.0, 1
+ br label %4
+
+; <label>:15:
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/guards.ll b/test/Analysis/ScalarEvolution/guards.ll
index 52ad4dc73d41..d4b1f431ffc6 100644
--- a/test/Analysis/ScalarEvolution/guards.ll
+++ b/test/Analysis/ScalarEvolution/guards.ll
@@ -19,7 +19,7 @@ entry:
loop:
; CHECK: loop:
; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
-; CHECK: %iv.inc.cmp = icmp slt i32 %iv.inc, %len
+; CHECK: %iv.inc.cmp = icmp ult i32 %iv.inc, %len
; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 %iv.inc.cmp) [ "deopt"() ]
; CHECK: leave:
@@ -41,7 +41,7 @@ leave:
define void @test_2(i32 %n, i32* %len_buf) {
; CHECK-LABEL: @test_2(
-; CHECK: [[LEN_SEXT:%[^ ]+]] = sext i32 %len to i64
+; CHECK: [[LEN_ZEXT:%[^ ]+]] = zext i32 %len to i64
; CHECK: br label %loop
entry:
@@ -52,7 +52,7 @@ loop:
; CHECK: loop:
; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %loop ], [ 0, %entry ]
; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-; CHECK: %iv.inc.cmp = icmp slt i64 %indvars.iv.next, [[LEN_SEXT]]
+; CHECK: %iv.inc.cmp = icmp ult i64 %indvars.iv.next, [[LEN_ZEXT]]
; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 %iv.inc.cmp) [ "deopt"() ]
; CHECK: leave:
diff --git a/test/Assembler/2003-11-11-ImplicitRename.ll b/test/Assembler/2003-11-11-ImplicitRename.ll
index 7bfd3c14bf19..84065a17846d 100644
--- a/test/Assembler/2003-11-11-ImplicitRename.ll
+++ b/test/Assembler/2003-11-11-ImplicitRename.ll
@@ -1,8 +1,7 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
void %test() {
%X = add int 0, 1
%X = add int 1, 2
ret void
}
-
diff --git a/test/Assembler/2007-11-26-AttributeOverload.ll b/test/Assembler/2007-11-26-AttributeOverload.ll
index aebc2e8d01e5..ab5d514a38b6 100644
--- a/test/Assembler/2007-11-26-AttributeOverload.ll
+++ b/test/Assembler/2007-11-26-AttributeOverload.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
declare i32 @atoi(i8*) nounwind readonly
declare i32 @atoi(i8*)
diff --git a/test/Assembler/atomic.ll b/test/Assembler/atomic.ll
index 148b95d88e30..a8b527f2f863 100644
--- a/test/Assembler/atomic.ll
+++ b/test/Assembler/atomic.ll
@@ -5,14 +5,20 @@
define void @f(i32* %x) {
; CHECK: load atomic i32, i32* %x unordered, align 4
load atomic i32, i32* %x unordered, align 4
- ; CHECK: load atomic volatile i32, i32* %x singlethread acquire, align 4
- load atomic volatile i32, i32* %x singlethread acquire, align 4
+ ; CHECK: load atomic volatile i32, i32* %x syncscope("singlethread") acquire, align 4
+ load atomic volatile i32, i32* %x syncscope("singlethread") acquire, align 4
+ ; CHECK: load atomic volatile i32, i32* %x syncscope("agent") acquire, align 4
+ load atomic volatile i32, i32* %x syncscope("agent") acquire, align 4
; CHECK: store atomic i32 3, i32* %x release, align 4
store atomic i32 3, i32* %x release, align 4
- ; CHECK: store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
- store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
- ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
- cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
+ ; CHECK: store atomic volatile i32 3, i32* %x syncscope("singlethread") monotonic, align 4
+ store atomic volatile i32 3, i32* %x syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 3, i32* %x syncscope("workgroup") monotonic, align 4
+ store atomic volatile i32 3, i32* %x syncscope("workgroup") monotonic, align 4
+ ; CHECK: cmpxchg i32* %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
+ cmpxchg i32* %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
+ ; CHECK: cmpxchg i32* %x, i32 1, i32 0 syncscope("workitem") monotonic monotonic
+ cmpxchg i32* %x, i32 1, i32 0 syncscope("workitem") monotonic monotonic
; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
@@ -23,9 +29,13 @@ define void @f(i32* %x) {
atomicrmw add i32* %x, i32 10 seq_cst
; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
atomicrmw volatile xchg i32* %x, i32 10 monotonic
- ; CHECK: fence singlethread release
- fence singlethread release
+ ; CHECK: atomicrmw volatile xchg i32* %x, i32 10 syncscope("agent") monotonic
+ atomicrmw volatile xchg i32* %x, i32 10 syncscope("agent") monotonic
+ ; CHECK: fence syncscope("singlethread") release
+ fence syncscope("singlethread") release
; CHECK: fence seq_cst
fence seq_cst
+ ; CHECK: fence syncscope("device") seq_cst
+ fence syncscope("device") seq_cst
ret void
}
diff --git a/test/Bitcode/Inputs/module-hash-strtab1.ll b/test/Bitcode/Inputs/module-hash-strtab1.ll
new file mode 100644
index 000000000000..6b4a3fce07ef
--- /dev/null
+++ b/test/Bitcode/Inputs/module-hash-strtab1.ll
@@ -0,0 +1,10 @@
+source_filename = "foo.c"
+
+$com = comdat any
+
+define void @main() comdat($com) {
+ call void @bar()
+ ret void
+}
+
+declare void @bar()
diff --git a/test/Bitcode/Inputs/module-hash-strtab2.ll b/test/Bitcode/Inputs/module-hash-strtab2.ll
new file mode 100644
index 000000000000..87d2478145bc
--- /dev/null
+++ b/test/Bitcode/Inputs/module-hash-strtab2.ll
@@ -0,0 +1,10 @@
+source_filename = "foo.c"
+
+$dat = comdat any
+
+define void @main() comdat($dat) {
+ call void @foo()
+ ret void
+}
+
+declare void @foo()
diff --git a/test/Bitcode/atomic-no-syncscope.ll b/test/Bitcode/atomic-no-syncscope.ll
new file mode 100644
index 000000000000..a57507bc8146
--- /dev/null
+++ b/test/Bitcode/atomic-no-syncscope.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-dis -o - %s.bc | FileCheck %s
+
+; Backwards compatibility test: make sure we can process bitcode without
+; synchronization scope names encoded in it.
+
+; CHECK: load atomic i32, i32* %x unordered, align 4
+; CHECK: load atomic volatile i32, i32* %x syncscope("singlethread") acquire, align 4
+; CHECK: store atomic i32 3, i32* %x release, align 4
+; CHECK: store atomic volatile i32 3, i32* %x syncscope("singlethread") monotonic, align 4
+; CHECK: cmpxchg i32* %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
+; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
+; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
+; CHECK: cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic
+; CHECK: atomicrmw add i32* %x, i32 10 seq_cst
+; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
+; CHECK: fence syncscope("singlethread") release
+; CHECK: fence seq_cst
diff --git a/test/Bitcode/atomic-no-syncscope.ll.bc b/test/Bitcode/atomic-no-syncscope.ll.bc
new file mode 100644
index 000000000000..01d565eb4426
--- /dev/null
+++ b/test/Bitcode/atomic-no-syncscope.ll.bc
Binary files differ
diff --git a/test/Bitcode/atomic.ll b/test/Bitcode/atomic.ll
index c09e74c1c2f2..bef3f2712935 100644
--- a/test/Bitcode/atomic.ll
+++ b/test/Bitcode/atomic.ll
@@ -11,8 +11,8 @@ define void @test_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
; CHECK: cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
- cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread release monotonic
- ; CHECK: cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread release monotonic
+ cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
+ ; CHECK: cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
ret void
}
diff --git a/test/Bitcode/compatibility-3.6.ll b/test/Bitcode/compatibility-3.6.ll
index 8d51ee11a209..cf6c30e7c26c 100644
--- a/test/Bitcode/compatibility-3.6.ll
+++ b/test/Bitcode/compatibility-3.6.ll
@@ -551,8 +551,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -571,33 +571,33 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
; XXX: The parser spits out the load type here.
%ld.1 = load atomic i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility-3.7.ll b/test/Bitcode/compatibility-3.7.ll
index ebdf4c30587c..180dad258b68 100644
--- a/test/Bitcode/compatibility-3.7.ll
+++ b/test/Bitcode/compatibility-3.7.ll
@@ -596,8 +596,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -616,32 +616,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility-3.8.ll b/test/Bitcode/compatibility-3.8.ll
index 57ea3e068376..370c7f51a2b7 100644
--- a/test/Bitcode/compatibility-3.8.ll
+++ b/test/Bitcode/compatibility-3.8.ll
@@ -627,8 +627,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -647,32 +647,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility-3.9.ll b/test/Bitcode/compatibility-3.9.ll
index 2a6cfe14cdb1..4115cbd8fe64 100644
--- a/test/Bitcode/compatibility-3.9.ll
+++ b/test/Bitcode/compatibility-3.9.ll
@@ -698,8 +698,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -718,32 +718,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility-4.0.ll b/test/Bitcode/compatibility-4.0.ll
index c83c107a2927..eef925564ecb 100644
--- a/test/Bitcode/compatibility-4.0.ll
+++ b/test/Bitcode/compatibility-4.0.ll
@@ -698,8 +698,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -718,32 +718,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility.ll b/test/Bitcode/compatibility.ll
index ec69344947c5..ebd727ba9aee 100644
--- a/test/Bitcode/compatibility.ll
+++ b/test/Bitcode/compatibility.ll
@@ -705,8 +705,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -725,32 +725,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/memInstructions.3.2.ll b/test/Bitcode/memInstructions.3.2.ll
index 1ab05b6d1b42..c530b6d2cb17 100644
--- a/test/Bitcode/memInstructions.3.2.ll
+++ b/test/Bitcode/memInstructions.3.2.ll
@@ -107,29 +107,29 @@ entry:
; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
%res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
- %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
+ %res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
- %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+ %res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
- %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
+ %res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
-; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
- %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+ %res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
-; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
- %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
+ %res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
- %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+ %res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
- %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
+ %res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
-; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
- %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+ %res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
ret void
}
@@ -193,29 +193,29 @@ entry:
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
- store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
+ store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
- store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+ store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread release, align 1
- store atomic i8 2, i8* %ptr1 singlethread release, align 1
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
+ store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
- store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+ store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
- store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
+ store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
- store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+ store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
- store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
+ store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
- store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+ store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
ret void
}
@@ -232,13 +232,13 @@ entry:
; CHECK-NEXT: %res2 = extractvalue { i32, i1 } [[TMP]], 0
%res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: %res3 = extractvalue { i32, i1 } [[TMP]], 0
- %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+ %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: %res4 = extractvalue { i32, i1 } [[TMP]], 0
- %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+ %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
@@ -249,13 +249,13 @@ entry:
; CHECK-NEXT: %res6 = extractvalue { i32, i1 } [[TMP]], 0
%res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: %res7 = extractvalue { i32, i1 } [[TMP]], 0
- %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+ %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: %res8 = extractvalue { i32, i1 } [[TMP]], 0
- %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+ %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
@@ -266,13 +266,13 @@ entry:
; CHECK-NEXT: %res10 = extractvalue { i32, i1 } [[TMP]], 0
%res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: %res11 = extractvalue { i32, i1 } [[TMP]], 0
- %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+ %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: %res12 = extractvalue { i32, i1 } [[TMP]], 0
- %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+ %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
@@ -283,13 +283,13 @@ entry:
; CHECK-NEXT: %res14 = extractvalue { i32, i1 } [[TMP]], 0
%res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: %res15 = extractvalue { i32, i1 } [[TMP]], 0
- %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+ %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: %res16 = extractvalue { i32, i1 } [[TMP]], 0
- %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+ %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
@@ -300,13 +300,13 @@ entry:
; CHECK-NEXT: %res18 = extractvalue { i32, i1 } [[TMP]], 0
%res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
; CHECK-NEXT: %res19 = extractvalue { i32, i1 } [[TMP]], 0
- %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+ %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
; CHECK-NEXT: %res20 = extractvalue { i32, i1 } [[TMP]], 0
- %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+ %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
ret void
}
diff --git a/test/Bitcode/module-hash-strtab.ll b/test/Bitcode/module-hash-strtab.ll
new file mode 100644
index 000000000000..e5a1fb0c4077
--- /dev/null
+++ b/test/Bitcode/module-hash-strtab.ll
@@ -0,0 +1,15 @@
+; RUN: opt -module-hash %s -o - | llvm-bcanalyzer -dump | grep '<HASH' > %t
+; RUN: opt -module-hash %S/Inputs/module-hash-strtab1.ll -o - | llvm-bcanalyzer -dump | grep '<HASH' >> %t
+; RUN: opt -module-hash %S/Inputs/module-hash-strtab2.ll -o - | llvm-bcanalyzer -dump | grep '<HASH' >> %t
+; RUN: sort %t | uniq | count 3
+
+source_filename = "foo.c"
+
+$com = comdat any
+
+define void @main() comdat($com) {
+ call void @foo()
+ ret void
+}
+
+declare void @foo()
diff --git a/test/Bitcode/module_hash.ll b/test/Bitcode/module_hash.ll
index 56f3fdc4b7ea..b24819fe6fde 100644
--- a/test/Bitcode/module_hash.ll
+++ b/test/Bitcode/module_hash.ll
@@ -1,7 +1,7 @@
; Check per module hash.
-; RUN: opt -module-hash %s -o - | llvm-bcanalyzer -dump | FileCheck %s --check-prefix=MOD1
+; RUN: opt -module-hash %s -o - | llvm-bcanalyzer -dump -check-hash=foo | FileCheck %s --check-prefix=MOD1
; MOD1: <HASH op0={{[0-9]*}} op1={{[0-9]*}} op2={{[0-9]*}} op3={{[0-9]*}} op4={{[0-9]*}} (match)/>
-; RUN: opt -module-hash %p/Inputs/module_hash.ll -o - | llvm-bcanalyzer -dump | FileCheck %s --check-prefix=MOD2
+; RUN: opt -module-hash %p/Inputs/module_hash.ll -o - | llvm-bcanalyzer -dump -check-hash=bar | FileCheck %s --check-prefix=MOD2
; MOD2: <HASH op0={{[0-9]*}} op1={{[0-9]*}} op2={{[0-9]*}} op3={{[0-9]*}} op4={{[0-9]*}} (match)/>
; Check that the hash matches in the combined index.
@@ -21,8 +21,8 @@
; RUN: cat %t.hash | FileCheck %s --check-prefix=COMBINED
; First capture the value of the hash for the two modules.
-; COMBINED: <HASH op0=[[HASH1_1:[0-9]*]] op1=[[HASH1_2:[0-9]*]] op2=[[HASH1_3:[0-9]*]] op3=[[HASH1_4:[0-9]*]] op4=[[HASH1_5:[0-9]*]] (match)/>
-; COMBINED: <HASH op0=[[HASH2_1:[0-9]*]] op1=[[HASH2_2:[0-9]*]] op2=[[HASH2_3:[0-9]*]] op3=[[HASH2_4:[0-9]*]] op4=[[HASH2_5:[0-9]*]] (match)/>
+; COMBINED: <HASH op0=[[HASH1_1:[0-9]*]] op1=[[HASH1_2:[0-9]*]] op2=[[HASH1_3:[0-9]*]] op3=[[HASH1_4:[0-9]*]] op4=[[HASH1_5:[0-9]*]]/>
+; COMBINED: <HASH op0=[[HASH2_1:[0-9]*]] op1=[[HASH2_2:[0-9]*]] op2=[[HASH2_3:[0-9]*]] op3=[[HASH2_4:[0-9]*]] op4=[[HASH2_5:[0-9]*]]/>
; Validate against the value extracted from the combined index
; COMBINED-DAG: <HASH abbrevid={{[0-9]*}} op0=[[HASH1_1]] op1=[[HASH1_2]] op2=[[HASH1_3]] op3=[[HASH1_4]] op4=[[HASH1_5]]/>
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
index a4d259add609..86766f194688 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
@@ -29,7 +29,7 @@
; CHECK-NEXT: <VERSION
; CHECK-NEXT: <VALUE_GUID op0=25 op1=123/>
; op4=hot1 op6=cold op8=hot2 op10=hot4 op12=none1 op14=hot3 op16=none2 op18=none3 op20=123
-; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=3 op6=5 op7=1 op8=2 op9=3 op10=4 op11=1 op12=6 op13=2 op14=3 op15=3 op16=7 op17=2 op18=8 op19=2 op20=25 op21=3/>
+; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=3 op6=5 op7=1 op8=2 op9=3 op10=4 op11=1 op12=6 op13=2 op14=3 op15=3 op16=7 op17=2 op18=8 op19=2 op20=25 op21=4/>
; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK: <STRTAB_BLOCK
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
index b62090efe20b..09a6bbcb51d5 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
@@ -29,7 +29,7 @@
; CHECK-NEXT: <VERSION
; CHECK-NEXT: <VALUE_GUID op0=25 op1=123/>
; op4=hot1 op6=cold op8=hot2 op10=hot4 op12=none1 op14=hot3 op16=none2 op18=none3 op20=123
-; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=3 op6=5 op7=1 op8=2 op9=3 op10=4 op11=3 op12=6 op13=2 op14=3 op15=3 op16=7 op17=2 op18=8 op19=2 op20=25 op21=3/>
+; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=3 op6=5 op7=1 op8=2 op9=3 op10=4 op11=3 op12=6 op13=2 op14=3 op15=3 op16=7 op17=2 op18=8 op19=2 op20=25 op21=4/>
; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK: <STRTAB_BLOCK
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 50ad83feed85..10ce87c2a187 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1328,16 +1328,16 @@ define void @test_load_store_atomics(i8* %addr) {
; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
; CHECK: [[V1:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
-; CHECK: [[V2:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load singlethread seq_cst 1 from %ir.addr)
-; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store singlethread monotonic 1 into %ir.addr)
+; CHECK: [[V2:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst 1 from %ir.addr)
+; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic 1 into %ir.addr)
%v0 = load atomic i8, i8* %addr unordered, align 1
store atomic i8 %v0, i8* %addr monotonic, align 1
%v1 = load atomic i8, i8* %addr acquire, align 1
store atomic i8 %v1, i8* %addr release, align 1
- %v2 = load atomic i8, i8* %addr singlethread seq_cst, align 1
- store atomic i8 %v2, i8* %addr singlethread monotonic, align 1
+ %v2 = load atomic i8, i8* %addr syncscope("singlethread") seq_cst, align 1
+ store atomic i8 %v2, i8* %addr syncscope("singlethread") monotonic, align 1
ret void
}
diff --git a/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir b/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir
new file mode 100644
index 000000000000..8604b2769ba3
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir
@@ -0,0 +1,30 @@
+# RUN: llc -O0 -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @implicit_def() { ret void }
+...
+
+---
+# CHECK-LABEL: name: implicit_def
+name: implicit_def
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: [[DEF:%[0-9]+]] = IMPLICIT_DEF
+# CHECK: [[ADD:%[0-9]+]] = ADDWrr [[DEF]], [[DEF]]
+# CHECK: %w0 = COPY [[ADD]]
+body: |
+ bb.0:
+ %0(s32) = G_IMPLICIT_DEF
+ %1(s32) = G_ADD %0, %0
+ %w0 = COPY %1(s32)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir b/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir
new file mode 100644
index 000000000000..43e682c6b6ca
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir
@@ -0,0 +1,38 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @sdiv_s32_gpr() { ret void }
+...
+
+---
+# Check that we select a 32-bit GPR sdiv intrinsic into SDIVWrr for GPR32.
+# Also check that we constrain the register class of the COPY to GPR32.
+# CHECK-LABEL: name: sdiv_s32_gpr
+name: sdiv_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
+# CHECK-NEXT: - { id: 2, class: gpr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = SDIVWr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.sdiv.i32), %0, %1
+ %w0 = COPY %2(s32)
+...
diff --git a/test/CodeGen/AArch64/arm64-csldst-mmo.ll b/test/CodeGen/AArch64/arm64-csldst-mmo.ll
index cfb8e3a38c49..37cc5411aa31 100644
--- a/test/CodeGen/AArch64/arm64-csldst-mmo.ll
+++ b/test/CodeGen/AArch64/arm64-csldst-mmo.ll
@@ -13,9 +13,9 @@
; CHECK: SU(2): STRWui %WZR
; CHECK: SU(3): %X21<def>, %X20<def> = LDPXi %SP
; CHECK: Predecessors:
-; CHECK-NEXT: out SU(0)
-; CHECK-NEXT: out SU(0)
-; CHECK-NEXT: ord SU(0)
+; CHECK-NEXT: SU(0): Out
+; CHECK-NEXT: SU(0): Out
+; CHECK-NEXT: SU(0): Ord
; CHECK-NEXT: Successors:
define void @test1() {
entry:
diff --git a/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll b/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
index cde62fcb3f95..ad4feef7280f 100644
--- a/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
+++ b/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
@@ -8,8 +8,8 @@
; CHECK: shiftable
; CHECK: SU(2): %vreg2<def> = SUBXri %vreg1, 20, 0
; CHECK: Successors:
-; CHECK-NEXT: data SU(4): Latency=1 Reg=%vreg2
-; CHECK-NEXT: data SU(3): Latency=2 Reg=%vreg2
+; CHECK-NEXT: SU(4): Data Latency=1 Reg=%vreg2
+; CHECK-NEXT: SU(3): Data Latency=2 Reg=%vreg2
; CHECK: ********** INTERVALS **********
define i64 @shiftable(i64 %A, i64 %B) {
%tmp0 = sub i64 %B, 20
diff --git a/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll b/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
index 748a4762d82f..9cbf0cb3803a 100644
--- a/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
+++ b/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
@@ -7,11 +7,11 @@
; CHECK: misched_bug:BB#0 entry
; CHECK: SU(2): %vreg2<def> = LDRWui %vreg0, 1; mem:LD4[%ptr1_plus1] GPR32:%vreg2 GPR64common:%vreg0
; CHECK: Successors:
-; CHECK-NEXT: data SU(5): Latency=4 Reg=%vreg2
-; CHECK-NEXT: ord SU(4): Latency=0
+; CHECK-NEXT: SU(5): Data Latency=4 Reg=%vreg2
+; CHECK-NEXT: SU(4): Ord Latency=0
; CHECK: SU(3): STRWui %WZR, %vreg0, 0; mem:ST4[%ptr1] GPR64common:%vreg0
; CHECK: Successors:
-; CHECK: ord SU(4): Latency=0
+; CHECK: SU(4): Ord Latency=0
; CHECK: SU(4): STRWui %WZR, %vreg1, 0; mem:ST4[%ptr2] GPR64common:%vreg1
; CHECK: SU(5): %W0<def> = COPY %vreg2; GPR32:%vreg2
; CHECK: ** ScheduleDAGMI::schedule picking next node
diff --git a/test/CodeGen/AArch64/fence-singlethread.ll b/test/CodeGen/AArch64/fence-singlethread.ll
index 2ed744277385..0af0e58a91d4 100644
--- a/test/CodeGen/AArch64/fence-singlethread.ll
+++ b/test/CodeGen/AArch64/fence-singlethread.ll
@@ -16,6 +16,6 @@ define void @fence_singlethread() {
; IOS: ; COMPILER BARRIER
; IOS-NOT: dmb
- fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
ret void
}
diff --git a/test/CodeGen/AArch64/preferred-function-alignment.ll b/test/CodeGen/AArch64/preferred-function-alignment.ll
new file mode 100644
index 000000000000..88e6f5dd01c9
--- /dev/null
+++ b/test/CodeGen/AArch64/preferred-function-alignment.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=generic < %s | FileCheck --check-prefix=ALIGN2 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a35 < %s | FileCheck --check-prefix=ALIGN2 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a53 < %s | FileCheck --check-prefix=ALIGN2 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a73 < %s | FileCheck --check-prefix=ALIGN2 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cyclone < %s | FileCheck --check-prefix=ALIGN2 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=falkor < %s | FileCheck --check-prefix=ALIGN2 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=kryo < %s | FileCheck --check-prefix=ALIGN2 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=thunderx < %s | FileCheck --check-prefix=ALIGN3 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=thunderxt81 < %s | FileCheck --check-prefix=ALIGN3 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=thunderxt83 < %s | FileCheck --check-prefix=ALIGN3 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=thunderxt88 < %s | FileCheck --check-prefix=ALIGN3 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=thunderx2t99 < %s | FileCheck --check-prefix=ALIGN3 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a57 < %s | FileCheck --check-prefix=ALIGN4 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a72 < %s | FileCheck --check-prefix=ALIGN4 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=exynos-m1 < %s | FileCheck --check-prefix=ALIGN4 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=exynos-m2 < %s | FileCheck --check-prefix=ALIGN4 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=exynos-m3 < %s | FileCheck --check-prefix=ALIGN4 %s
+
+define void @test() {
+ ret void
+}
+
+; CHECK-LABEL: test
+; ALIGN2: .p2align 2
+; ALIGN3: .p2align 3
+; ALIGN4: .p2align 4
diff --git a/test/CodeGen/AArch64/tailcall_misched_graph.ll b/test/CodeGen/AArch64/tailcall_misched_graph.ll
index 4fbd8944f032..7e76dac214a1 100644
--- a/test/CodeGen/AArch64/tailcall_misched_graph.ll
+++ b/test/CodeGen/AArch64/tailcall_misched_graph.ll
@@ -37,8 +37,8 @@ declare void @callee2(i8*, i8*, i8*, i8*, i8*,
; CHECK: SU({{.*}}): [[VRB]]<def> = LDRXui <fi#-2>
; CHECK-NOT: SU
; CHECK: Successors:
-; CHECK: ord SU([[DEPSTOREB:.*]]): Latency=0
-; CHECK: ord SU([[DEPSTOREA:.*]]): Latency=0
+; CHECK: SU([[DEPSTOREB:.*]]): Ord Latency=0
+; CHECK: SU([[DEPSTOREA:.*]]): Ord Latency=0
; CHECK: SU([[DEPSTOREA]]): STRXui %vreg{{.*}}, <fi#-4>
; CHECK: SU([[DEPSTOREB]]): STRXui %vreg{{.*}}, <fi#-3>
diff --git a/test/CodeGen/AMDGPU/add.i16.ll b/test/CodeGen/AMDGPU/add.i16.ll
index bee13d8c17f1..98848295a73b 100644
--- a/test/CodeGen/AMDGPU/add.i16.ll
+++ b/test/CodeGen/AMDGPU/add.i16.ll
@@ -4,7 +4,7 @@
; GCN-LABEL: {{^}}v_test_add_i16:
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; VI-NEXT: buffer_store_short [[ADD]]
define amdgpu_kernel void @v_test_add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -67,7 +67,7 @@ define amdgpu_kernel void @v_test_add_i16_inline_neg1(i16 addrspace(1)* %out, i1
; GCN-LABEL: {{^}}v_test_add_i16_zext_to_i32:
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; VI-NEXT: buffer_store_dword [[ADD]]
define amdgpu_kernel void @v_test_add_i16_zext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -86,7 +86,7 @@ define amdgpu_kernel void @v_test_add_i16_zext_to_i32(i32 addrspace(1)* %out, i1
; GCN-LABEL: {{^}}v_test_add_i16_zext_to_i64:
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI-DAG: v_add_u16_e32 v[[ADD:[0-9]+]], [[B]], [[A]]
+; VI-DAG: v_add_u16_e32 v[[ADD:[0-9]+]], [[A]], [[B]]
; VI: buffer_store_dwordx2 v{{\[}}[[ADD]]:{{[0-9]+\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
define amdgpu_kernel void @v_test_add_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -105,7 +105,7 @@ define amdgpu_kernel void @v_test_add_i16_zext_to_i64(i64 addrspace(1)* %out, i1
; GCN-LABEL: {{^}}v_test_add_i16_sext_to_i32:
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; VI-NEXT: v_bfe_i32 [[SEXT:v[0-9]+]], [[ADD]], 0, 16
; VI-NEXT: buffer_store_dword [[SEXT]]
define amdgpu_kernel void @v_test_add_i16_sext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
@@ -125,7 +125,7 @@ define amdgpu_kernel void @v_test_add_i16_sext_to_i32(i32 addrspace(1)* %out, i1
; GCN-LABEL: {{^}}v_test_add_i16_sext_to_i64:
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; VI-NEXT: v_bfe_i32 v[[LO:[0-9]+]], [[ADD]], 0, 16
; VI-NEXT: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; VI-NEXT: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
diff --git a/test/CodeGen/AMDGPU/add.ll b/test/CodeGen/AMDGPU/add.ll
index 7e4546d2cfb3..6dcd7c234dc6 100644
--- a/test/CodeGen/AMDGPU/add.ll
+++ b/test/CodeGen/AMDGPU/add.ll
@@ -5,9 +5,9 @@
;FUNC-LABEL: {{^}}test1:
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI: v_add_i32_e32 [[REG:v[0-9]+]], vcc, {{v[0-9]+, v[0-9]+}}
-;SI-NOT: [[REG]]
-;SI: buffer_store_dword [[REG]],
+;SI: s_add_i32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}}
+;SI: v_mov_b32_e32 v[[REG]], s[[REG]]
+;SI: buffer_store_dword v[[REG]],
define amdgpu_kernel void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
@@ -21,8 +21,8 @@ define amdgpu_kernel void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
-;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
+;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
+;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
define amdgpu_kernel void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
@@ -39,10 +39,10 @@ define amdgpu_kernel void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspa
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
-;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
-;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
-;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
+;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
+;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
+;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
+;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
define amdgpu_kernel void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/AMDGPU/add.v2i16.ll b/test/CodeGen/AMDGPU/add.v2i16.ll
index 76f724c2b90b..4baa35ca57c5 100644
--- a/test/CodeGen/AMDGPU/add.v2i16.ll
+++ b/test/CodeGen/AMDGPU/add.v2i16.ll
@@ -168,10 +168,10 @@ define amdgpu_kernel void @v_test_add_v2i16_inline_fp_split(<2 x i16> addrspace(
; VI: flat_load_ushort v[[B_HI:[0-9]+]]
; VI: flat_load_ushort v[[B_LO:[0-9]+]]
-; VI: v_add_u16_e32 v[[ADD_HI:[0-9]+]], v[[B_HI]], v[[A_HI]]
+; VI: v_add_u16_e32 v[[ADD_HI:[0-9]+]], v[[A_HI]], v[[B_HI]]
; VI-NOT: and
; VI-NOT: shl
-; VI: v_add_u16_e32 v[[ADD_LO:[0-9]+]], v[[B_LO]], v[[A_LO]]
+; VI: v_add_u16_e32 v[[ADD_LO:[0-9]+]], v[[A_LO]], v[[B_LO]]
; VI-NOT: and
; VI-NOT: shl
; VI: buffer_store_dwordx2 v{{\[}}[[ADD_LO]]:[[ADD_HI]]{{\]}}
diff --git a/test/CodeGen/AMDGPU/add_i128.ll b/test/CodeGen/AMDGPU/add_i128.ll
index 00a125c2e44f..d33965d4dda7 100644
--- a/test/CodeGen/AMDGPU/add_i128.ll
+++ b/test/CodeGen/AMDGPU/add_i128.ll
@@ -19,10 +19,10 @@ define amdgpu_kernel void @test_i128_vreg(i128 addrspace(1)* noalias %out, i128
; Check that the SGPR add operand is correctly moved to a VGPR.
; GCN-LABEL: {{^}}sgpr_operand:
-; GCN: v_add_i32
-; GCN: v_addc_u32
-; GCN: v_addc_u32
-; GCN: v_addc_u32
+; GCN: s_add_u32
+; GCN: s_addc_u32
+; GCN: s_addc_u32
+; GCN: s_addc_u32
define amdgpu_kernel void @sgpr_operand(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in, i128 %a) {
%foo = load i128, i128 addrspace(1)* %in, align 8
%result = add i128 %foo, %a
@@ -31,10 +31,10 @@ define amdgpu_kernel void @sgpr_operand(i128 addrspace(1)* noalias %out, i128 ad
}
; GCN-LABEL: {{^}}sgpr_operand_reversed:
-; GCN: v_add_i32
-; GCN: v_addc_u32
-; GCN: v_addc_u32
-; GCN: v_addc_u32
+; GCN: s_add_u32
+; GCN: s_addc_u32
+; GCN: s_addc_u32
+; GCN: s_addc_u32
define amdgpu_kernel void @sgpr_operand_reversed(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in, i128 %a) {
%foo = load i128, i128 addrspace(1)* %in, align 8
%result = add i128 %a, %foo
diff --git a/test/CodeGen/AMDGPU/add_i64.ll b/test/CodeGen/AMDGPU/add_i64.ll
index 62733d5bfb6c..f673d91192b8 100644
--- a/test/CodeGen/AMDGPU/add_i64.ll
+++ b/test/CodeGen/AMDGPU/add_i64.ll
@@ -19,8 +19,8 @@ define amdgpu_kernel void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 add
; Check that the SGPR add operand is correctly moved to a VGPR.
; SI-LABEL: {{^}}sgpr_operand:
-; SI: v_add_i32
-; SI: v_addc_u32
+; SI: s_add_u32
+; SI: s_addc_u32
define amdgpu_kernel void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 addrspace(1)* noalias %in_bar, i64 %a) {
%foo = load i64, i64 addrspace(1)* %in, align 8
%result = add i64 %foo, %a
@@ -32,8 +32,8 @@ define amdgpu_kernel void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addr
; SGPR as other operand.
;
; SI-LABEL: {{^}}sgpr_operand_reversed:
-; SI: v_add_i32
-; SI: v_addc_u32
+; SI: s_add_u32
+; SI: s_addc_u32
define amdgpu_kernel void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %a) {
%foo = load i64, i64 addrspace(1)* %in, align 8
%result = add i64 %a, %foo
diff --git a/test/CodeGen/AMDGPU/addrspacecast.ll b/test/CodeGen/AMDGPU/addrspacecast.ll
index b1e71722d80c..a6aa9e795151 100644
--- a/test/CodeGen/AMDGPU/addrspacecast.ll
+++ b/test/CodeGen/AMDGPU/addrspacecast.ll
@@ -10,20 +10,22 @@
; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
+; CI-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
+; CI-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
+; CI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
+; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(15, 16, 16)
; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
; GFX9-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[SSRC_SHARED_BASE]]
; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_shared_base
-
-; HSA-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-
-; HSA-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
-; HSA-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]]
-; HSA-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
-; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
+; GFX9: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
+; GFX9: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
+; GFX9-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, [[K]]
@@ -48,6 +50,12 @@ define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %pt
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11{{$}}
; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
+; CI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
+; CI-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], 0
+; CI-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
+; CI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
+
; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
; GFX9-DAG: s_getreg_b32 [[SSRC_PRIVATE:s[0-9]+]], hwreg(15, 0, 16)
; GFX9-DAG: s_lshl_b32 [[SSRC_PRIVATE_BASE:s[0-9]+]], [[SSRC_PRIVATE]], 16
@@ -55,12 +63,11 @@ define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %pt
; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_private_base
-; HSA-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-
-; HSA-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], 0
-; HSA-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]]
-; HSA-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
-; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
+; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
+; GFX9: v_cmp_ne_u32_e64 vcc, [[PTR]], 0
+; GFX9: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
+; GFX9: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, [[K]]
diff --git a/test/CodeGen/AMDGPU/alignbit-pat.ll b/test/CodeGen/AMDGPU/alignbit-pat.ll
index ff5c8960fad3..3f07188063cd 100644
--- a/test/CodeGen/AMDGPU/alignbit-pat.ll
+++ b/test/CodeGen/AMDGPU/alignbit-pat.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GCN-LABEL: {{^}}alignbit_shr_pat:
; GCN-DAG: s_load_dword s[[SHR:[0-9]+]]
diff --git a/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll b/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll
index 0e5605961e10..0c7160df2b96 100644
--- a/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll
+++ b/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll
@@ -16,8 +16,8 @@ define amdgpu_kernel void @noop_fdiv_fpmath(float addrspace(1)* %out, float %a,
; CHECK: %md.1ulp = fdiv float %a, %b, !fpmath !2
; CHECK: %md.25ulp = call float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !0
; CHECK: %md.3ulp = call float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !3
-; CHECK: %fast.md.25ulp = call fast float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !0
-; CHECK: arcp.md.25ulp = call arcp float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !0
+; CHECK: %fast.md.25ulp = fdiv fast float %a, %b, !fpmath !0
+; CHECK: arcp.md.25ulp = fdiv arcp float %a, %b, !fpmath !0
define amdgpu_kernel void @fdiv_fpmath(float addrspace(1)* %out, float %a, float %b) #1 {
%no.md = fdiv float %a, %b
store volatile float %no.md, float addrspace(1)* %out
@@ -110,15 +110,8 @@ define amdgpu_kernel void @fdiv_fpmath_vector(<2 x float> addrspace(1)* %out, <2
; CHECK: %md.half.ulp = fdiv <2 x float> <float 1.000000e+00, float 1.000000e+00>, %x, !fpmath !1
; CHECK: %arcp.no.md = fdiv arcp <2 x float> <float 1.000000e+00, float 1.000000e+00>, %x{{$}}
; CHECK: %fast.no.md = fdiv fast <2 x float> <float 1.000000e+00, float 1.000000e+00>, %x{{$}}
-
-; CHECK: extractelement <2 x float> %x
-; CHECK: fdiv arcp float 1.000000e+00, %{{[0-9]+}}, !fpmath !0
-; CHECK: extractelement <2 x float> %x
-; CHECK: fdiv arcp float 1.000000e+00, %{{[0-9]+}}, !fpmath !0
-; CHECK: store volatile <2 x float> %arcp.25ulp
-
-; CHECK: fdiv fast float 1.000000e+00, %{{[0-9]+}}, !fpmath !0
-; CHECK: fdiv fast float 1.000000e+00, %{{[0-9]+}}, !fpmath !0
+; CHECK: %arcp.25ulp = fdiv arcp <2 x float> <float 1.000000e+00, float 1.000000e+00>, %x, !fpmath !0
+; CHECK: %fast.25ulp = fdiv fast <2 x float> <float 1.000000e+00, float 1.000000e+00>, %x, !fpmath !0
; CHECK: store volatile <2 x float> %fast.25ulp, <2 x float> addrspace(1)* %out
define amdgpu_kernel void @rcp_fdiv_fpmath_vector(<2 x float> addrspace(1)* %out, <2 x float> %x) #1 {
%no.md = fdiv <2 x float> <float 1.0, float 1.0>, %x
@@ -146,17 +139,8 @@ define amdgpu_kernel void @rcp_fdiv_fpmath_vector(<2 x float> addrspace(1)* %out
; CHECK: %no.md = fdiv <2 x float> <float 1.000000e+00, float 2.000000e+00>, %x
; CHECK: %arcp.no.md = fdiv arcp <2 x float> <float 1.000000e+00, float 2.000000e+00>, %x
; CHECK: %fast.no.md = fdiv fast <2 x float> <float 1.000000e+00, float 2.000000e+00>, %x{{$}}
-
-; CHECK: %[[X0:[0-9]+]] = extractelement <2 x float> %x, i64 0
-; CHECK: fdiv arcp float 1.000000e+00, %[[X0]], !fpmath !0
-; CHECK: %[[X1:[0-9]+]] = extractelement <2 x float> %x, i64 1
-; CHECK: fdiv arcp float 2.000000e+00, %[[X1]], !fpmath !0
-; CHECK: store volatile <2 x float> %arcp.25ulp
-
-; CHECK: %[[X0:[0-9]+]] = extractelement <2 x float> %x, i64 0
-; CHECK: fdiv fast float 1.000000e+00, %[[X0]], !fpmath !0
-; CHECK: %[[X1:[0-9]+]] = extractelement <2 x float> %x, i64 1
-; CHECK: fdiv fast float 2.000000e+00, %[[X1]], !fpmath !0
+; CHECK: %arcp.25ulp = fdiv arcp <2 x float> <float 1.000000e+00, float 2.000000e+00>, %x, !fpmath !0
+; CHECK: %fast.25ulp = fdiv fast <2 x float> <float 1.000000e+00, float 2.000000e+00>, %x, !fpmath !0
; CHECK: store volatile <2 x float> %fast.25ulp
define amdgpu_kernel void @rcp_fdiv_fpmath_vector_nonsplat(<2 x float> addrspace(1)* %out, <2 x float> %x) #1 {
%no.md = fdiv <2 x float> <float 1.0, float 2.0>, %x
@@ -179,12 +163,10 @@ define amdgpu_kernel void @rcp_fdiv_fpmath_vector_nonsplat(<2 x float> addrspace
; FIXME: Should be able to get fdiv for 1.0 component
; CHECK-LABEL: @rcp_fdiv_fpmath_vector_partial_constant(
-; CHECK: call arcp float @llvm.amdgcn.fdiv.fast(float %{{[0-9]+}}, float %{{[0-9]+}}), !fpmath !0
-; CHECK: call arcp float @llvm.amdgcn.fdiv.fast(float %{{[0-9]+}}, float %{{[0-9]+}}), !fpmath !0
+; CHECK: %arcp.25ulp = fdiv arcp <2 x float> %x.insert, %y, !fpmath !0
; CHECK: store volatile <2 x float> %arcp.25ulp
-; CHECK: call fast float @llvm.amdgcn.fdiv.fast(float %{{[0-9]+}}, float %{{[0-9]+}}), !fpmath !0
-; CHECK: call fast float @llvm.amdgcn.fdiv.fast(float %{{[0-9]+}}, float %{{[0-9]+}}), !fpmath !0
+; CHECK: %fast.25ulp = fdiv fast <2 x float> %x.insert, %y, !fpmath !0
; CHECK: store volatile <2 x float> %fast.25ulp
define amdgpu_kernel void @rcp_fdiv_fpmath_vector_partial_constant(<2 x float> addrspace(1)* %out, <2 x float> %x, <2 x float> %y) #1 {
%x.insert = insertelement <2 x float> %x, float 1.0, i32 0
@@ -204,8 +186,8 @@ define amdgpu_kernel void @rcp_fdiv_fpmath_vector_partial_constant(<2 x float> a
; CHECK: %md.1ulp = fdiv float %a, %b, !fpmath !2
; CHECK: %md.25ulp = fdiv float %a, %b, !fpmath !0
; CHECK: %md.3ulp = fdiv float %a, %b, !fpmath !3
-; CHECK: call fast float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !0
-; CHECK: call arcp float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !0
+; CHECK: %fast.md.25ulp = fdiv fast float %a, %b, !fpmath !0
+; CHECK: %arcp.md.25ulp = fdiv arcp float %a, %b, !fpmath !0
define amdgpu_kernel void @fdiv_fpmath_f32_denormals(float addrspace(1)* %out, float %a, float %b) #2 {
%no.md = fdiv float %a, %b
store volatile float %no.md, float addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/and-gcn.ll b/test/CodeGen/AMDGPU/and-gcn.ll
index 2aec03aff8a3..ef11ae87267e 100644
--- a/test/CodeGen/AMDGPU/and-gcn.ll
+++ b/test/CodeGen/AMDGPU/and-gcn.ll
@@ -2,8 +2,7 @@
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}v_and_i64_br:
-; SI: v_and_b32
-; SI: v_and_b32
+; SI: s_and_b64
define amdgpu_kernel void @v_and_i64_br(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
entry:
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
diff --git a/test/CodeGen/AMDGPU/and.ll b/test/CodeGen/AMDGPU/and.ll
index c356f8b87cfc..ee0190149e92 100644
--- a/test/CodeGen/AMDGPU/and.ll
+++ b/test/CodeGen/AMDGPU/and.ll
@@ -8,8 +8,8 @@ declare i32 @llvm.r600.read.tidig.x() #0
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
+; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
define amdgpu_kernel void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
@@ -26,10 +26,11 @@ define amdgpu_kernel void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspa
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+
+; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
+; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
+; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
+; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
define amdgpu_kernel void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
@@ -136,7 +137,9 @@ define amdgpu_kernel void @v_and_i32_vgpr_sgpr(i32 addrspace(1)* %out, i32 addrs
; FUNC-LABEL: {{^}}v_and_constant_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
define amdgpu_kernel void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
- %a = load i32, i32 addrspace(1)* %aptr, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %a = load i32, i32 addrspace(1)* %gep, align 4
%and = and i32 %a, 1234567
store i32 %and, i32 addrspace(1)* %out, align 4
ret void
@@ -145,7 +148,9 @@ define amdgpu_kernel void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrsp
; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
define amdgpu_kernel void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
- %a = load i32, i32 addrspace(1)* %aptr, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %a = load i32, i32 addrspace(1)* %gep, align 4
%and = and i32 %a, 64
store i32 %and, i32 addrspace(1)* %out, align 4
ret void
@@ -154,7 +159,9 @@ define amdgpu_kernel void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 a
; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
define amdgpu_kernel void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
- %a = load i32, i32 addrspace(1)* %aptr, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %a = load i32, i32 addrspace(1)* %gep, align 4
%and = and i32 %a, -16
store i32 %and, i32 addrspace(1)* %out, align 4
ret void
@@ -239,8 +246,11 @@ define amdgpu_kernel void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out
; SI: v_and_b32
; SI: v_and_b32
define amdgpu_kernel void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
- %a = load i64, i64 addrspace(1)* %aptr, align 8
- %b = load i64, i64 addrspace(1)* %bptr, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %a = load i64, i64 addrspace(1)* %gep.a, align 8
+ %gep.b = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
+ %b = load i64, i64 addrspace(1)* %gep.b, align 8
%and = and i64 %a, %b
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -251,7 +261,9 @@ define amdgpu_kernel void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0x11e, {{v[0-9]+}}
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
- %a = load i64, i64 addrspace(1)* %aptr, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %a = load i64, i64 addrspace(1)* %gep.a, align 8
%and = and i64 %a, 1231231234567
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -299,26 +311,30 @@ define amdgpu_kernel void @v_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out
}
; FUNC-LABEL: {{^}}v_and_i64_32_bit_constant:
-; SI: buffer_load_dword [[VAL:v[0-9]+]]
+; SI: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]]
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 0x12d687, [[VAL]]
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
- %a = load i64, i64 addrspace(1)* %aptr, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %a = load i64, i64 addrspace(1)* %gep.a, align 8
%and = and i64 %a, 1234567
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
}
; FUNC-LABEL: {{^}}v_and_inline_imm_i64:
-; SI: buffer_load_dword v{{[0-9]+}}
+; SI: {{buffer|flat}}_load_dword v{{[0-9]+}}
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
- %a = load i64, i64 addrspace(1)* %aptr, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %a = load i64, i64 addrspace(1)* %gep.a, align 8
%and = and i64 %a, 64
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -326,13 +342,15 @@ define amdgpu_kernel void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addr
; FIXME: Should be able to reduce load width
; FUNC-LABEL: {{^}}v_and_inline_neg_imm_i64:
-; SI: buffer_load_dwordx2 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
+; SI: {{buffer|flat}}_load_dwordx2 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
; SI-NOT: and
; SI: v_and_b32_e32 v[[VAL_LO]], -8, v[[VAL_LO]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
define amdgpu_kernel void @v_and_inline_neg_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
- %a = load i64, i64 addrspace(1)* %aptr, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %a = load i64, i64 addrspace(1)* %gep.a, align 8
%and = and i64 %a, -8
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -549,5 +567,4 @@ define amdgpu_kernel void @s_and_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
}
-
attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/any_extend_vector_inreg.ll b/test/CodeGen/AMDGPU/any_extend_vector_inreg.ll
index c61c23222bc7..cdc60ab504e0 100644
--- a/test/CodeGen/AMDGPU/any_extend_vector_inreg.ll
+++ b/test/CodeGen/AMDGPU/any_extend_vector_inreg.ll
@@ -2,9 +2,9 @@
; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GCN-LABEL: {{^}}any_extend_vector_inreg_v16i8_to_v4i32:
-; GCN: {{buffer|flat}}_load_dwordx4
-; GCN-DAG: {{buffer|flat}}_load_dwordx4
-; GCN-DAG: {{buffer|flat}}_load_dword
+; GCN: s_load_dwordx4
+; GCN-DAG: s_load_dwordx4
+; GCN-DAG: s_load_dword
; GCN: {{buffer|flat}}_store_byte
; GCN: {{buffer|flat}}_store_byte
diff --git a/test/CodeGen/AMDGPU/bitreverse.ll b/test/CodeGen/AMDGPU/bitreverse.ll
index 539373f7bdeb..f29bfb46b94b 100644
--- a/test/CodeGen/AMDGPU/bitreverse.ll
+++ b/test/CodeGen/AMDGPU/bitreverse.ll
@@ -2,6 +2,8 @@
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC %s
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
declare i16 @llvm.bitreverse.i16(i16) #1
declare i32 @llvm.bitreverse.i32(i32) #1
declare i64 @llvm.bitreverse.i64(i64) #1
@@ -42,12 +44,14 @@ define amdgpu_kernel void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val)
}
; FUNC-LABEL: {{^}}v_brev_i32:
-; SI: buffer_load_dword [[VAL:v[0-9]+]],
+; SI: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; SI: v_bfrev_b32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
define amdgpu_kernel void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) #0 {
- %val = load i32, i32 addrspace(1)* %valptr
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %gep
%brev = call i32 @llvm.bitreverse.i32(i32 %val) #1
store i32 %brev, i32 addrspace(1)* %out
ret void
@@ -66,7 +70,9 @@ define amdgpu_kernel void @s_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2
; SI: v_bfrev_b32_e32
; SI: v_bfrev_b32_e32
define amdgpu_kernel void @v_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) #0 {
- %val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %valptr, i32 %tid
+ %val = load <2 x i32>, <2 x i32> addrspace(1)* %gep
%brev = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %val) #1
store <2 x i32> %brev, <2 x i32> addrspace(1)* %out
ret void
@@ -82,7 +88,9 @@ define amdgpu_kernel void @s_brev_i64(i64 addrspace(1)* noalias %out, i64 %val)
; FUNC-LABEL: {{^}}v_brev_i64:
; SI-NOT: v_or_b32_e64 v{{[0-9]+}}, 0, 0
define amdgpu_kernel void @v_brev_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %valptr) #0 {
- %val = load i64, i64 addrspace(1)* %valptr
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i64, i64 addrspace(1)* %valptr, i32 %tid
+ %val = load i64, i64 addrspace(1)* %gep
%brev = call i64 @llvm.bitreverse.i64(i64 %val) #1
store i64 %brev, i64 addrspace(1)* %out
ret void
@@ -97,7 +105,9 @@ define amdgpu_kernel void @s_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2
; FUNC-LABEL: {{^}}v_brev_v2i64:
define amdgpu_kernel void @v_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %valptr) #0 {
- %val = load <2 x i64>, <2 x i64> addrspace(1)* %valptr
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %valptr, i32 %tid
+ %val = load <2 x i64>, <2 x i64> addrspace(1)* %gep
%brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val) #1
store <2 x i64> %brev, <2 x i64> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/bswap.ll b/test/CodeGen/AMDGPU/bswap.ll
index d2dacd7c17b3..eb3fc2fab34f 100644
--- a/test/CodeGen/AMDGPU/bswap.ll
+++ b/test/CodeGen/AMDGPU/bswap.ll
@@ -10,7 +10,7 @@ declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) nounwind readnone
declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>) nounwind readnone
; FUNC-LABEL: @test_bswap_i32
-; SI: buffer_load_dword [[VAL:v[0-9]+]]
+; SI: s_load_dword [[VAL:s[0-9]+]]
; SI-DAG: v_alignbit_b32 [[TMP0:v[0-9]+]], [[VAL]], [[VAL]], 8
; SI-DAG: v_alignbit_b32 [[TMP1:v[0-9]+]], [[VAL]], [[VAL]], 24
; SI-DAG: s_mov_b32 [[K:s[0-9]+]], 0xff00ff
diff --git a/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll b/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll
index 5dec3e35ab3d..c114332a5887 100644
--- a/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll
+++ b/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll
@@ -1,9 +1,9 @@
; RUN: opt -S -codegenprepare -mtriple=amdgcn-unknown-unknown -mcpu=bonaire < %s | FileCheck -check-prefix=OPT -check-prefix=OPT-CI -check-prefix=OPT-CIVI %s
; RUN: opt -S -codegenprepare -mtriple=amdgcn-unknown-unknown -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=OPT -check-prefix=OPT-VI -check-prefix=OPT-CIVI %s
; RUN: opt -S -codegenprepare -mtriple=amdgcn-unknown-unknown -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -check-prefix=OPT -check-prefix=OPT-GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=-promote-alloca < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=-promote-alloca < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -mattr=-promote-alloca < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -march=amdgcn -amdgpu-scalarize-global-loads=false -mcpu=bonaire -mattr=-promote-alloca < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s
+; RUN: llc -march=amdgcn -amdgpu-scalarize-global-loads=false -mcpu=tonga -mattr=-flat-for-global -mattr=-promote-alloca < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
+; RUN: llc -march=amdgcn -amdgpu-scalarize-global-loads=false -mcpu=gfx900 -mattr=-flat-for-global -mattr=-promote-alloca < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
; OPT-LABEL: @test_no_sink_flat_small_offset_i32(
; OPT-CIVI: getelementptr i32, i32 addrspace(4)* %in
@@ -40,7 +40,7 @@ done:
; OPT-LABEL: @test_sink_noop_addrspacecast_flat_to_global_i32(
; OPT: getelementptr i32, i32 addrspace(4)* %out,
; OPT-CI-NOT: getelementptr
; OPT: br i1
; OPT-CI: addrspacecast
diff --git a/test/CodeGen/AMDGPU/cgp-addressing-modes.ll b/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
index c1cf56e5058e..c01d834bc33d 100644
--- a/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
+++ b/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
@@ -1,9 +1,9 @@
; RUN: opt -S -codegenprepare -mtriple=amdgcn-unknown-unknown -mcpu=tahiti < %s | FileCheck -check-prefix=OPT -check-prefix=OPT-SI %s
; RUN: opt -S -codegenprepare -mtriple=amdgcn-unknown-unknown -mcpu=bonaire < %s | FileCheck -check-prefix=OPT -check-prefix=OPT-CI %s
; RUN: opt -S -codegenprepare -mtriple=amdgcn-unknown-unknown -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=OPT -check-prefix=OPT-VI %s
-; RUN: llc -march=amdgcn -mcpu=tahiti -mattr=-promote-alloca -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=-promote-alloca -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=-promote-alloca -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -amdgpu-scalarize-global-loads=false -mattr=-promote-alloca -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -amdgpu-scalarize-global-loads=false -mattr=-promote-alloca -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-scalarize-global-loads=false -mattr=-flat-for-global -mattr=-promote-alloca -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
diff --git a/test/CodeGen/AMDGPU/clamp-omod-special-case.mir b/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
index 6ecf75c1acec..90fba0342090 100644
--- a/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
+++ b/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
@@ -1,36 +1,4 @@
# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fold-operands %s -o - | FileCheck -check-prefix=GCN %s
---- |
- define amdgpu_ps void @v_max_self_clamp_not_set_f32() #0 {
- ret void
- }
-
- define amdgpu_ps void @v_clamp_omod_already_set_f32() #0 {
- ret void
- }
-
- define amdgpu_ps void @v_omod_mul_omod_already_set_f32() #0 {
- ret void
- }
-
- define amdgpu_ps void @v_omod_mul_clamp_already_set_f32() #0 {
- ret void
- }
-
- define amdgpu_ps void @v_omod_add_omod_already_set_f32() #0 {
- ret void
- }
-
- define amdgpu_ps void @v_omod_add_clamp_already_set_f32() #0 {
- ret void
- }
-
- define amdgpu_ps void @v_max_reg_imm_f32() #0 {
- ret void
- }
-
- attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
-
-...
---
# GCN-LABEL: name: v_max_self_clamp_not_set_f32
# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
@@ -70,7 +38,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -132,7 +100,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -195,7 +163,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -260,7 +228,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -337,7 +305,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -402,7 +370,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -435,7 +403,7 @@ registers:
- { id: 0, class: vgpr_32 }
- { id: 1, class: vgpr_32 }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %vgpr0
%0 = COPY %vgpr0
diff --git a/test/CodeGen/AMDGPU/coalescer_remat.ll b/test/CodeGen/AMDGPU/coalescer_remat.ll
index 3e1b76a1df09..14b798ba822b 100644
--- a/test/CodeGen/AMDGPU/coalescer_remat.ll
+++ b/test/CodeGen/AMDGPU/coalescer_remat.ll
@@ -12,7 +12,7 @@ declare float @llvm.fma.f32(float, float, float)
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0
; It's probably OK if this is slightly higher:
-; CHECK: ; NumVgprs: 8
+; CHECK: ; NumVgprs: 4
define amdgpu_kernel void @foobar(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in, i32 %flag) {
entry:
%cmpflag = icmp eq i32 %flag, 1
diff --git a/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
index ed78ccc9b617..0401f7b07e21 100644
--- a/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
+++ b/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -1,84 +1,5 @@
# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=hawaii -verify-machineinstrs -run-pass si-fold-operands,dead-mi-elimination -o - %s | FileCheck -check-prefix=GCN %s
---- |
- define amdgpu_kernel void @s_fold_and_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
- %and = and i32 %a, 1234567
- store volatile i32 %and, i32 addrspace(1)* %out
- ret void
- }
-
- define amdgpu_kernel void @v_fold_and_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %idxprom = sext i32 %tid to i64
- %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
- %gep.out = getelementptr i32, i32 addrspace(1)* %out, i64 %idxprom
- %a = load i32, i32 addrspace(1)* %gep.a
- %and = and i32 %a, 1234567
- store i32 %and, i32 addrspace(1)* %gep.out
- ret void
- }
-
- define amdgpu_kernel void @s_fold_shl_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
- %shl = shl i32 %a, 12
- store volatile i32 %shl, i32 addrspace(1)* %out
- ret void
- }
-
- define amdgpu_kernel void @v_fold_shl_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %idxprom = sext i32 %tid to i64
- %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
- %gep.out = getelementptr i32, i32 addrspace(1)* %out, i64 %idxprom
- %a = load i32, i32 addrspace(1)* %gep.a
- %shl = shl i32 %a, 12
- store i32 %shl, i32 addrspace(1)* %gep.out
- ret void
- }
-
- define amdgpu_kernel void @s_fold_ashr_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
- %ashr = ashr i32 %a, 12
- store volatile i32 %ashr, i32 addrspace(1)* %out
- ret void
- }
-
- define amdgpu_kernel void @v_fold_ashr_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %idxprom = sext i32 %tid to i64
- %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
- %gep.out = getelementptr i32, i32 addrspace(1)* %out, i64 %idxprom
- %a = load i32, i32 addrspace(1)* %gep.a
- %ashr = ashr i32 %a, 12
- store i32 %ashr, i32 addrspace(1)* %gep.out
- ret void
- }
-
- define amdgpu_kernel void @s_fold_lshr_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
- %lshr = lshr i32 %a, 12
- store volatile i32 %lshr, i32 addrspace(1)* %out
- ret void
- }
-
- define amdgpu_kernel void @v_fold_lshr_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %idxprom = sext i32 %tid to i64
- %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
- %gep.out = getelementptr i32, i32 addrspace(1)* %out, i64 %idxprom
- %a = load i32, i32 addrspace(1)* %gep.a
- %lshr = lshr i32 %a, 12
- store i32 %lshr, i32 addrspace(1)* %gep.out
- ret void
- }
-
- define amdgpu_kernel void @undefined_vreg_operand() {
- unreachable
- }
-
- declare i32 @llvm.amdgcn.workitem.id.x() #1
-
- attributes #0 = { nounwind }
- attributes #1 = { nounwind readnone }
-
...
----
# GCN-LABEL: name: s_fold_and_imm_regimm_32{{$}}
# GCN: %10 = V_MOV_B32_e32 1543, implicit %exec
@@ -119,11 +40,11 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1
%0 = COPY %sgpr0_sgpr1
- %1 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %1 = S_LOAD_DWORDX2_IMM %0, 36, 0
%2 = COPY %1.sub1
%3 = COPY %1.sub0
%4 = S_MOV_B32 61440
@@ -133,7 +54,7 @@ body: |
%8 = S_MOV_B32 9999
%9 = S_AND_B32 killed %7, killed %8, implicit-def dead %scc
%10 = COPY %9
- BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out)
+ BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -204,12 +125,12 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
%31 = V_ASHRREV_I32_e64 31, %3, implicit %exec
%32 = REG_SEQUENCE %3, 1, %31, 2
%33 = V_LSHLREV_B64 2, killed %32, implicit %exec
@@ -223,19 +144,19 @@ body: |
%34 = V_MOV_B32_e32 63, implicit %exec
%27 = V_AND_B32_e64 %26, %24, implicit %exec
- FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit %exec, implicit %flat_scr
%28 = V_AND_B32_e64 %24, %26, implicit %exec
- FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
%29 = V_AND_B32_e32 %26, %24, implicit %exec
- FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit %exec, implicit %flat_scr
%30 = V_AND_B32_e64 %26, %26, implicit %exec
- FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit %exec, implicit %flat_scr
%31 = V_AND_B32_e64 %34, %34, implicit %exec
- FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
@@ -285,11 +206,11 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
%5 = S_MOV_B32 1
%6 = COPY %4.sub1
%7 = COPY %4.sub0
@@ -298,7 +219,7 @@ body: |
%10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
%12 = S_LSHL_B32 killed %5, 12, implicit-def dead %scc
%13 = COPY %12
- BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out)
+ BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -390,7 +311,7 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%2 = COPY %vgpr0
@@ -411,34 +332,34 @@ body: |
%27 = S_MOV_B32 -4
%11 = V_LSHLREV_B32_e64 12, %10, implicit %exec
- FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
%12 = V_LSHLREV_B32_e64 %7, 12, implicit %exec
- FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
%13 = V_LSHL_B32_e64 %7, 12, implicit %exec
- FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
%14 = V_LSHL_B32_e64 12, %7, implicit %exec
- FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
%15 = V_LSHL_B32_e64 12, %24, implicit %exec
- FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
%22 = V_LSHL_B32_e64 %6, 12, implicit %exec
- FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
%23 = V_LSHL_B32_e64 %6, 32, implicit %exec
- FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
%25 = V_LSHL_B32_e32 %6, %6, implicit %exec
- FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
%26 = V_LSHLREV_B32_e32 11, %24, implicit %exec
- FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
%28 = V_LSHL_B32_e32 %27, %6, implicit %exec
- FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
@@ -485,11 +406,11 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
%5 = S_MOV_B32 999123
%6 = COPY %4.sub1
%7 = COPY %4.sub0
@@ -498,7 +419,7 @@ body: |
%10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
%12 = S_ASHR_I32 killed %5, 12, implicit-def dead %scc
%13 = COPY %12
- BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out)
+ BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -593,12 +514,12 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%2 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %3 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %3 = S_LOAD_DWORDX2_IMM %0, 36, 0
%15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
%16 = REG_SEQUENCE %2, 1, %15, 2
%17 = V_LSHLREV_B64 2, killed %16, implicit %exec
@@ -619,34 +540,34 @@ body: |
%35 = V_MOV_B32_e32 2, implicit %exec
%11 = V_ASHRREV_I32_e64 8, %10, implicit %exec
- FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
%12 = V_ASHRREV_I32_e64 %8, %10, implicit %exec
- FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
%13 = V_ASHR_I32_e64 %7, 3, implicit %exec
- FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
%14 = V_ASHR_I32_e64 7, %32, implicit %exec
- FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
%15 = V_ASHR_I32_e64 %27, %24, implicit %exec
- FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
%22 = V_ASHR_I32_e64 %6, 4, implicit %exec
- FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
%23 = V_ASHR_I32_e64 %6, %33, implicit %exec
- FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
%25 = V_ASHR_I32_e32 %34, %34, implicit %exec
- FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
%26 = V_ASHRREV_I32_e32 11, %10, implicit %exec
- FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
%28 = V_ASHR_I32_e32 %27, %35, implicit %exec
- FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
@@ -693,11 +614,11 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
%5 = S_MOV_B32 -999123
%6 = COPY %4.sub1
%7 = COPY %4.sub0
@@ -706,7 +627,7 @@ body: |
%10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
%12 = S_LSHR_B32 killed %5, 12, implicit-def dead %scc
%13 = COPY %12
- BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out)
+ BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -802,12 +723,12 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%2 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %3 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %3 = S_LOAD_DWORDX2_IMM %0, 36, 0
%15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
%16 = REG_SEQUENCE %2, 1, %15, 2
%17 = V_LSHLREV_B64 2, killed %16, implicit %exec
@@ -828,34 +749,34 @@ body: |
%35 = V_MOV_B32_e32 2, implicit %exec
%11 = V_LSHRREV_B32_e64 8, %10, implicit %exec
- FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
%12 = V_LSHRREV_B32_e64 %8, %10, implicit %exec
- FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
%13 = V_LSHR_B32_e64 %7, 3, implicit %exec
- FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
%14 = V_LSHR_B32_e64 7, %32, implicit %exec
- FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
%15 = V_LSHR_B32_e64 %27, %24, implicit %exec
- FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
%22 = V_LSHR_B32_e64 %6, 4, implicit %exec
- FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
%23 = V_LSHR_B32_e64 %6, %33, implicit %exec
- FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
%25 = V_LSHR_B32_e32 %34, %34, implicit %exec
- FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
%26 = V_LSHRREV_B32_e32 11, %10, implicit %exec
- FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
%28 = V_LSHR_B32_e32 %27, %35, implicit %exec
- FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
diff --git a/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll b/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll
index 8611cd080e15..09d4b2c8bd77 100644
--- a/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll
+++ b/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll
@@ -107,7 +107,7 @@ define amdgpu_kernel void @fold_mi_v_not_0(i64 addrspace(1)* %out) {
; GCN: v_bcnt_u32_b32{{(_e64)*}} v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, 0{{$}}
; GCN: v_bcnt_u32_b32{{(_e32)*(_e64)*}} v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, v[[RESULT_LO]]{{$}}
; GCN-DAG: v_not_b32_e32 v[[RESULT_LO]], v[[RESULT_LO]]
-; GCN-DAG: v_or_b32_e32 v[[RESULT_LO]], v[[VREG1_LO]], v[[RESULT_LO]]
+; GCN-DAG: v_or_b32_e32 v[[RESULT_LO]], v[[RESULT_LO]], v[[VREG1_LO]]
; GCN-DAG: v_mov_b32_e32 v[[RESULT_HI:[0-9]+]], v[[VREG1_HI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
define amdgpu_kernel void @fold_mi_or_neg1(i64 addrspace(1)* %out) {
diff --git a/test/CodeGen/AMDGPU/copy-illegal-type.ll b/test/CodeGen/AMDGPU/copy-illegal-type.ll
index d772d1b67936..e39bd60a1cc8 100644
--- a/test/CodeGen/AMDGPU/copy-illegal-type.ll
+++ b/test/CodeGen/AMDGPU/copy-illegal-type.ll
@@ -5,35 +5,41 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
; FUNC-LABEL: {{^}}test_copy_v4i8:
-; GCN: buffer_load_dword [[REG:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[REG:v[0-9]+]]
; GCN: buffer_store_dword [[REG]]
; GCN: s_endpgm
define amdgpu_kernel void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
- %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
+ %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
+ %val = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: {{^}}test_copy_v4i8_x2:
-; GCN: buffer_load_dword [[REG:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[REG:v[0-9]+]]
; GCN: buffer_store_dword [[REG]]
; GCN: buffer_store_dword [[REG]]
; GCN: s_endpgm
define amdgpu_kernel void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
- %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
+ %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
+ %val = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
ret void
}
; FUNC-LABEL: {{^}}test_copy_v4i8_x3:
-; GCN: buffer_load_dword [[REG:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[REG:v[0-9]+]]
; GCN: buffer_store_dword [[REG]]
; GCN: buffer_store_dword [[REG]]
; GCN: buffer_store_dword [[REG]]
; GCN: s_endpgm
define amdgpu_kernel void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
- %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
+ %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
+ %val = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
@@ -41,14 +47,16 @@ define amdgpu_kernel void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x
}
; FUNC-LABEL: {{^}}test_copy_v4i8_x4:
-; GCN: buffer_load_dword [[REG:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[REG:v[0-9]+]]
; GCN: buffer_store_dword [[REG]]
; GCN: buffer_store_dword [[REG]]
; GCN: buffer_store_dword [[REG]]
; GCN: buffer_store_dword [[REG]]
; GCN: s_endpgm
define amdgpu_kernel void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %out3, <4 x i8> addrspace(1)* %in) nounwind {
- %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
+ %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
+ %val = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
@@ -57,7 +65,7 @@ define amdgpu_kernel void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x
}
; FUNC-LABEL: {{^}}test_copy_v4i8_extra_use:
-; GCN: buffer_load_dword
+; GCN: {{buffer|flat}}_load_dword
; GCN-DAG: v_lshrrev_b32
; GCN: v_and_b32
; GCN: v_or_b32
@@ -66,7 +74,9 @@ define amdgpu_kernel void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x
; GCN: s_endpgm
define amdgpu_kernel void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
- %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
+ %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
+ %val = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 4
%add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
@@ -97,19 +107,21 @@ define amdgpu_kernel void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %o
}
; FUNC-LABEL: {{^}}test_copy_v3i8_align4:
-; GCN: buffer_load_dword
+; GCN: {{buffer|flat}}_load_dword
; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
; GCN: s_endpgm
define amdgpu_kernel void @test_copy_v3i8_align4(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
- %val = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 4
+ %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %in, i32 %tid.x
+ %val = load <3 x i8>, <3 x i8> addrspace(1)* %gep, align 4
store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: {{^}}test_copy_v3i8_align2:
-; GCN-DAG: buffer_load_ushort v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; GCN-DAG: buffer_load_ubyte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
+; GCN-DAG: {{buffer|flat}}_load_ushort v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; GCN-DAG: {{buffer|flat}}_load_ubyte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
; GCN: s_endpgm
@@ -120,9 +132,9 @@ define amdgpu_kernel void @test_copy_v3i8_align2(<3 x i8> addrspace(1)* %out, <3
}
; FUNC-LABEL: {{^}}test_copy_v3i8_align1:
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
@@ -135,10 +147,10 @@ define amdgpu_kernel void @test_copy_v3i8_align1(<3 x i8> addrspace(1)* %out, <3
}
; FUNC-LABEL: {{^}}test_copy_v4i8_volatile_load:
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
@@ -148,10 +160,10 @@ define amdgpu_kernel void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %
}
; FUNC-LABEL: {{^}}test_copy_v4i8_volatile_store:
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
+; GCN: {{buffer|flat}}_load_ubyte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
diff --git a/test/CodeGen/AMDGPU/ctlz.ll b/test/CodeGen/AMDGPU/ctlz.ll
index 149c50685b1d..a544cbe890b5 100644
--- a/test/CodeGen/AMDGPU/ctlz.ll
+++ b/test/CodeGen/AMDGPU/ctlz.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=EG -check-prefix=FUNC %s
declare i7 @llvm.ctlz.i7(i7, i1) nounwind readnone
declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
@@ -34,9 +34,9 @@ define amdgpu_kernel void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val)
}
; FUNC-LABEL: {{^}}v_ctlz_i32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
-; GCN-DAG: v_ffbh_u32_e32 [[CTLZ:v[0-9]+]], [[VAL]]
-; GCN-DAG: v_cmp_ne_u32_e32 vcc, 0, [[CTLZ]]
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
+; GCN: v_ffbh_u32_e32 [[CTLZ:v[0-9]+]], [[VAL]]
+; GCN: v_cmp_ne_u32_e32 vcc, 0, [[VAL]]
; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], 32, [[CTLZ]], vcc
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
@@ -44,14 +44,16 @@ define amdgpu_kernel void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val)
; EG: FFBH_UINT
; EG: CNDE_INT
define amdgpu_kernel void @v_ctlz_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
store i32 %ctlz, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: {{^}}v_ctlz_v2i32:
-; GCN: buffer_load_dwordx2
+; GCN: {{buffer|flat}}_load_dwordx2
; GCN: v_ffbh_u32_e32
; GCN: v_ffbh_u32_e32
; GCN: buffer_store_dwordx2
@@ -62,14 +64,16 @@ define amdgpu_kernel void @v_ctlz_i32(i32 addrspace(1)* noalias %out, i32 addrsp
; EG: FFBH_UINT
; EG: CNDE_INT
define amdgpu_kernel void @v_ctlz_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
- %val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %valptr, i32 %tid
+ %val = load <2 x i32>, <2 x i32> addrspace(1)* %in.gep, align 8
%ctlz = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %val, i1 false) nounwind readnone
store <2 x i32> %ctlz, <2 x i32> addrspace(1)* %out, align 8
ret void
}
; FUNC-LABEL: {{^}}v_ctlz_v4i32:
-; GCN: buffer_load_dwordx4
+; GCN: {{buffer|flat}}_load_dwordx4
; GCN: v_ffbh_u32_e32
; GCN: v_ffbh_u32_e32
; GCN: v_ffbh_u32_e32
@@ -90,16 +94,25 @@ define amdgpu_kernel void @v_ctlz_v2i32(<2 x i32> addrspace(1)* noalias %out, <2
; EG-DAG: FFBH_UINT
; EG-DAG: CNDE_INT
define amdgpu_kernel void @v_ctlz_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
- %val = load <4 x i32>, <4 x i32> addrspace(1)* %valptr, align 16
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %valptr, i32 %tid
+ %val = load <4 x i32>, <4 x i32> addrspace(1)* %in.gep, align 16
%ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val, i1 false) nounwind readnone
store <4 x i32> %ctlz, <4 x i32> addrspace(1)* %out, align 16
ret void
}
; FUNC-LABEL: {{^}}v_ctlz_i8:
-; GCN: buffer_load_ubyte [[VAL:v[0-9]+]],
-; SI-DAG: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
-; VI-DAG: v_ffbh_u32_sdwa [[RESULT:v[0-9]+]], [[VAL]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+; GCN: {{buffer|flat}}_load_ubyte [[VAL:v[0-9]+]],
+; SI-DAG: v_ffbh_u32_e32 [[FFBH:v[0-9]+]], [[VAL]]
+; VI-DAG: v_ffbh_u32_sdwa [[FFBH:v[0-9]+]], [[VAL]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+; SI: v_cmp_ne_u32_e32 vcc, 0, [[VAL]]
+; VI: v_cmp_ne_u16_e32 vcc, 0, [[VAL]]
+
+; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 32, [[FFBH]], vcc
+
+; SI: v_subrev_i32_e32 [[RESULT:v[0-9]+]], vcc, 24, [[SELECT]]
+; VI: v_add_i32_e32 [[RESULT:v[0-9]+]], vcc, -16, [[SELECT]]
; GCN: buffer_store_byte [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctlz_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
@@ -136,12 +149,12 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64
; FUNC-LABEL: {{^}}v_ctlz_i64:
; GCN-DAG: {{buffer|flat}}_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
-; GCN-DAG: v_cmp_eq_u32_e64 [[CMPHI:s\[[0-9]+:[0-9]+\]]], 0, v[[HI]]
+; GCN-DAG: v_cmp_eq_u32_e32 vcc, 0, v[[HI]]
; GCN-DAG: v_ffbh_u32_e32 [[FFBH_LO:v[0-9]+]], v[[LO]]
; GCN-DAG: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, 32, [[FFBH_LO]]
; GCN-DAG: v_ffbh_u32_e32 [[FFBH_HI:v[0-9]+]], v[[HI]]
-; GCN-DAG: v_cndmask_b32_e64 v[[CTLZ:[0-9]+]], [[FFBH_HI]], [[ADD]], [[CMPHI]]
-; GCN-DAG: v_or_b32_e32 [[OR:v[0-9]+]], v[[HI]], v[[LO]]
+; GCN-DAG: v_cndmask_b32_e32 v[[CTLZ:[0-9]+]], [[FFBH_HI]], [[ADD]], vcc
+; GCN-DAG: v_or_b32_e32 [[OR:v[0-9]+]], v[[LO]], v[[HI]]
; GCN-DAG: v_cmp_ne_u32_e32 vcc, 0, [[OR]]
; GCN-DAG: v_cndmask_b32_e32 v[[CLTZ_LO:[0-9]+]], 64, v[[CTLZ:[0-9]+]], vcc
; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[CLTZ_LO]]:[[CTLZ_HI:[0-9]+]]{{\]}}
@@ -168,12 +181,14 @@ define amdgpu_kernel void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64
}
; FUNC-LABEL: {{^}}v_ctlz_i32_sel_eq_neg1:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
- define amdgpu_kernel void @v_ctlz_i32_sel_eq_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+define amdgpu_kernel void @v_ctlz_i32_sel_eq_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
%cmp = icmp eq i32 %val, 0
%sel = select i1 %cmp, i32 -1, i32 %ctlz
@@ -182,12 +197,14 @@ define amdgpu_kernel void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64
}
; FUNC-LABEL: {{^}}v_ctlz_i32_sel_ne_neg1:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctlz_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
%cmp = icmp ne i32 %val, 0
%sel = select i1 %cmp, i32 %ctlz, i32 -1
@@ -197,13 +214,15 @@ define amdgpu_kernel void @v_ctlz_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out
; TODO: Should be able to eliminate select here as well.
; FUNC-LABEL: {{^}}v_ctlz_i32_sel_eq_bitwidth:
-; GCN: buffer_load_dword
+; GCN: {{buffer|flat}}_load_dword
; GCN: v_ffbh_u32_e32
; GCN: v_cmp
; GCN: v_cndmask
; GCN: s_endpgm
define amdgpu_kernel void @v_ctlz_i32_sel_eq_bitwidth(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
%cmp = icmp eq i32 %ctlz, 32
%sel = select i1 %cmp, i32 -1, i32 %ctlz
@@ -212,13 +231,15 @@ define amdgpu_kernel void @v_ctlz_i32_sel_eq_bitwidth(i32 addrspace(1)* noalias
}
; FUNC-LABEL: {{^}}v_ctlz_i32_sel_ne_bitwidth:
-; GCN: buffer_load_dword
+; GCN: {{buffer|flat}}_load_dword
; GCN: v_ffbh_u32_e32
; GCN: v_cmp
; GCN: v_cndmask
; GCN: s_endpgm
define amdgpu_kernel void @v_ctlz_i32_sel_ne_bitwidth(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
%cmp = icmp ne i32 %ctlz, 32
%sel = select i1 %cmp, i32 %ctlz, i32 -1
@@ -242,7 +263,7 @@ define amdgpu_kernel void @v_ctlz_i32_sel_ne_bitwidth(i32 addrspace(1)* noalias
}
; FUNC-LABEL: {{^}}v_ctlz_i16_sel_eq_neg1:
-; SI: buffer_load_ushort [[VAL:v[0-9]+]],
+; SI: {{buffer|flat}}_load_ushort [[VAL:v[0-9]+]],
; SI: v_ffbh_u32_e32 [[FFBH:v[0-9]+]], [[VAL]]
; SI: buffer_store_short [[FFBH]],
define amdgpu_kernel void @v_ctlz_i16_sel_eq_neg1(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) nounwind {
diff --git a/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index 48f3e4401f1a..7500da536307 100644
--- a/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -29,21 +29,23 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out,
}
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
define amdgpu_kernel void @v_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
store i32 %ctlz, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_v2i32:
-; GCN: buffer_load_dwordx2
+; GCN: {{buffer|flat}}_load_dwordx2
; GCN: v_ffbh_u32_e32
; GCN: v_ffbh_u32_e32
; GCN: buffer_store_dwordx2
@@ -52,14 +54,16 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out,
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
define amdgpu_kernel void @v_ctlz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
- %val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %valptr, i32 %tid
+ %val = load <2 x i32>, <2 x i32> addrspace(1)* %in.gep, align 8
%ctlz = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
store <2 x i32> %ctlz, <2 x i32> addrspace(1)* %out, align 8
ret void
}
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_v4i32:
-; GCN: buffer_load_dwordx4
+; GCN: {{buffer|flat}}_load_dwordx4
; GCN: v_ffbh_u32_e32
; GCN: v_ffbh_u32_e32
; GCN: v_ffbh_u32_e32
@@ -72,18 +76,22 @@ define amdgpu_kernel void @v_ctlz_zero_undef_v2i32(<2 x i32> addrspace(1)* noali
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
define amdgpu_kernel void @v_ctlz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
- %val = load <4 x i32>, <4 x i32> addrspace(1)* %valptr, align 16
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %valptr, i32 %tid
+ %val = load <4 x i32>, <4 x i32> addrspace(1)* %in.gep, align 16
%ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
store <4 x i32> %ctlz, <4 x i32> addrspace(1)* %out, align 16
ret void
}
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i8:
-; GCN: buffer_load_ubyte [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ubyte [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_byte [[RESULT]],
define amdgpu_kernel void @v_ctlz_zero_undef_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
- %val = load i8, i8 addrspace(1)* %valptr
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i8, i8 addrspace(1)* %valptr, i32 %tid
+ %val = load i8, i8 addrspace(1)* %in.gep
%ctlz = call i8 @llvm.ctlz.i8(i8 %val, i1 true) nounwind readnone
store i8 %ctlz, i8 addrspace(1)* %out
ret void
@@ -116,11 +124,11 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i64:
; GCN-DAG: {{buffer|flat}}_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
-; GCN-DAG: v_cmp_eq_u32_e64 [[CMPHI:s\[[0-9]+:[0-9]+\]]], 0, v[[HI]]
+; GCN-DAG: v_cmp_eq_u32_e32 vcc, 0, v[[HI]]
; GCN-DAG: v_ffbh_u32_e32 [[FFBH_LO:v[0-9]+]], v[[LO]]
; GCN-DAG: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, 32, [[FFBH_LO]]
; GCN-DAG: v_ffbh_u32_e32 [[FFBH_HI:v[0-9]+]], v[[HI]]
-; GCN-DAG: v_cndmask_b32_e64 v[[CTLZ:[0-9]+]], [[FFBH_HI]], [[FFBH_LO]]
+; GCN-DAG: v_cndmask_b32_e32 v[[CTLZ:[0-9]+]], [[FFBH_HI]], [[FFBH_LO]]
; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[CTLZ]]:[[CTLZ_HI:[0-9]+]]{{\]}}
define amdgpu_kernel void @v_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
@@ -145,11 +153,13 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias
}
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i32_sel_eq_neg1:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
- define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp eq i32 %val, 0
%sel = select i1 %cmp, i32 -1, i32 %ctlz
@@ -158,11 +168,13 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias
}
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i32_sel_ne_neg1:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp ne i32 %val, 0
%sel = select i1 %cmp, i32 %ctlz, i32 -1
@@ -186,15 +198,17 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noa
}
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i32_sel_eq_neg1_two_use:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN-DAG: v_ffbh_u32_e32 [[RESULT0:v[0-9]+]], [[VAL]]
; GCN-DAG: v_cmp_eq_u32_e32 vcc, 0, [[VAL]]
; GCN-DAG: v_cndmask_b32_e64 [[RESULT1:v[0-9]+]], 0, 1, vcc
; GCN-DAG: buffer_store_dword [[RESULT0]]
; GCN-DAG: buffer_store_byte [[RESULT1]]
; GCN: s_endpgm
- define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_neg1_two_use(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_neg1_two_use(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp eq i32 %val, 0
%sel = select i1 %cmp, i32 -1, i32 %ctlz
@@ -205,13 +219,15 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noa
; Selected on wrong constant
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i32_sel_eq_0:
-; GCN: buffer_load_dword
+; GCN: {{buffer|flat}}_load_dword
; GCN: v_ffbh_u32_e32
; GCN: v_cmp
; GCN: v_cndmask
; GCN: buffer_store_dword
- define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp eq i32 %val, 0
%sel = select i1 %cmp, i32 0, i32 %ctlz
@@ -221,13 +237,15 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noa
; Selected on wrong constant
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i32_sel_ne_0:
-; GCN: buffer_load_dword
+; GCN: {{buffer|flat}}_load_dword
; GCN: v_ffbh_u32_e32
; GCN: v_cmp
; GCN: v_cndmask
; GCN: buffer_store_dword
define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_ne_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp ne i32 %val, 0
%sel = select i1 %cmp, i32 %ctlz, i32 0
@@ -237,13 +255,15 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_ne_0(i32 addrspace(1)* noal
; Compare on wrong constant
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i32_sel_eq_cmp_non0:
-; GCN: buffer_load_dword
+; GCN: {{buffer|flat}}_load_dword
; GCN: v_ffbh_u32_e32
; GCN: v_cmp
; GCN: v_cndmask
; GCN: buffer_store_dword
- define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_cmp_non0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_cmp_non0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp eq i32 %val, 1
%sel = select i1 %cmp, i32 0, i32 %ctlz
@@ -253,13 +273,15 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_ne_0(i32 addrspace(1)* noal
; Selected on wrong constant
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i32_sel_ne_cmp_non0:
-; GCN: buffer_load_dword
+; GCN: {{buffer|flat}}_load_dword
; GCN: v_ffbh_u32_e32
; GCN: v_cmp
; GCN: v_cndmask
; GCN: buffer_store_dword
define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_ne_cmp_non0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp ne i32 %val, 1
%sel = select i1 %cmp, i32 %ctlz, i32 0
diff --git a/test/CodeGen/AMDGPU/ctpop.ll b/test/CodeGen/AMDGPU/ctpop.ll
index aa913ad406d2..68b39bad2bc1 100644
--- a/test/CodeGen/AMDGPU/ctpop.ll
+++ b/test/CodeGen/AMDGPU/ctpop.ll
@@ -8,6 +8,8 @@ declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) nounwind readnone
declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) nounwind readnone
declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>) nounwind readnone
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+
; FUNC-LABEL: {{^}}s_ctpop_i32:
; GCN: s_load_dword [[SVAL:s[0-9]+]],
; GCN: s_bcnt1_i32_b32 [[SRESULT:s[0-9]+]], [[SVAL]]
@@ -24,22 +26,24 @@ define amdgpu_kernel void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val)
; XXX - Why 0 in register?
; FUNC-LABEL: {{^}}v_ctpop_i32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], 0
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %val = load i32, i32 addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
store i32 %ctpop, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: {{^}}v_ctpop_add_chain_i32:
-; GCN: buffer_load_dword [[VAL1:v[0-9]+]],
-; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL0:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL1:v[0-9]+]],
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT:v[0-9]+]], [[VAL1]], 0
; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
; VI: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
@@ -49,8 +53,11 @@ define amdgpu_kernel void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrs
; EG: BCNT_INT
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1) nounwind {
- %val0 = load i32, i32 addrspace(1)* %in0, align 4
- %val1 = load i32, i32 addrspace(1)* %in1, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in0.gep = getelementptr i32, i32 addrspace(1)* %in0, i32 %tid
+ %in1.gep = getelementptr i32, i32 addrspace(1)* %in1, i32 %tid
+ %val0 = load i32, i32 addrspace(1)* %in0.gep, align 4
+ %val1 = load i32, i32 addrspace(1)* %in1.gep, align 4
%ctpop0 = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
%ctpop1 = call i32 @llvm.ctpop.i32(i32 %val1) nounwind readnone
%add = add i32 %ctpop0, %ctpop1
@@ -59,15 +66,17 @@ define amdgpu_kernel void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out,
}
; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i32:
-; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL0:v[0-9]+]],
; GCN: s_waitcnt
; GCN-NEXT: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define amdgpu_kernel void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1, i32 %sval) nounwind {
- %val0 = load i32, i32 addrspace(1)* %in0, align 4
- %ctpop0 = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
- %add = add i32 %ctpop0, %sval
+define amdgpu_kernel void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %sval) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 %ctpop, %sval
store i32 %add, i32 addrspace(1)* %out, align 4
ret void
}
@@ -80,7 +89,9 @@ define amdgpu_kernel void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out,
; EG: BCNT_INT
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) nounwind {
- %val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 %tid
+ %val = load <2 x i32>, <2 x i32> addrspace(1)* %in.gep, align 8
%ctpop = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %val) nounwind readnone
store <2 x i32> %ctpop, <2 x i32> addrspace(1)* %out, align 8
ret void
@@ -98,7 +109,9 @@ define amdgpu_kernel void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <
; EG: BCNT_INT
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %in) nounwind {
- %val = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 %tid
+ %val = load <4 x i32>, <4 x i32> addrspace(1)* %in.gep, align 16
%ctpop = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %val) nounwind readnone
store <4 x i32> %ctpop, <4 x i32> addrspace(1)* %out, align 16
ret void
@@ -124,7 +137,9 @@ define amdgpu_kernel void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <
; EG: BCNT_INT
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <8 x i32> addrspace(1)* noalias %in) nounwind {
- %val = load <8 x i32>, <8 x i32> addrspace(1)* %in, align 32
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <8 x i32>, <8 x i32> addrspace(1)* %in, i32 %tid
+ %val = load <8 x i32>, <8 x i32> addrspace(1)* %in.gep, align 32
%ctpop = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %val) nounwind readnone
store <8 x i32> %ctpop, <8 x i32> addrspace(1)* %out, align 32
ret void
@@ -166,21 +181,25 @@ define amdgpu_kernel void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <
; EG: BCNT_INT
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out, <16 x i32> addrspace(1)* noalias %in) nounwind {
- %val = load <16 x i32>, <16 x i32> addrspace(1)* %in, align 32
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <16 x i32>, <16 x i32> addrspace(1)* %in, i32 %tid
+ %val = load <16 x i32>, <16 x i32> addrspace(1)* %in.gep, align 32
%ctpop = call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> %val) nounwind readnone
store <16 x i32> %ctpop, <16 x i32> addrspace(1)* %out, align 32
ret void
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_inline_constant:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], 4
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %val = load i32, i32 addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 %ctpop, 4
store i32 %add, i32 addrspace(1)* %out, align 4
@@ -188,14 +207,16 @@ define amdgpu_kernel void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noa
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_inline_constant_inv:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], 4
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %val = load i32, i32 addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 4, %ctpop
store i32 %add, i32 addrspace(1)* %out, align 4
@@ -203,14 +224,16 @@ define amdgpu_kernel void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_literal:
-; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN-DAG: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN-DAG: v_mov_b32_e32 [[LIT:v[0-9]+]], 0x1869f
; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
; VI: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %val = load i32, i32 addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 %ctpop, 99999
store i32 %add, i32 addrspace(1)* %out, align 4
@@ -218,7 +241,7 @@ define amdgpu_kernel void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %ou
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_var:
-; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN-DAG: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN-DAG: s_load_dword [[VAR:s[0-9]+]],
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
; GCN: buffer_store_dword [[RESULT]],
@@ -226,7 +249,9 @@ define amdgpu_kernel void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %ou
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
- %val = load i32, i32 addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 %ctpop, %const
store i32 %add, i32 addrspace(1)* %out, align 4
@@ -234,7 +259,7 @@ define amdgpu_kernel void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_var_inv:
-; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN-DAG: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; GCN-DAG: s_load_dword [[VAR:s[0-9]+]],
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
; GCN: buffer_store_dword [[RESULT]],
@@ -242,7 +267,9 @@ define amdgpu_kernel void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
- %val = load i32, i32 addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 %const, %ctpop
store i32 %add, i32 addrspace(1)* %out, align 4
@@ -250,18 +277,22 @@ define amdgpu_kernel void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %ou
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_vvar_inv:
-; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], {{0$}}
-; GCN-DAG: buffer_load_dword [[VAR:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0 offset:16
-; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; SI: buffer_load_dword [[VAR:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
+; SI: buffer_load_dword [[VAL:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
+; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAR]], [[VAL]]
+; VI: flat_load_dword [[VAL:v[0-9]+]], v[{{[0-9]+:[0-9]+}}]
+; VI: flat_load_dword [[VAR:v[0-9]+]], v[{{[0-9]+:[0-9]+}}]
; VI: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
; EG: BCNT_INT
define amdgpu_kernel void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 addrspace(1)* noalias %constptr) nounwind {
- %val = load i32, i32 addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
- %gep = getelementptr i32, i32 addrspace(1)* %constptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %constptr, i32 %tid
%const = load i32, i32 addrspace(1)* %gep, align 4
%add = add i32 %const, %ctpop
store i32 %add, i32 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/ctpop64.ll b/test/CodeGen/AMDGPU/ctpop64.ll
index f18bd9fd8174..4850370851f6 100644
--- a/test/CodeGen/AMDGPU/ctpop64.ll
+++ b/test/CodeGen/AMDGPU/ctpop64.ll
@@ -1,6 +1,8 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+
declare i64 @llvm.ctpop.i64(i64) nounwind readnone
declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) nounwind readnone
@@ -25,14 +27,16 @@ define amdgpu_kernel void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val)
}
; FUNC-LABEL: {{^}}v_ctpop_i64:
-; GCN: buffer_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
+; GCN: {{buffer|flat}}_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT:v[0-9]+]], v[[LOVAL]], 0
; SI-NEXT: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; VI-NEXT: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
- %val = load i64, i64 addrspace(1)* %in, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %val = load i64, i64 addrspace(1)* %in.gep, align 8
%ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
%truncctpop = trunc i64 %ctpop to i32
store i32 %truncctpop, i32 addrspace(1)* %out, align 4
@@ -40,7 +44,7 @@ define amdgpu_kernel void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrs
}
; FUNC-LABEL: {{^}}v_ctpop_i64_user:
-; GCN: buffer_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
+; GCN: {{buffer|flat}}_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT:v[0-9]+]], v[[LOVAL]], 0
; SI-NEXT: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; VI-NEXT: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
@@ -49,7 +53,9 @@ define amdgpu_kernel void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrs
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_i64_user(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %s.val) nounwind {
- %val = load i64, i64 addrspace(1)* %in, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %val = load i64, i64 addrspace(1)* %in.gep, align 8
%ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
%or = or i64 %ctpop, %s.val
store i64 %or, i64 addrspace(1)* %out
@@ -87,7 +93,9 @@ define amdgpu_kernel void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <
; GCN: v_bcnt_u32_b32
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in) nounwind {
- %val = load <2 x i64>, <2 x i64> addrspace(1)* %in, align 16
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i32 %tid
+ %val = load <2 x i64>, <2 x i64> addrspace(1)* %in.gep, align 16
%ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
%truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
store <2 x i32> %truncctpop, <2 x i32> addrspace(1)* %out, align 8
@@ -105,7 +113,9 @@ define amdgpu_kernel void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <
; GCN: v_bcnt_u32_b32
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrspace(1)* noalias %in) nounwind {
- %val = load <4 x i64>, <4 x i64> addrspace(1)* %in, align 32
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
+ %val = load <4 x i64>, <4 x i64> addrspace(1)* %in.gep, align 32
%ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
%truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
store <4 x i32> %truncctpop, <4 x i32> addrspace(1)* %out, align 16
@@ -169,7 +179,8 @@ define amdgpu_kernel void @s_ctpop_i65(i32 addrspace(1)* noalias %out, i65 %val)
; FIXME: Should not have extra add
; FUNC-LABEL: {{^}}v_ctpop_i128:
-; GCN: buffer_load_dwordx4 v{{\[}}[[VAL0:[0-9]+]]:[[VAL3:[0-9]+]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI: buffer_load_dwordx4 v{{\[}}[[VAL0:[0-9]+]]:[[VAL3:[0-9]+]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
+; VI: flat_load_dwordx4 v{{\[}}[[VAL0:[0-9]+]]:[[VAL3:[0-9]+]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}
; GCN-DAG: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT0:v[0-9]+]], v{{[0-9]+}}, 0
; GCN-DAG: v_bcnt_u32_b32{{(_e32)*(_e64)*}} [[MIDRESULT1:v[0-9]+]], v[[VAL3]], [[MIDRESULT0]]
@@ -182,7 +193,9 @@ define amdgpu_kernel void @s_ctpop_i65(i32 addrspace(1)* noalias %out, i65 %val)
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_i128(i32 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in) nounwind {
- %val = load i128, i128 addrspace(1)* %in, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %tid
+ %val = load i128, i128 addrspace(1)* %in.gep, align 8
%ctpop = call i128 @llvm.ctpop.i128(i128 %val) nounwind readnone
%truncctpop = trunc i128 %ctpop to i32
store i32 %truncctpop, i32 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/cttz_zero_undef.ll b/test/CodeGen/AMDGPU/cttz_zero_undef.ll
index 1fa6407647eb..1bfd38d94bfd 100644
--- a/test/CodeGen/AMDGPU/cttz_zero_undef.ll
+++ b/test/CodeGen/AMDGPU/cttz_zero_undef.ll
@@ -5,6 +5,7 @@
declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>, i1) nounwind readnone
declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1) nounwind readnone
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; FUNC-LABEL: {{^}}s_cttz_zero_undef_i32:
; SI: s_load_dword [[VAL:s[0-9]+]],
@@ -21,21 +22,23 @@ define amdgpu_kernel void @s_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out,
}
; FUNC-LABEL: {{^}}v_cttz_zero_undef_i32:
-; SI: buffer_load_dword [[VAL:v[0-9]+]],
+; SI: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]],
; SI: v_ffbl_b32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
; EG: FFBL_INT {{\*? *}}[[RESULT]]
define amdgpu_kernel void @v_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr, align 4
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
+ %val = load i32, i32 addrspace(1)* %in.gep, align 4
%cttz = call i32 @llvm.cttz.i32(i32 %val, i1 true) nounwind readnone
store i32 %cttz, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: {{^}}v_cttz_zero_undef_v2i32:
-; SI: buffer_load_dwordx2
+; SI: {{buffer|flat}}_load_dwordx2
; SI: v_ffbl_b32_e32
; SI: v_ffbl_b32_e32
; SI: buffer_store_dwordx2
@@ -44,14 +47,16 @@ define amdgpu_kernel void @v_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out,
; EG: FFBL_INT {{\*? *}}[[RESULT]]
; EG: FFBL_INT {{\*? *}}[[RESULT]]
define amdgpu_kernel void @v_cttz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
- %val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr, align 8
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %valptr, i32 %tid
+ %val = load <2 x i32>, <2 x i32> addrspace(1)* %in.gep, align 8
%cttz = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
store <2 x i32> %cttz, <2 x i32> addrspace(1)* %out, align 8
ret void
}
; FUNC-LABEL: {{^}}v_cttz_zero_undef_v4i32:
-; SI: buffer_load_dwordx4
+; SI: {{buffer|flat}}_load_dwordx4
; SI: v_ffbl_b32_e32
; SI: v_ffbl_b32_e32
; SI: v_ffbl_b32_e32
@@ -64,7 +69,9 @@ define amdgpu_kernel void @v_cttz_zero_undef_v2i32(<2 x i32> addrspace(1)* noali
; EG: FFBL_INT {{\*? *}}[[RESULT]]
; EG: FFBL_INT {{\*? *}}[[RESULT]]
define amdgpu_kernel void @v_cttz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
- %val = load <4 x i32>, <4 x i32> addrspace(1)* %valptr, align 16
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %valptr, i32 %tid
+ %val = load <4 x i32>, <4 x i32> addrspace(1)* %in.gep, align 16
%cttz = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
store <4 x i32> %cttz, <4 x i32> addrspace(1)* %out, align 16
ret void
diff --git a/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll b/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
index 0328ce31002d..f839129fc3d8 100644
--- a/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
+++ b/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
@@ -5,46 +5,52 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
; GCN-LABEL: {{^}}load_i8_to_f32:
-; GCN: buffer_load_ubyte [[LOADREG:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ubyte [[LOADREG:v[0-9]+]],
; GCN-NOT: bfe
; GCN-NOT: lshr
; GCN: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[LOADREG]]
; GCN: buffer_store_dword [[CONV]],
define amdgpu_kernel void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
- %load = load i8, i8 addrspace(1)* %in, align 1
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i8, i8 addrspace(1)* %in, i32 %tid
+ %load = load i8, i8 addrspace(1)* %gep, align 1
%cvt = uitofp i8 %load to float
store float %cvt, float addrspace(1)* %out, align 4
ret void
}
; GCN-LABEL: {{^}}load_v2i8_to_v2f32:
-; GCN: buffer_load_ushort [[LD:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[LD:v[0-9]+]]
; GCN-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[LD]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LD]]
; GCN: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define amdgpu_kernel void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <2 x i8>, <2 x i8> addrspace(1)* %in, align 2
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <2 x i8>, <2 x i8> addrspace(1)* %in, i32 %tid
+ %load = load <2 x i8>, <2 x i8> addrspace(1)* %gep, align 2
%cvt = uitofp <2 x i8> %load to <2 x float>
store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}load_v3i8_to_v3f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: v_cvt_f32_ubyte3_e32
; GCN-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, [[VAL]]
; GCN-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[VAL]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[VAL]]
; GCN: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define amdgpu_kernel void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %in, i32 %tid
+ %load = load <3 x i8>, <3 x i8> addrspace(1)* %gep, align 4
%cvt = uitofp <3 x i8> %load to <3 x float>
store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}load_v4i8_to_v4f32:
-; GCN: buffer_load_dword [[LOADREG:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[LOADREG:v[0-9]+]]
; GCN-NOT: bfe
; GCN-NOT: lshr
; GCN-DAG: v_cvt_f32_ubyte3_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
@@ -53,7 +59,9 @@ define amdgpu_kernel void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
; GCN: buffer_store_dwordx4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define amdgpu_kernel void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid
+ %load = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 4
%cvt = uitofp <4 x i8> %load to <4 x float>
store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
ret void
@@ -64,10 +72,10 @@ define amdgpu_kernel void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias
; FIXME: Packing bytes
; GCN-LABEL: {{^}}load_v4i8_to_v4f32_unaligned:
-; GCN: buffer_load_ubyte [[LOADREG3:v[0-9]+]]
-; GCN: buffer_load_ubyte [[LOADREG2:v[0-9]+]]
-; GCN: buffer_load_ubyte [[LOADREG1:v[0-9]+]]
-; GCN: buffer_load_ubyte [[LOADREG0:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ubyte [[LOADREG3:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ubyte [[LOADREG2:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ubyte [[LOADREG1:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ubyte [[LOADREG0:v[0-9]+]]
; GCN-DAG: v_lshlrev_b32
; GCN-DAG: v_or_b32
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]],
@@ -77,7 +85,9 @@ define amdgpu_kernel void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias
; GCN: buffer_store_dwordx4
define amdgpu_kernel void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid
+ %load = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 1
%cvt = uitofp <4 x i8> %load to <4 x float>
store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
ret void
@@ -124,14 +134,16 @@ define amdgpu_kernel void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* n
; GCN-LABEL: {{^}}load_v7i8_to_v7f32:
; GCN: s_endpgm
define amdgpu_kernel void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <7 x i8>, <7 x i8> addrspace(1)* %in, align 1
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <7 x i8>, <7 x i8> addrspace(1)* %in, i32 %tid
+ %load = load <7 x i8>, <7 x i8> addrspace(1)* %gep, align 1
%cvt = uitofp <7 x i8> %load to <7 x float>
store <7 x float> %cvt, <7 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}load_v8i8_to_v8f32:
-; GCN: buffer_load_dwordx2 v{{\[}}[[LOLOAD:[0-9]+]]:[[HILOAD:[0-9]+]]{{\]}},
+; GCN: {{buffer|flat}}_load_dwordx2 v{{\[}}[[LOLOAD:[0-9]+]]:[[HILOAD:[0-9]+]]{{\]}},
; GCN-NOT: bfe
; GCN-NOT: lshr
; GCN-DAG: v_cvt_f32_ubyte3_e32 v{{[0-9]+}}, v[[LOLOAD]]
@@ -147,19 +159,23 @@ define amdgpu_kernel void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
define amdgpu_kernel void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <8 x i8>, <8 x i8> addrspace(1)* %in, align 8
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <8 x i8>, <8 x i8> addrspace(1)* %in, i32 %tid
+ %load = load <8 x i8>, <8 x i8> addrspace(1)* %gep, align 8
%cvt = uitofp <8 x i8> %load to <8 x float>
store <8 x float> %cvt, <8 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}i8_zext_inreg_i32_to_f32:
-; GCN: buffer_load_dword [[LOADREG:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[LOADREG:v[0-9]+]],
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, 2, [[LOADREG]]
; GCN-NEXT: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[ADD]]
; GCN: buffer_store_dword [[CONV]],
define amdgpu_kernel void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %load = load i32, i32 addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %load = load i32, i32 addrspace(1)* %gep, align 4
%add = add i32 %load, 2
%inreg = and i32 %add, 255
%cvt = uitofp i32 %inreg to float
@@ -169,7 +185,9 @@ define amdgpu_kernel void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias
; GCN-LABEL: {{^}}i8_zext_inreg_hi1_to_f32:
define amdgpu_kernel void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %load = load i32, i32 addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %load = load i32, i32 addrspace(1)* %gep, align 4
%inreg = and i32 %load, 65280
%shr = lshr i32 %inreg, 8
%cvt = uitofp i32 %shr to float
@@ -181,7 +199,9 @@ define amdgpu_kernel void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias
; them so it shouldn't really matter.
; GCN-LABEL: {{^}}i8_zext_i32_to_f32:
define amdgpu_kernel void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
- %load = load i8, i8 addrspace(1)* %in, align 1
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i8, i8 addrspace(1)* %in, i32 %tid
+ %load = load i8, i8 addrspace(1)* %gep, align 1
%ext = zext i8 %load to i32
%cvt = uitofp i32 %ext to float
store float %cvt, float addrspace(1)* %out, align 4
@@ -190,7 +210,9 @@ define amdgpu_kernel void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out,
; GCN-LABEL: {{^}}v4i8_zext_v4i32_to_v4f32:
define amdgpu_kernel void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid
+ %load = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 1
%ext = zext <4 x i8> %load to <4 x i32>
%cvt = uitofp <4 x i32> %ext to <4 x float>
store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
@@ -198,12 +220,14 @@ define amdgpu_kernel void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* no
}
; GCN-LABEL: {{^}}extract_byte0_to_f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
define amdgpu_kernel void @extract_byte0_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %val = load i32, i32 addrspace(1)* %in
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %gep
%and = and i32 %val, 255
%cvt = uitofp i32 %and to float
store float %cvt, float addrspace(1)* %out
@@ -211,12 +235,14 @@ define amdgpu_kernel void @extract_byte0_to_f32(float addrspace(1)* noalias %out
}
; GCN-LABEL: {{^}}extract_byte1_to_f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte1_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
define amdgpu_kernel void @extract_byte1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %val = load i32, i32 addrspace(1)* %in
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %gep
%srl = lshr i32 %val, 8
%and = and i32 %srl, 255
%cvt = uitofp i32 %and to float
@@ -225,12 +251,14 @@ define amdgpu_kernel void @extract_byte1_to_f32(float addrspace(1)* noalias %out
}
; GCN-LABEL: {{^}}extract_byte2_to_f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte2_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
define amdgpu_kernel void @extract_byte2_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %val = load i32, i32 addrspace(1)* %in
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %gep
%srl = lshr i32 %val, 16
%and = and i32 %srl, 255
%cvt = uitofp i32 %and to float
@@ -239,12 +267,14 @@ define amdgpu_kernel void @extract_byte2_to_f32(float addrspace(1)* noalias %out
}
; GCN-LABEL: {{^}}extract_byte3_to_f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte3_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
define amdgpu_kernel void @extract_byte3_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
- %val = load i32, i32 addrspace(1)* %in
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %val = load i32, i32 addrspace(1)* %gep
%srl = lshr i32 %val, 24
%and = and i32 %srl, 255
%cvt = uitofp i32 %and to float
diff --git a/test/CodeGen/AMDGPU/detect-dead-lanes.mir b/test/CodeGen/AMDGPU/detect-dead-lanes.mir
index 3148b9b8ff9d..c265b8e2ad2e 100644
--- a/test/CodeGen/AMDGPU/detect-dead-lanes.mir
+++ b/test/CodeGen/AMDGPU/detect-dead-lanes.mir
@@ -1,14 +1,4 @@
# RUN: llc -march=amdgcn -run-pass detect-dead-lanes -o - %s | FileCheck %s
---- |
- define amdgpu_kernel void @test0() { ret void }
- define amdgpu_kernel void @test1() { ret void }
- define amdgpu_kernel void @test2() { ret void }
- define amdgpu_kernel void @test3() { ret void }
- define amdgpu_kernel void @test4() { ret void }
- define amdgpu_kernel void @test5() { ret void }
- define amdgpu_kernel void @loop0() { ret void }
- define amdgpu_kernel void @loop1() { ret void }
- define amdgpu_kernel void @loop2() { ret void }
...
---
# Combined use/def transfer check, the basics.
diff --git a/test/CodeGen/AMDGPU/ds_read2.ll b/test/CodeGen/AMDGPU/ds_read2.ll
index 2c474dbe7b08..deb90df99dcf 100644
--- a/test/CodeGen/AMDGPU/ds_read2.ll
+++ b/test/CodeGen/AMDGPU/ds_read2.ll
@@ -9,7 +9,7 @@
; SI-LABEL: @simple_read2_f32
; SI: ds_read2_b32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}} offset1:8
; SI: s_waitcnt lgkmcnt(0)
-; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
+; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[LO_VREG]], v[[HI_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @simple_read2_f32(float addrspace(1)* %out) #0 {
@@ -28,7 +28,7 @@ define amdgpu_kernel void @simple_read2_f32(float addrspace(1)* %out) #0 {
; SI-LABEL: @simple_read2_f32_max_offset
; SI: ds_read2_b32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}} offset1:255
; SI: s_waitcnt lgkmcnt(0)
-; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
+; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[LO_VREG]], v[[HI_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @simple_read2_f32_max_offset(float addrspace(1)* %out) #0 {
diff --git a/test/CodeGen/AMDGPU/ds_read2_superreg.ll b/test/CodeGen/AMDGPU/ds_read2_superreg.ll
index 3dfdaf3936a6..ef4efc6336ce 100644
--- a/test/CodeGen/AMDGPU/ds_read2_superreg.ll
+++ b/test/CodeGen/AMDGPU/ds_read2_superreg.ll
@@ -38,9 +38,9 @@ define amdgpu_kernel void @simple_read2_v2f32_superreg(<2 x float> addrspace(1)*
; CI-LABEL: {{^}}simple_read2_v4f32_superreg_align4:
; CI-DAG: ds_read2_b32 v{{\[}}[[REG_X:[0-9]+]]:[[REG_Y:[0-9]+]]{{\]}}, v{{[0-9]+}} offset1:1{{$}}
; CI-DAG: ds_read2_b32 v{{\[}}[[REG_Z:[0-9]+]]:[[REG_W:[0-9]+]]{{\]}}, v{{[0-9]+}} offset0:2 offset1:3{{$}}
-; CI-DAG: v_add_f32_e32 v[[ADD0:[0-9]+]], v[[REG_Z]], v[[REG_X]]
-; CI-DAG: v_add_f32_e32 v[[ADD1:[0-9]+]], v[[REG_W]], v[[REG_Y]]
-; CI: v_add_f32_e32 v[[ADD2:[0-9]+]], v[[ADD1]], v[[ADD0]]
+; CI-DAG: v_add_f32_e32 v[[ADD0:[0-9]+]], v[[REG_X]], v[[REG_Z]]
+; CI-DAG: v_add_f32_e32 v[[ADD1:[0-9]+]], v[[REG_Y]], v[[REG_W]]
+; CI: v_add_f32_e32 v[[ADD2:[0-9]+]], v[[ADD0]], v[[ADD1]]
; CI: buffer_store_dword v[[ADD2]]
; CI: s_endpgm
define amdgpu_kernel void @simple_read2_v4f32_superreg_align4(float addrspace(1)* %out) #0 {
@@ -64,8 +64,8 @@ define amdgpu_kernel void @simple_read2_v4f32_superreg_align4(float addrspace(1)
; CI-LABEL: {{^}}simple_read2_v3f32_superreg_align4:
; CI-DAG: ds_read2_b32 v{{\[}}[[REG_X:[0-9]+]]:[[REG_Y:[0-9]+]]{{\]}}, v{{[0-9]+}} offset1:1{{$}}
; CI-DAG: ds_read_b32 v[[REG_Z:[0-9]+]], v{{[0-9]+}} offset:8{{$}}
-; CI-DAG: v_add_f32_e32 v[[ADD0:[0-9]+]], v[[REG_Z]], v[[REG_X]]
-; CI-DAG: v_add_f32_e32 v[[ADD1:[0-9]+]], v[[REG_Y]], v[[ADD0]]
+; CI-DAG: v_add_f32_e32 v[[ADD0:[0-9]+]], v[[REG_X]], v[[REG_Z]]
+; CI-DAG: v_add_f32_e32 v[[ADD1:[0-9]+]], v[[ADD0]], v[[REG_Y]]
; CI: buffer_store_dword v[[ADD1]]
; CI: s_endpgm
define amdgpu_kernel void @simple_read2_v3f32_superreg_align4(float addrspace(1)* %out) #0 {
diff --git a/test/CodeGen/AMDGPU/ds_read2st64.ll b/test/CodeGen/AMDGPU/ds_read2st64.ll
index 81b35a46aa18..b1fba8c240d7 100644
--- a/test/CodeGen/AMDGPU/ds_read2st64.ll
+++ b/test/CodeGen/AMDGPU/ds_read2st64.ll
@@ -7,7 +7,7 @@
; SI-LABEL: @simple_read2st64_f32_0_1
; SI: ds_read2st64_b32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}} offset1:1
; SI: s_waitcnt lgkmcnt(0)
-; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
+; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[LO_VREG]], v[[HI_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0 {
@@ -26,7 +26,7 @@ define amdgpu_kernel void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0
; SI-LABEL: @simple_read2st64_f32_1_2
; SI: ds_read2st64_b32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}} offset0:1 offset1:2
; SI: s_waitcnt lgkmcnt(0)
-; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
+; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[LO_VREG]], v[[HI_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @simple_read2st64_f32_1_2(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
@@ -46,7 +46,7 @@ define amdgpu_kernel void @simple_read2st64_f32_1_2(float addrspace(1)* %out, fl
; SI-LABEL: @simple_read2st64_f32_max_offset
; SI: ds_read2st64_b32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}} offset0:1 offset1:255
; SI: s_waitcnt lgkmcnt(0)
-; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
+; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[LO_VREG]], v[[HI_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @simple_read2st64_f32_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
diff --git a/test/CodeGen/AMDGPU/early-if-convert-cost.ll b/test/CodeGen/AMDGPU/early-if-convert-cost.ll
index ace01593808b..74404989f8c7 100644
--- a/test/CodeGen/AMDGPU/early-if-convert-cost.ll
+++ b/test/CodeGen/AMDGPU/early-if-convert-cost.ll
@@ -1,4 +1,4 @@
-; RUN: llc -stress-early-ifcvt -amdgpu-early-ifcvt=1 -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -stress-early-ifcvt -amdgpu-early-ifcvt=1 -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; FIXME: Most of these cases that don't trigger because of broken cost
; heuristics. Should not need -stress-early-ifcvt
diff --git a/test/CodeGen/AMDGPU/early-if-convert.ll b/test/CodeGen/AMDGPU/early-if-convert.ll
index 9439130deb9e..792f0b1eaef4 100644
--- a/test/CodeGen/AMDGPU/early-if-convert.ll
+++ b/test/CodeGen/AMDGPU/early-if-convert.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=verde -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; XUN: llc -march=amdgcn -mcpu=tonga -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; FIXME: This leaves behind a now unnecessary and with exec
diff --git a/test/CodeGen/AMDGPU/enable-no-signed-zeros-fp-math.ll b/test/CodeGen/AMDGPU/enable-no-signed-zeros-fp-math.ll
index 6eb1fc1d0cc2..b7dfcd99029a 100644
--- a/test/CodeGen/AMDGPU/enable-no-signed-zeros-fp-math.ll
+++ b/test/CodeGen/AMDGPU/enable-no-signed-zeros-fp-math.ll
@@ -2,16 +2,21 @@
; RUN: llc -march=amdgcn -enable-no-signed-zeros-fp-math=1 < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-UNSAFE %s
; RUN: llc -march=amdgcn -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-UNSAFE %s
+declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+
; Test that the -enable-no-signed-zeros-fp-math flag works
; GCN-LABEL: {{^}}fneg_fsub_f32:
-; GCN: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; GCN: v_sub_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
; GCN-UNSAFE-NOT: xor
define amdgpu_kernel void @fneg_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
- %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
- %a = load float, float addrspace(1)* %in, align 4
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %add = add i32 %tid, 1
+ %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %b_ptr = getelementptr float, float addrspace(1)* %in, i32 %add
+ %a = load float, float addrspace(1)* %gep, align 4
%b = load float, float addrspace(1)* %b_ptr, align 4
%result = fsub float %a, %b
%neg.result = fsub float -0.0, %result
diff --git a/test/CodeGen/AMDGPU/extractelt-to-trunc.ll b/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
index 34999fa3aea4..3fb452de1ccf 100644
--- a/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
+++ b/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
@@ -1,5 +1,7 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+
; Make sure the add and load are reduced to 32-bits even with the
; bitcast to vector.
; GCN-LABEL: {{^}}bitcast_int_to_vector_extract_0:
@@ -8,7 +10,9 @@
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, [[B]], [[A]]
; GCN: buffer_store_dword [[ADD]]
define amdgpu_kernel void @bitcast_int_to_vector_extract_0(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %b) {
- %a = load i64, i64 addrspace(1)* %in
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %a = load i64, i64 addrspace(1)* %gep
%add = add i64 %a, %b
%val.bc = bitcast i64 %add to <2 x i32>
%extract = extractelement <2 x i32> %val.bc, i32 0
@@ -21,7 +25,9 @@ define amdgpu_kernel void @bitcast_int_to_vector_extract_0(i32 addrspace(1)* %ou
; GCN: v_add_f64
; GCN: buffer_store_dword v
define amdgpu_kernel void @bitcast_fp_to_vector_extract_0(i32 addrspace(1)* %out, double addrspace(1)* %in, double %b) {
- %a = load double, double addrspace(1)* %in
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %a = load double, double addrspace(1)* %gep
%add = fadd double %a, %b
%val.bc = bitcast double %add to <2 x i32>
%extract = extractelement <2 x i32> %val.bc, i32 0
@@ -34,7 +40,9 @@ define amdgpu_kernel void @bitcast_fp_to_vector_extract_0(i32 addrspace(1)* %out
; GCN: v_add_i32
; GCN: buffer_store_dword
define amdgpu_kernel void @bitcast_int_to_fpvector_extract_0(float addrspace(1)* %out, i64 addrspace(1)* %in, i64 %b) {
- %a = load i64, i64 addrspace(1)* %in
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %a = load i64, i64 addrspace(1)* %gep
%add = add i64 %a, %b
%val.bc = bitcast i64 %add to <2 x float>
%extract = extractelement <2 x float> %val.bc, i32 0
diff --git a/test/CodeGen/AMDGPU/fabs.f16.ll b/test/CodeGen/AMDGPU/fabs.f16.ll
index 4e2ec4b3054f..d56d5ec1411a 100644
--- a/test/CodeGen/AMDGPU/fabs.f16.ll
+++ b/test/CodeGen/AMDGPU/fabs.f16.ll
@@ -39,9 +39,9 @@ define amdgpu_kernel void @s_fabs_f16(half addrspace(1)* %out, half %in) {
; VI: flat_load_ushort [[HI:v[0-9]+]]
; VI: flat_load_ushort [[LO:v[0-9]+]]
; VI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x7fff{{$}}
-; VI-DAG: v_and_b32_e32 [[FABS_LO:v[0-9]+]], [[MASK]], [[HI]]
+; VI-DAG: v_and_b32_e32 [[FABS_LO:v[0-9]+]], [[HI]], [[MASK]]
; VI-DAG: v_and_b32_sdwa [[FABS_HI:v[0-9]+]], [[LO]], [[MASK]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, [[FABS_HI]], [[FABS_LO]]
+; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, [[FABS_LO]], [[FABS_HI]]
; VI: flat_store_dword
; GFX9: s_load_dword [[VAL:s[0-9]+]]
@@ -62,8 +62,8 @@ define amdgpu_kernel void @s_fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half
; VI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x7fff{{$}}
; VI-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, [[MASK]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, [[MASK]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
-; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[MASK]]
+; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[MASK]]
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
@@ -80,7 +80,7 @@ define amdgpu_kernel void @s_fabs_v4f16(<4 x half> addrspace(1)* %out, <4 x half
; CI-DAG: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[IN0]]
; CI-DAG: v_cvt_f32_f16_e64 [[ABS_CVT1:v[0-9]+]], |[[IN1]]|
-; CI: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[CVT0]], [[ABS_CVT1]]
+; CI: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[ABS_CVT1]], [[CVT0]]
; CI: v_cvt_f16_f32_e32 [[CVTRESULT:v[0-9]+]], [[RESULT]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[CVTRESULT]]
@@ -134,7 +134,9 @@ define amdgpu_kernel void @fabs_free_v2f16(<2 x half> addrspace(1)* %out, i32 %i
; GFX9: v_and_b32_e32 [[FABS:v[0-9]+]], 0x7fff7fff, [[VAL]]
; GFX9: v_pk_mul_f16 v{{[0-9]+}}, [[FABS]], v{{[0-9]+$}}
define amdgpu_kernel void @v_fabs_fold_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
- %val = load <2 x half>, <2 x half> addrspace(1)* %in
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %in, i32 %tid
+ %val = load <2 x half>, <2 x half> addrspace(1)* %gep
%fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %val)
%fmul = fmul <2 x half> %fabs, %val
store <2 x half> %fmul, <2 x half> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll b/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll
index 9edf55cbc69f..0c4a77964d15 100644
--- a/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll
+++ b/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll
@@ -16,8 +16,8 @@
; GCN: buffer_load_dword [[U:v[0-9]+]]
; GCN: buffer_load_dword [[V:v[0-9]+]]
-; GCN-FLUSH: v_mac_f32_e32 [[Z]], [[V]], [[U]]
-; GCN-FLUSH-NEXT: v_mac_f32_e32 [[Z]], [[Y]], [[X]]
+; GCN-FLUSH: v_mac_f32_e32 [[Z]], [[U]], [[V]]
+; GCN-FLUSH-NEXT: v_mac_f32_e32 [[Z]], [[X]], [[Y]]
; GCN-FLUSH-NEXT: buffer_store_dword [[Z]]
; GCN-FASTFMA: v_fma_f32 [[FMA0:v[0-9]+]], [[U]], [[V]], [[Z]]
@@ -49,7 +49,7 @@ define amdgpu_kernel void @fast_add_fmuladd_fmul() #0 {
; GCN: buffer_load_dword [[V:v[0-9]+]]
; GCN-FLUSH: v_mad_f32 [[TMP:v[0-9]]], [[U]], [[V]], -[[Z]]
-; GCN-FLUSH-NEXT: v_mac_f32_e32 [[TMP]], [[Y]], [[X]]
+; GCN-FLUSH-NEXT: v_mac_f32_e32 [[TMP]], [[X]], [[Y]]
; GCN-FLUSH-NEXT: buffer_store_dword [[Z]]
; GCN-FASTFMA: v_fma_f32 [[FMA0:v[0-9]+]], [[U]], [[V]], -[[Z]]
@@ -75,13 +75,13 @@ define amdgpu_kernel void @fast_sub_fmuladd_fmul() #0 {
; GCN: buffer_load_dword [[U:v[0-9]+]]
; GCN: buffer_load_dword [[V:v[0-9]+]]
-; GCN-FLUSH-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[V]], [[U]]
-; GCN-FLUSH-DAG: v_mac_f32_e32 [[MUL]], [[Y]], [[X]]
-; GCN-FLUSH: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[U]]
+; GCN-FLUSH-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[U]], [[V]]
+; GCN-FLUSH-DAG: v_mac_f32_e32 [[MUL]], [[X]], [[Y]]
+; GCN-FLUSH: v_add_f32_e32 v{{[0-9]+}}, [[U]], [[Z]]
-; GCN-FASTFMA: v_mul_f32_e32 [[MUL:v[0-9]+]], [[V]], [[U]]
+; GCN-FASTFMA: v_mul_f32_e32 [[MUL:v[0-9]+]], [[U]], [[V]]
; GCN-FASTFMA: v_fma_f32 [[FMA1:v[0-9]+]], [[X]], [[Y]], [[MUL]]
-; GCN-FASTFMA: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[FMA1]]
+; GCN-FASTFMA: v_add_f32_e32 v{{[0-9]+}}, [[FMA1]], [[Z]]
; GCN-SLOWFMA: v_mul_f32_e32
; GCN-SLOWFMA: v_mul_f32_e32
@@ -108,13 +108,13 @@ define amdgpu_kernel void @fast_add_fmuladd_fmul_multi_use_mul() #0 {
; GCN: buffer_load_dword [[U:v[0-9]+]]
; GCN: buffer_load_dword [[V:v[0-9]+]]
-; GCN-FLUSH-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[V]], [[U]]
-; GCN-FLUSH-DAG: v_mac_f32_e32 [[MUL]], [[Y]], [[X]]
-; GCN-FLUSH: v_add_f32_e32 v{{[0-9]+}}, [[U]], [[Z]]
+; GCN-FLUSH-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[U]], [[V]]
+; GCN-FLUSH-DAG: v_mac_f32_e32 [[MUL]], [[X]], [[Y]]
+; GCN-FLUSH: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[U]]
-; GCN-FASTFMA: v_mul_f32_e32 [[MUL:v[0-9]+]], [[V]], [[U]]
+; GCN-FASTFMA: v_mul_f32_e32 [[MUL:v[0-9]+]], [[U]], [[V]]
; GCN-FASTFMA: v_fma_f32 [[FMA1:v[0-9]+]], [[X]], [[Y]], [[MUL]]
-; GCN-FASTFMA: v_add_f32_e32 v{{[0-9]+}}, [[FMA1]], [[Z]]
+; GCN-FASTFMA: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[FMA1]]
; GCN-SLOWFMA: v_mul_f32_e32
; GCN-SLOWFMA: v_mul_f32_e32
@@ -191,17 +191,17 @@ define amdgpu_kernel void @fast_add_fmuladd_fmul_multi_use_fmuladd_commute() #0
; GCN: buffer_load_dword [[U:v[0-9]+]]
; GCN: buffer_load_dword [[V:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[V]], [[U]]
+; GCN-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[U]], [[V]]
-; GCN-FLUSH: v_mad_f32 [[MAD:v[0-9]+]], [[Y]], [[X]], [[MUL]]
-; GCN-FLUSH: v_subrev_f32_e32 [[SUB:v[0-9]+]], [[Z]], [[MAD]]
+; GCN-FLUSH: v_mad_f32 [[MAD:v[0-9]+]], [[X]], [[Y]], [[MUL]]
+; GCN-FLUSH: v_sub_f32_e32 [[SUB:v[0-9]+]], [[MAD]], [[Z]]
; GCN-FASTFMA: v_fma_f32 [[MAD:v[0-9]+]], [[X]], [[Y]], [[MUL]]
-; GCN-FASTFMA: v_subrev_f32_e32 [[SUB:v[0-9]+]], [[Z]], [[MAD]]
+; GCN-FASTFMA: v_sub_f32_e32 [[SUB:v[0-9]+]], [[MAD]], [[Z]]
-; GCN-SLOWFMA-DAG: v_mul_f32_e32 v{{[0-9]+}}, [[Y]], [[X]]
+; GCN-SLOWFMA-DAG: v_mul_f32_e32 v{{[0-9]+}}, [[X]], [[Y]]
; GCN-SLOWFMA: v_add_f32_e32
-; GCN-SLOWFMA: v_subrev_f32_e32 [[MAD:v[0-9]+]]
+; GCN-SLOWFMA: v_sub_f32_e32 [[MAD:v[0-9]+]]
; GCN: buffer_store_dword [[MUL]]
; GCN: buffer_store_dword [[MAD]]
@@ -226,21 +226,21 @@ define amdgpu_kernel void @fast_sub_fmuladd_fmul_multi_use_mul() #0 {
; GCN: buffer_load_dword [[U:v[0-9]+]]
; GCN: buffer_load_dword [[V:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[V]], [[U]]
+; GCN-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[U]], [[V]]
-; GCN-FLUSH-NEXT: v_mac_f32_e32 [[MUL]], [[Y]], [[X]]
-; GCN-FLUSH-NEXT: v_subrev_f32_e32 [[SUB:v[0-9]+]], [[Z]], [[MUL]]
+; GCN-FLUSH-NEXT: v_mac_f32_e32 [[MUL]], [[X]], [[Y]]
+; GCN-FLUSH-NEXT: v_sub_f32_e32 [[SUB:v[0-9]+]], [[MUL]], [[Z]]
; GCN-FLUSH-NEXT: buffer_store_dword [[MUL]]
; GCN-FLUSH-NEXT: buffer_store_dword [[SUB]]
; GCN-FASTFMA-NEXT: v_fma_f32 [[FMA:v[0-9]+]], [[X]], [[Y]], [[U]]
-; GCN-FASTFMA-NEXT: v_subrev_f32_e32 [[SUB:v[0-9]+]], [[Z]], [[FMA]]
+; GCN-FASTFMA-NEXT: v_sub_f32_e32 [[SUB:v[0-9]+]], [[FMA]], [[Z]]
; GCN-FASTFMA-NEXT: buffer_store_dword [[FMA]]
; GCN-FASTFMA-NEXT: buffer_store_dword [[SUB]]
-; GCN-SLOWFMA-DAG: v_mul_f32_e32 v{{[0-9]+}}, [[Y]], [[X]]
+; GCN-SLOWFMA-DAG: v_mul_f32_e32 v{{[0-9]+}}, [[X]], [[Y]]
; GCN-SLOWFMA: v_add_f32_e32
-; GCN-SLOWFMA: v_subrev_f32_e32
+; GCN-SLOWFMA: v_sub_f32_e32
define amdgpu_kernel void @fast_sub_fmuladd_fmul_multi_use_fmuladd() #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
diff --git a/test/CodeGen/AMDGPU/fadd.f16.ll b/test/CodeGen/AMDGPU/fadd.f16.ll
index 08199be144f4..88b3be0e0d31 100644
--- a/test/CodeGen/AMDGPU/fadd.f16.ll
+++ b/test/CodeGen/AMDGPU/fadd.f16.ll
@@ -2,13 +2,13 @@
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}fadd_f16
-; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
-; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort v[[A_F16:[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @fadd_f16(
@@ -24,7 +24,7 @@ entry:
}
; GCN-LABEL: {{^}}fadd_f16_imm_a
-; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], 1.0, v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
@@ -42,7 +42,7 @@ entry:
}
; GCN-LABEL: {{^}}fadd_f16_imm_b
-; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort v[[A_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], 2.0, v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
@@ -60,8 +60,8 @@ entry:
}
; GCN-LABEL: {{^}}fadd_v2f16:
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword v[[B_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
@@ -70,16 +70,16 @@ entry:
; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI-DAG: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI-DAG: v_add_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI-DAG: v_add_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
-; VI-DAG: v_add_f16_e32 v[[R_F16_LO:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_add_f16_e32 v[[R_F16_LO:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]]
; VI-DAG: v_add_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_LO]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
@@ -88,15 +88,18 @@ define amdgpu_kernel void @fadd_v2f16(
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
entry:
- %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
- %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.a = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %a, i32 %tid
+ %gep.b = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %b, i32 %tid
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %gep.a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %gep.b
%r.val = fadd <2 x half> %a.val, %b.val
store <2 x half> %r.val, <2 x half> addrspace(1)* %r
ret void
}
; GCN-LABEL: {{^}}fadd_v2f16_imm_a:
-; GCN-DAG: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_dword v[[B_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
@@ -105,12 +108,12 @@ entry:
; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], 2.0, v[[B_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_mov_b32_e32 v[[CONST2:[0-9]+]], 0x4000
; VI-DAG: v_add_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[B_V2_F16]], v[[CONST2]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-DAG: v_add_f16_e32 v[[R_F16_0:[0-9]+]], 1.0, v[[B_V2_F16]]
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
@@ -118,14 +121,16 @@ define amdgpu_kernel void @fadd_v2f16_imm_a(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %b) {
entry:
- %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.b = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %b, i32 %tid
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %gep.b
%r.val = fadd <2 x half> <half 1.0, half 2.0>, %b.val
store <2 x half> %r.val, <2 x half> addrspace(1)* %r
ret void
}
; GCN-LABEL: {{^}}fadd_v2f16_imm_b:
-; GCN-DAG: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_dword v[[A_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI-DAG: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
@@ -134,12 +139,12 @@ entry:
; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], 1.0, v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_mov_b32_e32 v[[CONST1:[0-9]+]], 0x3c00
; VI-DAG: v_add_f16_sdwa v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[CONST1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-DAG: v_add_f16_e32 v[[R_F16_1:[0-9]+]], 2.0, v[[A_V2_F16]]
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
@@ -147,8 +152,15 @@ define amdgpu_kernel void @fadd_v2f16_imm_b(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
- %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.a = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %a, i32 %tid
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %gep.a
%r.val = fadd <2 x half> %a.val, <half 2.0, half 1.0>
store <2 x half> %r.val, <2 x half> addrspace(1)* %r
ret void
}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fadd64.ll b/test/CodeGen/AMDGPU/fadd64.ll
index c936d98673ba..8fd1f52006fb 100644
--- a/test/CodeGen/AMDGPU/fadd64.ll
+++ b/test/CodeGen/AMDGPU/fadd64.ll
@@ -5,8 +5,11 @@
; CHECK: v_add_f64 {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}
define amdgpu_kernel void @v_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
- %r0 = load double, double addrspace(1)* %in1
- %r1 = load double, double addrspace(1)* %in2
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr inbounds double, double addrspace(1)* %in1, i32 %tid
+ %gep2 = getelementptr inbounds double, double addrspace(1)* %in2, i32 %tid
+ %r0 = load double, double addrspace(1)* %gep1
+ %r1 = load double, double addrspace(1)* %gep2
%r2 = fadd double %r0, %r1
store double %r2, double addrspace(1)* %out
ret void
@@ -42,3 +45,8 @@ define amdgpu_kernel void @s_fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x do
store <2 x double> %r2, <2 x double> addrspace(1)* %out
ret void
}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll b/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
new file mode 100644
index 000000000000..5383bbe71ae3
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
@@ -0,0 +1,487 @@
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -mattr=-fp32-denormals < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=GCN-FLUSH %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -mattr=-fp32-denormals,+fp-exceptions < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-EXCEPT -check-prefix=VI -check-prefix=GCN-FLUSH %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -verify-machineinstrs -mattr=+fp32-denormals < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX9-DENORM %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -verify-machineinstrs -mattr=-fp32-denormals < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GCN-FLUSH %s
+
+; GCN-LABEL: {{^}}test_no_fold_canonicalize_loaded_value_f32:
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_no_fold_canonicalize_loaded_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %v = load float, float addrspace(1)* %gep, align 4
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_fmul_value_f32:
+; GCN: v_mul_f32_e32 [[V:v[0-9]+]], 0x41700000, v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fmul_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = fmul float %load, 15.0
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_sub_value_f32:
+; GCN: v_sub_f32_e32 [[V:v[0-9]+]], 0x41700000, v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_sub_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = fsub float 15.0, %load
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_add_value_f32:
+; GCN: v_add_f32_e32 [[V:v[0-9]+]], 0x41700000, v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_add_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = fadd float %load, 15.0
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_sqrt_value_f32:
+; GCN: v_sqrt_f32_e32 [[V:v[0-9]+]], v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_sqrt_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = call float @llvm.sqrt.f32(float %load)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fceil_value_f32:
+; GCN: v_ceil_f32_e32 [[V:v[0-9]+]], v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fceil_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = call float @llvm.ceil.f32(float %load)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_floor_value_f32:
+; GCN: v_floor_f32_e32 [[V:v[0-9]+]], v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_floor_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = call float @llvm.floor.f32(float %load)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fma_value_f32:
+; GCN: v_fma_f32 [[V:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fma_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = call float @llvm.fma.f32(float %load, float 15.0, float 15.0)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fmuladd_value_f32:
+; GCN-FLUSH: v_mac_f32_e32 [[V:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GFX9-DENORM: v_fma_f32 [[V:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fmuladd_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = call float @llvm.fmuladd.f32(float %load, float 15.0, float 15.0)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_canonicalize_value_f32:
+; GCN: flat_load_dword [[LOAD:v[0-9]+]],
+; GCN: v_mul_f32_e32 [[V:v[0-9]+]], 1.0, [[LOAD]]
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_canonicalize_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = call float @llvm.canonicalize.f32(float %load)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fpextend_value_f64_f32:
+; GCN: v_cvt_f64_f32_e32 [[V:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}
+; GCN: flat_store_dwordx2 v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fpextend_value_f64_f32(float addrspace(1)* %arg, double addrspace(1)* %out) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = fpext float %load to double
+ %canonicalized = tail call double @llvm.canonicalize.f64(double %v)
+ %gep2 = getelementptr inbounds double, double addrspace(1)* %out, i32 %id
+ store double %canonicalized, double addrspace(1)* %gep2, align 8
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fpextend_value_f32_f16:
+; GCN: v_cvt_f32_f16_e32 [[V:v[0-9]+]], v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fpextend_value_f32_f16(half addrspace(1)* %arg, float addrspace(1)* %out) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds half, half addrspace(1)* %arg, i32 %id
+ %load = load half, half addrspace(1)* %gep, align 2
+ %v = fpext half %load to float
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ %gep2 = getelementptr inbounds float, float addrspace(1)* %out, i32 %id
+ store float %canonicalized, float addrspace(1)* %gep2, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fpround_value_f32_f64:
+; GCN: v_cvt_f32_f64_e32 [[V:v[0-9]+]], v[{{[0-9:]+}}]
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fpround_value_f32_f64(double addrspace(1)* %arg, float addrspace(1)* %out) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds double, double addrspace(1)* %arg, i32 %id
+ %load = load double, double addrspace(1)* %gep, align 8
+ %v = fptrunc double %load to float
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ %gep2 = getelementptr inbounds float, float addrspace(1)* %out, i32 %id
+ store float %canonicalized, float addrspace(1)* %gep2, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fpround_value_f16_f32:
+; GCN: v_cvt_f16_f32_e32 [[V:v[0-9]+]], v{{[0-9]+}}
+; GCN: flat_store_short v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fpround_value_f16_f32(float addrspace(1)* %arg, half addrspace(1)* %out) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = fptrunc float %load to half
+ %canonicalized = tail call half @llvm.canonicalize.f16(half %v)
+ %gep2 = getelementptr inbounds half, half addrspace(1)* %out, i32 %id
+ store half %canonicalized, half addrspace(1)* %gep2, align 2
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fpround_value_v2f16_v2f32:
+; GCN-DAG: v_cvt_f16_f32_e32 [[V0:v[0-9]+]], v{{[0-9]+}}
+; VI-DAG: v_cvt_f16_f32_sdwa [[V1:v[0-9]+]], v{{[0-9]+}}
+; VI: v_or_b32_e32 [[V:v[0-9]+]], [[V0]], [[V1]]
+; GFX9: v_cvt_f16_f32_e32 [[V1:v[0-9]+]], v{{[0-9]+}}
+; GFX9: v_and_b32_e32 [[V0_16:v[0-9]+]], 0xffff, [[V0]]
+; GFX9: v_lshl_or_b32 [[V:v[0-9]+]], [[V1]], 16, [[V0_16]]
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fpround_value_v2f16_v2f32(<2 x float> addrspace(1)* %arg, <2 x half> addrspace(1)* %out) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds <2 x float>, <2 x float> addrspace(1)* %arg, i32 %id
+ %load = load <2 x float>, <2 x float> addrspace(1)* %gep, align 8
+ %v = fptrunc <2 x float> %load to <2 x half>
+ %canonicalized = tail call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %v)
+ %gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i32 %id
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %gep2, align 4
+ ret void
+}
+
+; GCN-LABEL: test_no_fold_canonicalize_fneg_value_f32:
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, 1.0, -v{{[0-9]+}}
+define amdgpu_kernel void @test_no_fold_canonicalize_fneg_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = fsub float -0.0, %load
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fneg_value_f32:
+; GCN: v_xor_b32_e32 [[V:v[0-9]+]], 0x80000000, v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fneg_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v0 = fadd float %load, 0.0
+ %v = fsub float -0.0, %v0
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_no_fold_canonicalize_fabs_value_f32:
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, 1.0, |v{{[0-9]+}}|
+define amdgpu_kernel void @test_no_fold_canonicalize_fabs_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = tail call float @llvm.fabs.f32(float %load)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_fabs_value_f32:
+; GCN: v_and_b32_e32 [[V:v[0-9]+]], 0x7fffffff, v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_fabs_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v0 = fadd float %load, 0.0
+ %v = tail call float @llvm.fabs.f32(float %v0)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_sin_value_f32:
+; GCN: v_sin_f32_e32 [[V:v[0-9]+]], v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_sin_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = tail call float @llvm.sin.f32(float %load)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_cos_value_f32:
+; GCN: v_cos_f32_e32 [[V:v[0-9]+]], v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_cos_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = tail call float @llvm.cos.f32(float %load)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_sin_value_f16:
+; GCN: v_sin_f32_e32 [[V0:v[0-9]+]], v{{[0-9]+}}
+; GCN: v_cvt_f16_f32_e32 [[V:v[0-9]+]], [[V0]]
+; GCN: flat_store_short v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_sin_value_f16(half addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds half, half addrspace(1)* %arg, i32 %id
+ %load = load half, half addrspace(1)* %gep, align 2
+ %v = tail call half @llvm.sin.f16(half %load)
+ %canonicalized = tail call half @llvm.canonicalize.f16(half %v)
+ store half %canonicalized, half addrspace(1)* %gep, align 2
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_cos_value_f16:
+; GCN: v_cos_f32_e32 [[V0:v[0-9]+]], v{{[0-9]+}}
+; GCN: v_cvt_f16_f32_e32 [[V:v[0-9]+]], [[V0]]
+; GCN: flat_store_short v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_cos_value_f16(half addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds half, half addrspace(1)* %arg, i32 %id
+ %load = load half, half addrspace(1)* %gep, align 2
+ %v = tail call half @llvm.cos.f16(half %load)
+ %canonicalized = tail call half @llvm.canonicalize.f16(half %v)
+ store half %canonicalized, half addrspace(1)* %gep, align 2
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_qNaN_value_f32:
+; GCN: v_mov_b32_e32 [[V:v[0-9]+]], 0x7fc00000
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_qNaN_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %canonicalized = tail call float @llvm.canonicalize.f32(float 0x7FF8000000000000)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_minnum_value_from_load_f32:
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_fold_canonicalize_minnum_value_from_load_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = tail call float @llvm.minnum.f32(float %load, float 0.0)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_minnum_value_f32:
+; GCN: v_min_f32_e32 [[V:v[0-9]+]], 0, v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_minnum_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v0 = fadd float %load, 0.0
+ %v = tail call float @llvm.minnum.f32(float %v0, float 0.0)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_sNaN_value_f32:
+; GCN: v_min_f32_e32 [[V0:v[0-9]+]], 0x7f800001, v{{[0-9]+}}
+; GCN: v_mul_f32_e32 [[V:v[0-9]+]], 1.0, [[V0]]
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+define amdgpu_kernel void @test_fold_canonicalize_sNaN_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = tail call float @llvm.minnum.f32(float %load, float bitcast (i32 2139095041 to float))
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_denorm_value_f32:
+; GCN: v_min_f32_e32 [[V0:v[0-9]+]], 0x7fffff, v{{[0-9]+}}
+; GCN: v_mul_f32_e32 [[V:v[0-9]+]], 1.0, [[V0]]
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+define amdgpu_kernel void @test_fold_canonicalize_denorm_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = tail call float @llvm.minnum.f32(float %load, float bitcast (i32 8388607 to float))
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_maxnum_value_from_load_f32:
+; GCN: v_max_f32_e32 [[V0:v[0-9]+]], 0, v{{[0-9]+}}
+; GCN: v_mul_f32_e32 [[V:v[0-9]+]], 1.0, [[V0]]
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+define amdgpu_kernel void @test_fold_canonicalize_maxnum_value_from_load_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v = tail call float @llvm.maxnum.f32(float %load, float 0.0)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_maxnum_value_f32:
+; GCN: v_max_f32_e32 [[V:v[0-9]+]], 0, v{{[0-9]+}}
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_maxnum_value_f32(float addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %load = load float, float addrspace(1)* %gep, align 4
+ %v0 = fadd float %load, 0.0
+ %v = tail call float @llvm.maxnum.f32(float %v0, float 0.0)
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ store float %canonicalized, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: test_fold_canonicalize_maxnum_value_f64:
+; GCN: v_max_f64 [[V:v\[[0-9]+:[0-9]+\]]], v[{{[0-9:]+}}], 0
+; GCN: flat_store_dwordx2 v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_maxnum_value_f64(double addrspace(1)* %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds double, double addrspace(1)* %arg, i32 %id
+ %load = load double, double addrspace(1)* %gep, align 8
+ %v0 = fadd double %load, 0.0
+ %v = tail call double @llvm.maxnum.f64(double %v0, double 0.0)
+ %canonicalized = tail call double @llvm.canonicalize.f64(double %v)
+ store double %canonicalized, double addrspace(1)* %gep, align 8
+ ret void
+}
+
+; GCN-LABEL: test_no_fold_canonicalize_fmul_value_f32_no_ieee:
+; GCN-EXCEPT: v_mul_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
+define amdgpu_ps float @test_no_fold_canonicalize_fmul_value_f32_no_ieee(float %arg) {
+entry:
+ %v = fmul float %arg, 15.0
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ ret float %canonicalized
+}
+
+; GCN-LABEL: test_fold_canonicalize_fmul_nnan_value_f32_no_ieee:
+; GCN: v_mul_f32_e32 [[V:v[0-9]+]], 0x41700000, v{{[0-9]+}}
+; GCN-NEXT: ; return
+; GCN-NOT: 1.0
+define amdgpu_ps float @test_fold_canonicalize_fmul_nnan_value_f32_no_ieee(float %arg) {
+entry:
+ %v = fmul nnan float %arg, 15.0
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ ret float %canonicalized
+}
+
+declare float @llvm.canonicalize.f32(float) #0
+declare double @llvm.canonicalize.f64(double) #0
+declare half @llvm.canonicalize.f16(half) #0
+declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>) #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare float @llvm.sqrt.f32(float) #0
+declare float @llvm.ceil.f32(float) #0
+declare float @llvm.floor.f32(float) #0
+declare float @llvm.fma.f32(float, float, float) #0
+declare float @llvm.fmuladd.f32(float, float, float) #0
+declare float @llvm.fabs.f32(float) #0
+declare float @llvm.sin.f32(float) #0
+declare float @llvm.cos.f32(float) #0
+declare half @llvm.sin.f16(half) #0
+declare half @llvm.cos.f16(half) #0
+declare float @llvm.minnum.f32(float, float) #0
+declare float @llvm.maxnum.f32(float, float) #0
+declare double @llvm.maxnum.f64(double, double) #0
+
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fcanonicalize.f16.ll b/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
index 404358f0ecb9..dd8e277c1c75 100644
--- a/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
+++ b/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
@@ -5,6 +5,8 @@ declare half @llvm.fabs.f16(half) #0
declare half @llvm.canonicalize.f16(half) #0
declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #0
declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>) #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
; GCN-LABEL: {{^}}v_test_canonicalize_var_f16:
; GCN: v_mul_f16_e32 [[REG:v[0-9]+]], 1.0, {{v[0-9]+}}
@@ -213,7 +215,9 @@ define amdgpu_kernel void @test_fold_canonicalize_snan3_value_f16(half addrspace
; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, {{v[0-9]+$}}
; GFX9: buffer_store_dword [[REG]]
define amdgpu_kernel void @v_test_canonicalize_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
- %val = load <2 x half>, <2 x half> addrspace(1)* %out
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+ %val = load <2 x half>, <2 x half> addrspace(1)* %gep
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -233,7 +237,9 @@ define amdgpu_kernel void @v_test_canonicalize_var_v2f16(<2 x half> addrspace(1)
; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, [[ABS]]{{$}}
; GCN: buffer_store_dword
define amdgpu_kernel void @v_test_canonicalize_fabs_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
- %val = load <2 x half>, <2 x half> addrspace(1)* %out
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+ %val = load <2 x half>, <2 x half> addrspace(1)* %gep
%val.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %val)
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val.fabs)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
@@ -251,7 +257,9 @@ define amdgpu_kernel void @v_test_canonicalize_fabs_var_v2f16(<2 x half> addrspa
; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, [[ABS]] neg_lo:[0,1] neg_hi:[0,1]{{$}}
; GCN: buffer_store_dword
define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
- %val = load <2 x half>, <2 x half> addrspace(1)* %out
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+ %val = load <2 x half>, <2 x half> addrspace(1)* %gep
%val.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %val)
%val.fabs.fneg = fsub <2 x half> <half -0.0, half -0.0>, %val.fabs
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val.fabs.fneg)
@@ -270,7 +278,9 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_v2f16(<2 x half> ad
; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, {{v[0-9]+}} neg_lo:[0,1] neg_hi:[0,1]{{$}}
; GFX9: buffer_store_dword [[REG]]
define amdgpu_kernel void @v_test_canonicalize_fneg_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
- %val = load <2 x half>, <2 x half> addrspace(1)* %out
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+ %val = load <2 x half>, <2 x half> addrspace(1)* %gep
%fneg.val = fsub <2 x half> <half -0.0, half -0.0>, %val
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %fneg.val)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fcanonicalize.ll b/test/CodeGen/AMDGPU/fcanonicalize.ll
index 8c385f40b1c5..feb4c7bd4a18 100644
--- a/test/CodeGen/AMDGPU/fcanonicalize.ll
+++ b/test/CodeGen/AMDGPU/fcanonicalize.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
declare float @llvm.fabs.f32(float) #0
declare float @llvm.canonicalize.f32(float) #0
diff --git a/test/CodeGen/AMDGPU/fcmp.f16.ll b/test/CodeGen/AMDGPU/fcmp.f16.ll
index 7916226462f7..aef898b1a8ee 100644
--- a/test/CodeGen/AMDGPU/fcmp.f16.ll
+++ b/test/CodeGen/AMDGPU/fcmp.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}fcmp_f16_lt
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
@@ -351,23 +351,12 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fcmp_v2f16_lt
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_lt_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_lt_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_lt_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_lt_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; GCN-LABEL: {{^}}fcmp_v2f16_lt:
+; SI: v_cmp_lt_f32_e32 vcc,
+; SI: v_cmp_lt_f32_e32 vcc,
+
+; VI: v_cmp_lt_f16_e32 vcc,
+; VI: v_cmp_lt_f16_e32 vcc,
define amdgpu_kernel void @fcmp_v2f16_lt(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -382,22 +371,11 @@ entry:
}
; GCN-LABEL: {{^}}fcmp_v2f16_eq
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_eq_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_eq_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_eq_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_eq_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; SI: v_cmp_eq_f32_e32 vcc,
+; SI: v_cmp_eq_f32_e32 vcc,
+
+; VI: v_cmp_eq_f16_e32 vcc,
+; VI: v_cmp_eq_f16_e32 vcc,
define amdgpu_kernel void @fcmp_v2f16_eq(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -411,23 +389,11 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fcmp_v2f16_le
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_le_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_le_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_le_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_le_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; GCN-LABEL: {{^}}fcmp_v2f16_le:
+; SI: v_cmp_le_f32_e32 vcc
+; SI: v_cmp_le_f32_e32 vcc
+; VI: v_cmp_le_f16_e32 vcc
+; VI: v_cmp_le_f16_e32 vcc
define amdgpu_kernel void @fcmp_v2f16_le(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -441,23 +407,12 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fcmp_v2f16_gt
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_gt_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_gt_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_gt_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_gt_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; GCN-LABEL: {{^}}fcmp_v2f16_gt:
+; SI: v_cmp_gt_f32_e32 vcc,
+; SI: v_cmp_gt_f32_e32 vcc,
+
+; VI: v_cmp_gt_f16_e32 vcc,
+; VI: v_cmp_gt_f16_e32 vcc,
define amdgpu_kernel void @fcmp_v2f16_gt(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -471,23 +426,12 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fcmp_v2f16_lg
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_lg_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_lg_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_lg_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_lg_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; GCN-LABEL: {{^}}fcmp_v2f16_lg:
+; SI: v_cmp_lg_f32_e32 vcc,
+; SI: v_cmp_lg_f32_e32 vcc,
+
+; VI: v_cmp_lg_f16_e32 vcc,
+; VI: v_cmp_lg_f16_e32 vcc,
define amdgpu_kernel void @fcmp_v2f16_lg(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -501,23 +445,12 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fcmp_v2f16_ge
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_ge_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_ge_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_ge_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_ge_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; GCN-LABEL: {{^}}fcmp_v2f16_ge:
+; SI: v_cmp_ge_f32_e32 vcc,
+; SI: v_cmp_ge_f32_e32 vcc,
+
+; VI: v_cmp_ge_f16_e32 vcc,
+; VI: v_cmp_ge_f16_e32 vcc,
define amdgpu_kernel void @fcmp_v2f16_ge(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -531,23 +464,12 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fcmp_v2f16_o
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_o_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_o_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_o_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_o_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; GCN-LABEL: {{^}}fcmp_v2f16_o:
+; SI: v_cmp_o_f32_e32 vcc,
+; SI: v_cmp_o_f32_e32 vcc,
+
+; VI: v_cmp_o_f16_e32 vcc,
+; VI: v_cmp_o_f16_e32 vcc,
define amdgpu_kernel void @fcmp_v2f16_o(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -561,23 +483,12 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fcmp_v2f16_u
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_u_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_u_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_u_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_u_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; GCN-LABEL: {{^}}fcmp_v2f16_u:
+; SI: v_cmp_u_f32_e32 vcc,
+; SI: v_cmp_u_f32_e32 vcc,
+
+; VI: v_cmp_u_f16_e32 vcc,
+; VI: v_cmp_u_f16_e32 vcc,
define amdgpu_kernel void @fcmp_v2f16_u(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -592,22 +503,11 @@ entry:
}
; GCN-LABEL: {{^}}fcmp_v2f16_nge
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_nge_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_nge_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_nge_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_nge_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; SI: v_cmp_nge_f32_e32 vcc,
+; SI: v_cmp_nge_f32_e32 vcc,
+
+; VI: v_cmp_nge_f16_e32 vcc,
+; VI: v_cmp_nge_f16_e32 vcc,
define amdgpu_kernel void @fcmp_v2f16_nge(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -622,22 +522,11 @@ entry:
}
; GCN-LABEL: {{^}}fcmp_v2f16_nlg
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_nlg_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_nlg_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_nlg_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_nlg_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; SI: v_cmp_nlg_f32_e32 vcc
+; SI: v_cmp_nlg_f32_e32 vcc
+
+; VI: v_cmp_nlg_f16_e32 vcc
+; VI: v_cmp_nlg_f16_e32 vcc
define amdgpu_kernel void @fcmp_v2f16_nlg(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -652,22 +541,11 @@ entry:
}
; GCN-LABEL: {{^}}fcmp_v2f16_ngt
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_ngt_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_ngt_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_ngt_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_ngt_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; SI: v_cmp_ngt_f32_e32 vcc,
+; SI: v_cmp_ngt_f32_e32 vcc,
+
+; VI: v_cmp_ngt_f16_e32 vcc,
+; VI: v_cmp_ngt_f16_e32 vcc,
define amdgpu_kernel void @fcmp_v2f16_ngt(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -682,22 +560,11 @@ entry:
}
; GCN-LABEL: {{^}}fcmp_v2f16_nle
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_nle_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_nle_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_nle_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_nle_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; SI: v_cmp_nle_f32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_cmp_nle_f32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
+
+; VI: v_cmp_nle_f16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_cmp_nle_f16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @fcmp_v2f16_nle(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -712,22 +579,11 @@ entry:
}
; GCN-LABEL: {{^}}fcmp_v2f16_neq
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_neq_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_neq_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_neq_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_neq_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
-; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
-; GCN: s_endpgm
+; SI: v_cmp_neq_f32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_cmp_neq_f32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
+
+; VI: v_cmp_neq_f16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_cmp_neq_f16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @fcmp_v2f16_neq(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
@@ -744,17 +600,19 @@ entry:
; GCN-LABEL: {{^}}fcmp_v2f16_nlt
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_cmp_nlt_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cmp_nlt_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
-; VI: v_cmp_nlt_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
-; VI: v_cmp_nlt_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN-DAG: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI-DAG: v_cmp_nlt_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+
+; GCN-DAG: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI-DAG: v_cmp_nlt_f32_e32 vcc, v[[A_F32_1]], v[[B_F32_1]]
+; VI-DAG: v_cmp_nlt_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+
+; VI: v_cmp_nlt_f16_e32 vcc, v[[A_F16_1]], v[[B_F16_1]]
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/fcmp64.ll b/test/CodeGen/AMDGPU/fcmp64.ll
index b9e1921d4c45..95f7e0be7d9c 100644
--- a/test/CodeGen/AMDGPU/fcmp64.ll
+++ b/test/CodeGen/AMDGPU/fcmp64.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s
; CHECK-LABEL: {{^}}flt_f64:
-; CHECK: v_cmp_nge_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_nge_f64_e32 vcc, {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define amdgpu_kernel void @flt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
@@ -14,7 +14,7 @@ define amdgpu_kernel void @flt_f64(i32 addrspace(1)* %out, double addrspace(1)*
}
; CHECK-LABEL: {{^}}fle_f64:
-; CHECK: v_cmp_ngt_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_ngt_f64_e32 vcc, {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define amdgpu_kernel void @fle_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
@@ -26,7 +26,7 @@ define amdgpu_kernel void @fle_f64(i32 addrspace(1)* %out, double addrspace(1)*
}
; CHECK-LABEL: {{^}}fgt_f64:
-; CHECK: v_cmp_nle_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_nle_f64_e32 vcc, {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define amdgpu_kernel void @fgt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
@@ -38,7 +38,7 @@ define amdgpu_kernel void @fgt_f64(i32 addrspace(1)* %out, double addrspace(1)*
}
; CHECK-LABEL: {{^}}fge_f64:
-; CHECK: v_cmp_nlt_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_nlt_f64_e32 vcc, {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define amdgpu_kernel void @fge_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
@@ -50,7 +50,7 @@ define amdgpu_kernel void @fge_f64(i32 addrspace(1)* %out, double addrspace(1)*
}
; CHECK-LABEL: {{^}}fne_f64:
-; CHECK: v_cmp_neq_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_neq_f64_e32 vcc, {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define amdgpu_kernel void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
@@ -62,7 +62,7 @@ define amdgpu_kernel void @fne_f64(double addrspace(1)* %out, double addrspace(1
}
; CHECK-LABEL: {{^}}feq_f64:
-; CHECK: v_cmp_nlg_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_nlg_f64_e32 vcc, {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define amdgpu_kernel void @feq_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
diff --git a/test/CodeGen/AMDGPU/fconst64.ll b/test/CodeGen/AMDGPU/fconst64.ll
index 125597796245..ca313d80894a 100644
--- a/test/CodeGen/AMDGPU/fconst64.ll
+++ b/test/CodeGen/AMDGPU/fconst64.ll
@@ -6,8 +6,15 @@
; CHECK-DAG: s_mov_b32 {{s[0-9]+}}, 0
define amdgpu_kernel void @fconst_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
- %r1 = load double, double addrspace(1)* %in
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds double, double addrspace(1)* %in, i32 %tid
+ %r1 = load double, double addrspace(1)* %gep
%r2 = fadd double %r1, 5.000000e+00
store double %r2, double addrspace(1)* %out
ret void
}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fcopysign.f16.ll b/test/CodeGen/AMDGPU/fcopysign.f16.ll
index 4e2bf765cd95..8e984246cc94 100644
--- a/test/CodeGen/AMDGPU/fcopysign.f16.ll
+++ b/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX8 %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX8 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
declare half @llvm.copysign.f16(half, half)
declare float @llvm.copysign.f32(float, float)
@@ -9,16 +9,18 @@ declare <2 x half> @llvm.copysign.v2f16(<2 x half>, <2 x half>)
declare <3 x half> @llvm.copysign.v3f16(<3 x half>, <3 x half>)
declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
+declare i32 @llvm.amdgcn.workitem.id.x()
+
; GCN-LABEL: {{^}}test_copysign_f16:
-; SI: buffer_load_ushort v[[SIGN:[0-9]+]]
-; SI: buffer_load_ushort v[[MAG:[0-9]+]]
+; SI: {{buffer|flat}}_load_ushort v[[SIGN:[0-9]+]]
+; SI: {{buffer|flat}}_load_ushort v[[MAG:[0-9]+]]
; SI: s_brev_b32 s[[CONST:[0-9]+]], -2
; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG]]
; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN_F32]]
; SI: v_cvt_f16_f32_e32 v[[OUT:[0-9]+]], v[[OUT_F32]]
-; GFX89: buffer_load_ushort v[[SIGN:[0-9]+]]
-; GFX89: buffer_load_ushort v[[MAG:[0-9]+]]
+; GFX89: {{buffer|flat}}_load_ushort v[[SIGN:[0-9]+]]
+; GFX89: {{buffer|flat}}_load_ushort v[[MAG:[0-9]+]]
; GFX89: s_movk_i32 s[[CONST:[0-9]+]], 0x7fff
; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN]]
; GCN: buffer_store_short v[[OUT]]
@@ -36,8 +38,8 @@ entry:
}
; GCN-LABEL: {{^}}test_copysign_out_f32_mag_f16_sign_f32:
-; GCN-DAG: buffer_load_ushort v[[MAG:[0-9]+]]
-; GCN-DAG: buffer_load_dword v[[SIGN:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_ushort v[[MAG:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_dword v[[SIGN:[0-9]+]]
; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
; GCN-DAG: v_cvt_f32_f16_e32 v[[MAG_EXT:[0-9]+]], v[[MAG]]
; GCN: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG_EXT]], v[[SIGN]]
@@ -48,17 +50,20 @@ define amdgpu_kernel void @test_copysign_out_f32_mag_f16_sign_f32(
half addrspace(1)* %arg_mag,
float addrspace(1)* %arg_sign) {
entry:
- %mag = load half, half addrspace(1)* %arg_mag
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %arg_mag_gep = getelementptr half, half addrspace(1)* %arg_mag, i32 %tid
+ %mag = load half, half addrspace(1)* %arg_mag_gep
%mag.ext = fpext half %mag to float
- %sign = load float, float addrspace(1)* %arg_sign
+ %arg_sign_gep = getelementptr float, float addrspace(1)* %arg_sign, i32 %tid
+ %sign = load float, float addrspace(1)* %arg_sign_gep
%out = call float @llvm.copysign.f32(float %mag.ext, float %sign)
store float %out, float addrspace(1)* %arg_out
ret void
}
; GCN-LABEL: {{^}}test_copysign_out_f64_mag_f16_sign_f64:
-; GCN-DAG: buffer_load_ushort v[[MAG:[0-9]+]]
-; GCN-DAG: buffer_load_dwordx2 v{{\[}}[[SIGN_LO:[0-9]+]]:[[SIGN_HI:[0-9]+]]{{\]}}
+; GCN-DAG: {{buffer|flat}}_load_ushort v[[MAG:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_dwordx2 v{{\[}}[[SIGN_LO:[0-9]+]]:[[SIGN_HI:[0-9]+]]{{\]}}
; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
; GCN-DAG: v_cvt_f32_f16_e32 v[[MAG_EXT:[0-9]+]], v[[MAG]]
; GCN-DAG: v_cvt_f64_f32_e32 v{{\[}}[[MAG_EXT_LO:[0-9]+]]:[[MAG_EXT_HI:[0-9]+]]{{\]}}, v[[MAG_EXT]]
@@ -70,17 +75,20 @@ define amdgpu_kernel void @test_copysign_out_f64_mag_f16_sign_f64(
half addrspace(1)* %arg_mag,
double addrspace(1)* %arg_sign) {
entry:
- %mag = load half, half addrspace(1)* %arg_mag
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %arg_mag_gep = getelementptr half, half addrspace(1)* %arg_mag, i32 %tid
+ %mag = load half, half addrspace(1)* %arg_mag_gep
%mag.ext = fpext half %mag to double
- %sign = load double, double addrspace(1)* %arg_sign
+ %arg_sign_gep = getelementptr double, double addrspace(1)* %arg_sign, i32 %tid
+ %sign = load double, double addrspace(1)* %arg_sign_gep
%out = call double @llvm.copysign.f64(double %mag.ext, double %sign)
store double %out, double addrspace(1)* %arg_out
ret void
}
; GCN-LABEL: {{^}}test_copysign_out_f32_mag_f32_sign_f16:
-; GCN-DAG: buffer_load_dword v[[MAG:[0-9]+]]
-; GCN-DAG: buffer_load_ushort v[[SIGN:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_dword v[[MAG:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_ushort v[[SIGN:[0-9]+]]
; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
; SI: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN_F32]]
@@ -93,8 +101,11 @@ define amdgpu_kernel void @test_copysign_out_f32_mag_f32_sign_f16(
float addrspace(1)* %arg_mag,
half addrspace(1)* %arg_sign) {
entry:
- %mag = load float, float addrspace(1)* %arg_mag
- %sign = load half, half addrspace(1)* %arg_sign
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %arg_mag_gep = getelementptr float, float addrspace(1)* %arg_mag, i32 %tid
+ %mag = load float, float addrspace(1)* %arg_mag_gep
+ %arg_sign_gep = getelementptr half, half addrspace(1)* %arg_sign, i32 %tid
+ %sign = load half, half addrspace(1)* %arg_sign_gep
%sign.ext = fpext half %sign to float
%out = call float @llvm.copysign.f32(float %mag, float %sign.ext)
store float %out, float addrspace(1)* %arg_out
@@ -102,8 +113,8 @@ entry:
}
; GCN-LABEL: {{^}}test_copysign_out_f64_mag_f64_sign_f16:
-; GCN-DAG: buffer_load_dwordx2 v{{\[}}[[MAG_LO:[0-9]+]]:[[MAG_HI:[0-9]+]]{{\]}}
-; GCN-DAG: buffer_load_ushort v[[SIGN:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_dwordx2 v{{\[}}[[MAG_LO:[0-9]+]]:[[MAG_HI:[0-9]+]]{{\]}}
+; GCN-DAG: {{buffer|flat}}_load_ushort v[[SIGN:[0-9]+]]
; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
; SI: v_bfi_b32 v[[OUT_HI:[0-9]+]], s[[CONST]], v[[MAG_HI]], v[[SIGN_F32]]
@@ -116,8 +127,11 @@ define amdgpu_kernel void @test_copysign_out_f64_mag_f64_sign_f16(
double addrspace(1)* %arg_mag,
half addrspace(1)* %arg_sign) {
entry:
- %mag = load double, double addrspace(1)* %arg_mag
- %sign = load half, half addrspace(1)* %arg_sign
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %arg_mag_gep = getelementptr double, double addrspace(1)* %arg_mag, i32 %tid
+ %mag = load double, double addrspace(1)* %arg_mag_gep
+ %arg_sign_gep = getelementptr half, half addrspace(1)* %arg_sign, i32 %tid
+ %sign = load half, half addrspace(1)* %arg_sign_gep
%sign.ext = fpext half %sign to double
%out = call double @llvm.copysign.f64(double %mag, double %sign.ext)
store double %out, double addrspace(1)* %arg_out
@@ -125,8 +139,8 @@ entry:
}
; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f16_sign_f32:
-; GCN-DAG: buffer_load_ushort v[[MAG:[0-9]+]]
-; GCN-DAG: buffer_load_dword v[[SIGN:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_ushort v[[MAG:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_dword v[[SIGN:[0-9]+]]
; SI-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG]]
; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN]]
@@ -141,8 +155,11 @@ define amdgpu_kernel void @test_copysign_out_f16_mag_f16_sign_f32(
half addrspace(1)* %arg_mag,
float addrspace(1)* %arg_sign) {
entry:
- %mag = load half, half addrspace(1)* %arg_mag
- %sign = load float, float addrspace(1)* %arg_sign
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %arg_mag_gep = getelementptr half, half addrspace(1)* %arg_mag, i32 %tid
+ %mag = load half, half addrspace(1)* %arg_mag_gep
+ %arg_sign_gep = getelementptr float, float addrspace(1)* %arg_sign, i32 %tid
+ %sign = load float, float addrspace(1)* %arg_sign_gep
%sign.trunc = fptrunc float %sign to half
%out = call half @llvm.copysign.f16(half %mag, half %sign.trunc)
store half %out, half addrspace(1)* %arg_out
@@ -150,8 +167,8 @@ entry:
}
; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f16_sign_f64:
-; GCN-DAG: buffer_load_ushort v[[MAG:[0-9]+]]
-; GCN-DAG: buffer_load_dwordx2 v{{\[}}[[SIGN_LO:[0-9]+]]:[[SIGN_HI:[0-9]+]]{{\]}}
+; GCN-DAG: {{buffer|flat}}_load_ushort v[[MAG:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_dwordx2 v{{\[}}[[SIGN_LO:[0-9]+]]:[[SIGN_HI:[0-9]+]]{{\]}}
; SI-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG]]
; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN_HI]]
@@ -166,8 +183,11 @@ define amdgpu_kernel void @test_copysign_out_f16_mag_f16_sign_f64(
half addrspace(1)* %arg_mag,
double addrspace(1)* %arg_sign) {
entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %arg_mag_gep = getelementptr half, half addrspace(1)* %arg_mag, i32 %tid
%mag = load half, half addrspace(1)* %arg_mag
- %sign = load double, double addrspace(1)* %arg_sign
+ %arg_sign_gep = getelementptr double, double addrspace(1)* %arg_sign, i32 %tid
+ %sign = load double, double addrspace(1)* %arg_sign_gep
%sign.trunc = fptrunc double %sign to half
%out = call half @llvm.copysign.f16(half %mag, half %sign.trunc)
store half %out, half addrspace(1)* %arg_out
@@ -175,8 +195,8 @@ entry:
}
; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f32_sign_f16:
-; GCN-DAG: buffer_load_dword v[[MAG:[0-9]+]]
-; GCN-DAG: buffer_load_ushort v[[SIGN:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_dword v[[MAG:[0-9]+]]
+; GCN-DAG: {{buffer|flat}}_load_ushort v[[SIGN:[0-9]+]]
; SI-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
; SI-DAG: v_cvt_f16_f32_e32 v[[MAG_TRUNC:[0-9]+]], v[[MAG]]
; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
@@ -193,9 +213,12 @@ define amdgpu_kernel void @test_copysign_out_f16_mag_f32_sign_f16(
float addrspace(1)* %arg_mag,
half addrspace(1)* %arg_sign) {
entry:
- %mag = load float, float addrspace(1)* %arg_mag
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %arg_mag_gep = getelementptr float, float addrspace(1)* %arg_mag, i32 %tid
+ %mag = load float, float addrspace(1)* %arg_mag_gep
%mag.trunc = fptrunc float %mag to half
- %sign = load half, half addrspace(1)* %arg_sign
+ %arg_sign_gep = getelementptr half, half addrspace(1)* %arg_sign, i32 %tid
+ %sign = load half, half addrspace(1)* %arg_sign_gep
%out = call half @llvm.copysign.f16(half %mag.trunc, half %sign)
store half %out, half addrspace(1)* %arg_out
ret void
diff --git a/test/CodeGen/AMDGPU/fdiv.f16.ll b/test/CodeGen/AMDGPU/fdiv.f16.ll
index 7f84e973c958..333143393cb4 100644
--- a/test/CodeGen/AMDGPU/fdiv.f16.ll
+++ b/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -27,7 +27,7 @@
; VI-DAG: v_cvt_f32_f16_e32 [[CVT_RHS:v[0-9]+]], [[RHS]]
; VI-DAG: v_rcp_f32_e32 [[RCP_RHS:v[0-9]+]], [[CVT_RHS]]
-; VI: v_mul_f32_e32 [[MUL:v[0-9]+]], [[RCP_RHS]], [[CVT_LHS]]
+; VI: v_mul_f32_e32 [[MUL:v[0-9]+]], [[CVT_LHS]], [[RCP_RHS]]
; VI: v_cvt_f16_f32_e32 [[CVT_BACK:v[0-9]+]], [[MUL]]
; VI: v_div_fixup_f16 [[RESULT:v[0-9]+]], [[CVT_BACK]], [[RHS]], [[LHS]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -165,7 +165,7 @@ entry:
; VI: flat_load_ushort [[RHS:v[0-9]+]]
; VI: v_rcp_f16_e32 [[RCP:v[0-9]+]], [[RHS]]
-; VI: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[RCP]], [[LHS]]
+; VI: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[LHS]], [[RCP]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fdiv_f16_arcp(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #0 {
@@ -187,7 +187,7 @@ entry:
; VI: flat_load_ushort [[RHS:v[0-9]+]]
; VI: v_rcp_f16_e32 [[RCP:v[0-9]+]], [[RHS]]
-; VI: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[RCP]], [[LHS]]
+; VI: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[LHS]], [[RCP]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fdiv_f16_unsafe(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #2 {
diff --git a/test/CodeGen/AMDGPU/fdiv.ll b/test/CodeGen/AMDGPU/fdiv.ll
index 738a5adba14f..bc489454341a 100644
--- a/test/CodeGen/AMDGPU/fdiv.ll
+++ b/test/CodeGen/AMDGPU/fdiv.ll
@@ -20,7 +20,7 @@
; GCN: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; GCN: v_fma_f32 [[A:v[0-9]+]], -[[NUM_SCALE]], [[NUM_RCP]], 1.0
; GCN: v_fma_f32 [[B:v[0-9]+]], [[A]], [[NUM_RCP]], [[NUM_RCP]]
-; GCN: v_mul_f32_e32 [[C:v[0-9]+]], [[B]], [[DEN_SCALE]]
+; GCN: v_mul_f32_e32 [[C:v[0-9]+]], [[DEN_SCALE]], [[B]]
; GCN: v_fma_f32 [[D:v[0-9]+]], -[[NUM_SCALE]], [[C]], [[DEN_SCALE]]
; GCN: v_fma_f32 [[E:v[0-9]+]], [[D]], [[B]], [[C]]
; GCN: v_fma_f32 [[F:v[0-9]+]], -[[NUM_SCALE]], [[E]], [[DEN_SCALE]]
@@ -45,7 +45,7 @@ entry:
; GCN-NOT: s_setreg
; GCN: v_fma_f32 [[A:v[0-9]+]], -[[NUM_SCALE]], [[NUM_RCP]], 1.0
; GCN: v_fma_f32 [[B:v[0-9]+]], [[A]], [[NUM_RCP]], [[NUM_RCP]]
-; GCN: v_mul_f32_e32 [[C:v[0-9]+]], [[B]], [[DEN_SCALE]]
+; GCN: v_mul_f32_e32 [[C:v[0-9]+]], [[DEN_SCALE]], [[B]]
; GCN: v_fma_f32 [[D:v[0-9]+]], -[[NUM_SCALE]], [[C]], [[DEN_SCALE]]
; GCN: v_fma_f32 [[E:v[0-9]+]], [[D]], [[B]], [[C]]
; GCN: v_fma_f32 [[F:v[0-9]+]], -[[NUM_SCALE]], [[E]], [[DEN_SCALE]]
@@ -85,20 +85,11 @@ entry:
}
; FUNC-LABEL: {{^}}fdiv_fast_denormals_f32:
-; GCN: v_div_scale_f32 [[NUM_SCALE:v[0-9]+]]
-; GCN-DAG: v_div_scale_f32 [[DEN_SCALE:v[0-9]+]]
-; GCN-DAG: v_rcp_f32_e32 [[NUM_RCP:v[0-9]+]], [[NUM_SCALE]]
-
-; GCN-NOT: s_setreg
-; GCN: v_fma_f32 [[A:v[0-9]+]], -[[NUM_SCALE]], [[NUM_RCP]], 1.0
-; GCN: v_fma_f32 [[B:v[0-9]+]], [[A]], [[NUM_RCP]], [[NUM_RCP]]
-; GCN: v_mul_f32_e32 [[C:v[0-9]+]], [[B]], [[DEN_SCALE]]
-; GCN: v_fma_f32 [[D:v[0-9]+]], -[[NUM_SCALE]], [[C]], [[DEN_SCALE]]
-; GCN: v_fma_f32 [[E:v[0-9]+]], [[D]], [[B]], [[C]]
-; GCN: v_fma_f32 [[F:v[0-9]+]], -[[NUM_SCALE]], [[E]], [[DEN_SCALE]]
+; GCN: v_rcp_f32_e32 [[RCP:v[0-9]+]], s{{[0-9]+}}
+; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], s{{[0-9]+}}, [[RCP]]
+; GCN-NOT: [[RESULT]]
; GCN-NOT: s_setreg
-; GCN: v_div_fmas_f32 [[FMAS:v[0-9]+]], [[F]], [[B]], [[E]]
-; GCN: v_div_fixup_f32 v{{[0-9]+}}, [[FMAS]],
+; GCN: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @fdiv_fast_denormals_f32(float addrspace(1)* %out, float %a, float %b) #2 {
entry:
%fdiv = fdiv fast float %a, %b
@@ -121,6 +112,21 @@ entry:
ret void
}
+; FUNC-LABEL: {{^}}fdiv_ulp25_f32_fast_math:
+; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[2].W
+; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, PS
+
+; GCN: v_rcp_f32_e32 [[RCP:v[0-9]+]], s{{[0-9]+}}
+; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], s{{[0-9]+}}, [[RCP]]
+; GCN-NOT: [[RESULT]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @fdiv_ulp25_f32_fast_math(float addrspace(1)* %out, float %a, float %b) #0 {
+entry:
+ %fdiv = fdiv fast float %a, %b, !fpmath !0
+ store float %fdiv, float addrspace(1)* %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}fdiv_f32_arcp_math:
; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[2].W
; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, PS
@@ -154,8 +160,9 @@ entry:
}
; FUNC-LABEL: {{^}}fdiv_ulp25_v2f32:
-; GCN: v_cmp_gt_f32
-; GCN: v_cmp_gt_f32
+; GCN: v_rcp_f32
+; GCN: v_rcp_f32
+; GCN-NOT: v_cmp_gt_f32
define amdgpu_kernel void @fdiv_ulp25_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
entry:
%fdiv = fdiv arcp <2 x float> %a, %b, !fpmath !0
diff --git a/test/CodeGen/AMDGPU/fma-combine.ll b/test/CodeGen/AMDGPU/fma-combine.ll
index 4113ba8dc1f0..7526d08bdbe5 100644
--- a/test/CodeGen/AMDGPU/fma-combine.ll
+++ b/test/CodeGen/AMDGPU/fma-combine.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-NOFMA -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-NOFMA -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -fp-contract=fast -enable-no-infs-fp-math -enable-unsafe-fp-math -mattr=+fp32-denormals < %s | FileCheck -check-prefix=SI-FMA -check-prefix=SI-UNSAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-NOFMA -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-NOFMA -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs -fp-contract=fast -enable-no-infs-fp-math -enable-unsafe-fp-math -mattr=+fp32-denormals < %s | FileCheck -check-prefix=SI-FMA -check-prefix=SI-UNSAFE -check-prefix=SI -check-prefix=FUNC %s
; Note: The SI-FMA conversions of type x * (y + 1) --> x * y + x would be
; beneficial even without fp32 denormals, but they do require no-infs-fp-math
@@ -387,7 +387,7 @@ define amdgpu_kernel void @aggressive_combine_to_fma_fsub_1_f64(double addrspace
; FUNC-LABEL: {{^}}test_f32_mul_add_x_one_y:
; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_add_x_one_y(float addrspace(1)* %out,
@@ -403,7 +403,7 @@ define amdgpu_kernel void @test_f32_mul_add_x_one_y(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_y_add_x_one:
; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_y_add_x_one(float addrspace(1)* %out,
@@ -419,7 +419,7 @@ define amdgpu_kernel void @test_f32_mul_y_add_x_one(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_add_x_negone_y:
; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_add_x_negone_y(float addrspace(1)* %out,
@@ -435,7 +435,7 @@ define amdgpu_kernel void @test_f32_mul_add_x_negone_y(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_y_add_x_negone:
; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_y_add_x_negone(float addrspace(1)* %out,
@@ -451,7 +451,7 @@ define amdgpu_kernel void @test_f32_mul_y_add_x_negone(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_sub_one_x_y:
; SI-NOFMA: v_sub_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_sub_one_x_y(float addrspace(1)* %out,
@@ -467,7 +467,7 @@ define amdgpu_kernel void @test_f32_mul_sub_one_x_y(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_y_sub_one_x:
; SI-NOFMA: v_sub_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_y_sub_one_x(float addrspace(1)* %out,
@@ -483,7 +483,7 @@ define amdgpu_kernel void @test_f32_mul_y_sub_one_x(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_sub_negone_x_y:
; SI-NOFMA: v_sub_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_sub_negone_x_y(float addrspace(1)* %out,
@@ -499,7 +499,7 @@ define amdgpu_kernel void @test_f32_mul_sub_negone_x_y(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_y_sub_negone_x:
; SI-NOFMA: v_sub_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_y_sub_negone_x(float addrspace(1)* %out,
@@ -515,7 +515,7 @@ define amdgpu_kernel void @test_f32_mul_y_sub_negone_x(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_sub_x_one_y:
; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_sub_x_one_y(float addrspace(1)* %out,
@@ -531,7 +531,7 @@ define amdgpu_kernel void @test_f32_mul_sub_x_one_y(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_y_sub_x_one:
; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_y_sub_x_one(float addrspace(1)* %out,
@@ -547,7 +547,7 @@ define amdgpu_kernel void @test_f32_mul_y_sub_x_one(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_sub_x_negone_y:
; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_sub_x_negone_y(float addrspace(1)* %out,
@@ -563,7 +563,7 @@ define amdgpu_kernel void @test_f32_mul_sub_x_negone_y(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_mul_y_sub_x_negone:
; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define amdgpu_kernel void @test_f32_mul_y_sub_x_negone(float addrspace(1)* %out,
@@ -583,8 +583,8 @@ define amdgpu_kernel void @test_f32_mul_y_sub_x_negone(float addrspace(1)* %out,
; FUNC-LABEL: {{^}}test_f32_interp:
; SI-NOFMA: v_sub_f32_e32 [[VT1:v[0-9]]], 1.0, [[VT:v[0-9]]]
-; SI-NOFMA: v_mul_f32_e32 [[VTY:v[0-9]]], [[VT1]], [[VY:v[0-9]]]
-; SI-NOFMA: v_mac_f32_e32 [[VTY]], [[VT]], [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 [[VTY:v[0-9]]], [[VY:v[0-9]]], [[VT1]]
+; SI-NOFMA: v_mac_f32_e32 [[VTY]], [[VX:v[0-9]]], [[VT]]
;
; SI-FMA: v_fma_f32 [[VR:v[0-9]]], -[[VT:v[0-9]]], [[VY:v[0-9]]], [[VY]]
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VT]], [[VR]]
diff --git a/test/CodeGen/AMDGPU/fma.f64.ll b/test/CodeGen/AMDGPU/fma.f64.ll
index 4d3f3712621e..907121f1cd46 100644
--- a/test/CodeGen/AMDGPU/fma.f64.ll
+++ b/test/CodeGen/AMDGPU/fma.f64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare double @llvm.fma.f64(double, double, double) nounwind readnone
declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/fma.ll b/test/CodeGen/AMDGPU/fma.ll
index 659cecb59ebf..6be4c450a51e 100644
--- a/test/CodeGen/AMDGPU/fma.ll
+++ b/test/CodeGen/AMDGPU/fma.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare float @llvm.fma.f32(float, float, float) nounwind readnone
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/fmax_legacy.ll b/test/CodeGen/AMDGPU/fmax_legacy.ll
index 7643c3ea533c..44c80b63bf7c 100644
--- a/test/CodeGen/AMDGPU/fmax_legacy.ll
+++ b/test/CodeGen/AMDGPU/fmax_legacy.ll
@@ -10,7 +10,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_uge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
@@ -31,7 +31,7 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32(float addrspace(1)* %out, fl
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_oge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
@@ -51,7 +51,7 @@ define amdgpu_kernel void @test_fmax_legacy_oge_f32(float addrspace(1)* %out, fl
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_ugt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
@@ -71,7 +71,7 @@ define amdgpu_kernel void @test_fmax_legacy_ugt_f32(float addrspace(1)* %out, fl
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_ogt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
@@ -91,7 +91,7 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_f32(float addrspace(1)* %out, fl
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(<1 x float> addrspace(1)* %out, <1 x float> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
diff --git a/test/CodeGen/AMDGPU/fmed3.ll b/test/CodeGen/AMDGPU/fmed3.ll
index 27d9261b1fab..4cfc9fc80fb0 100644
--- a/test/CodeGen/AMDGPU/fmed3.ll
+++ b/test/CodeGen/AMDGPU/fmed3.ll
@@ -872,8 +872,8 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod0_mismatch(fl
; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
-; GCN: v_max_f32_e32 [[MAX:v[0-9]+]], [[B]], [[A]]
-; GCN: v_min_f32_e32 v{{[0-9]+}}, [[C]], [[MAX]]
+; GCN: v_max_f32_e32 [[MAX:v[0-9]+]], [[A]], [[B]]
+; GCN: v_min_f32_e32 v{{[0-9]+}}, [[MAX]], [[C]]
define amdgpu_kernel void @v_test_global_nnans_min_max_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
diff --git a/test/CodeGen/AMDGPU/fmin_legacy.ll b/test/CodeGen/AMDGPU/fmin_legacy.ll
index 52336f95a909..0494295fc15f 100644
--- a/test/CodeGen/AMDGPU/fmin_legacy.ll
+++ b/test/CodeGen/AMDGPU/fmin_legacy.ll
@@ -45,7 +45,7 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(float addrspace(1)* %out,
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -64,7 +64,7 @@ define amdgpu_kernel void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, fl
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -83,7 +83,7 @@ define amdgpu_kernel void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, fl
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -102,7 +102,7 @@ define amdgpu_kernel void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, fl
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -121,7 +121,7 @@ define amdgpu_kernel void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, fl
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(<1 x float> addrspace(1)* %out, <1 x float> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr <1 x float>, <1 x float> addrspace(1)* %in, i32 %tid
diff --git a/test/CodeGen/AMDGPU/fmul.f16.ll b/test/CodeGen/AMDGPU/fmul.f16.ll
index cd86409e2038..5f120f63d7fe 100644
--- a/test/CodeGen/AMDGPU/fmul.f16.ll
+++ b/test/CodeGen/AMDGPU/fmul.f16.ll
@@ -1,14 +1,14 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}fmul_f16
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_mul_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_mul_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_mul_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; VI: v_mul_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @fmul_f16(
@@ -70,16 +70,16 @@ entry:
; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
-; VI-DAG: v_mul_f16_e32 v[[R_F16_LO:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_mul_f16_e32 v[[R_F16_LO:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]]
; VI-DAG: v_mul_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_LO]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
@@ -108,7 +108,7 @@ entry:
; VI-DAG: v_mul_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[B_V2_F16]], v[[CONST4]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-DAG: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @fmul_v2f16_imm_a(
@@ -134,7 +134,7 @@ entry:
; VI-DAG: v_mul_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[A_V2_F16]], v[[CONST3]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-DAG: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], 4.0, v[[A_V2_F16]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @fmul_v2f16_imm_b(
diff --git a/test/CodeGen/AMDGPU/fmul64.ll b/test/CodeGen/AMDGPU/fmul64.ll
index f14233f267b2..d37d432842f3 100644
--- a/test/CodeGen/AMDGPU/fmul64.ll
+++ b/test/CodeGen/AMDGPU/fmul64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
; FUNC-LABEL: {{^}}fmul_f64:
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
diff --git a/test/CodeGen/AMDGPU/fmuladd.f16.ll b/test/CodeGen/AMDGPU/fmuladd.f16.ll
index 9b713419e747..980d68ceded8 100644
--- a/test/CodeGen/AMDGPU/fmuladd.f16.ll
+++ b/test/CodeGen/AMDGPU/fmuladd.f16.ll
@@ -79,7 +79,7 @@ define amdgpu_kernel void @fmuladd_a_2.0_b_f16(half addrspace(1)* %out, half add
; VI-DENORM-CONTRACT: v_fma_f16 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
; VI-DENORM-STRICT: v_add_f16_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; VI-DENORM-STRICT: v_add_f16_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; VI-DENORM-STRICT: v_add_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @fadd_a_a_b_f16(half addrspace(1)* %out,
@@ -108,7 +108,7 @@ define amdgpu_kernel void @fadd_a_a_b_f16(half addrspace(1)* %out,
; VI-DENORM-CONTRACT: v_fma_f16 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
; VI-DENORM-STRICT: v_add_f16_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; VI-DENORM-STRICT: v_add_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; VI-DENORM-STRICT: v_add_f16_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @fadd_b_a_a_f16(half addrspace(1)* %out,
@@ -227,8 +227,8 @@ define amdgpu_kernel void @fmuladd_2.0_a_neg_b_f16(half addrspace(1)* %out, half
; VI-DENORM-CONTRACT: v_fma_f16 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -[[REGC]]
-; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
-; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
+; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
+; VI-DENORM-STRICT: v_sub_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @mad_sub_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
@@ -257,8 +257,8 @@ define amdgpu_kernel void @mad_sub_f16(half addrspace(1)* noalias nocapture %out
; VI-DENORM-CONTRACT: v_fma_f16 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], [[REGC]]
-; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
-; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
+; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
+; VI-DENORM-STRICT: v_sub_f16_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @mad_sub_inv_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
@@ -287,7 +287,7 @@ define amdgpu_kernel void @mad_sub_inv_f16(half addrspace(1)* noalias nocapture
; VI-DENORM-CONTRACT: v_fma_f16 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -|[[REGC]]|
-; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
+; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
; VI-DENORM-STRICT: v_sub_f16_e64 [[RESULT:v[0-9]+]], [[TMP]], |[[REGC]]|
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -319,7 +319,7 @@ define amdgpu_kernel void @mad_sub_fabs_f16(half addrspace(1)* noalias nocapture
; VI-DENORM-CONTRACT: v_fma_f16 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], |[[REGC]]|
-; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
+; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
; VI-DENORM-STRICT: v_sub_f16_e64 [[RESULT:v[0-9]+]], |[[REGC]]|, [[TMP]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -347,13 +347,13 @@ define amdgpu_kernel void @mad_sub_fabs_inv_f16(half addrspace(1)* noalias nocap
; GCN: {{buffer|flat}}_load_ushort [[REGB:v[0-9]+]]
; GCN: {{buffer|flat}}_load_ushort [[REGC:v[0-9]+]]
-; VI-FLUSH: v_mac_f16_e32 [[REGC]], [[REGB]], [[REGA]]
+; VI-FLUSH: v_mac_f16_e32 [[REGC]], [[REGA]], [[REGB]]
; VI-FLUSH: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[REGC]]
; VI-DENORM-CONTRACT: v_fma_f16 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], [[REGC]]
-; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
-; VI-DENORM-STRICT: v_add_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
+; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
+; VI-DENORM-STRICT: v_add_f16_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @neg_neg_mad_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
@@ -385,7 +385,7 @@ define amdgpu_kernel void @neg_neg_mad_f16(half addrspace(1)* noalias nocapture
; VI-DENORM-CONTRACT: v_fma_f16 [[RESULT:v[0-9]+]], [[REGA]], |[[REGB]]|, -[[REGC]]
; VI-DENORM-STRICT: v_mul_f16_e64 [[TMP:v[0-9]+]], [[REGA]], |[[REGB]]|
-; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
+; VI-DENORM-STRICT: v_sub_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @mad_fabs_sub_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
@@ -416,7 +416,7 @@ define amdgpu_kernel void @mad_fabs_sub_f16(half addrspace(1)* noalias nocapture
; VI-DENORM-CONTRACT: v_fma_f16 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]]
; VI-DENORM-STRICT: v_add_f16_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; VI-DENORM-STRICT: v_sub_f16_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @fsub_c_fadd_a_a_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
@@ -444,7 +444,7 @@ define amdgpu_kernel void @fsub_c_fadd_a_a_f16(half addrspace(1)* %out, half add
; VI-DENORM-CONTRACT: v_fma_f16 [[R2]], [[R1]], 2.0, -[[R2]]
; VI-DENORM-STRICT: v_add_f16_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; VI-DENORM-STRICT: v_sub_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @fsub_fadd_a_a_c_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
diff --git a/test/CodeGen/AMDGPU/fmuladd.f32.ll b/test/CodeGen/AMDGPU/fmuladd.f32.ll
index e42255026692..4b1e41ff91e1 100644
--- a/test/CodeGen/AMDGPU/fmuladd.f32.ll
+++ b/test/CodeGen/AMDGPU/fmuladd.f32.ll
@@ -1,12 +1,12 @@
-; RUN: llc -verify-machineinstrs -mcpu=tahiti -mattr=-fp32-denormals,+fast-fmaf -fp-contract=on < %s | FileCheck -check-prefixes=GCN,GCN-FLUSH-STRICT,GCN-FLUSH,SI-FLUSH,GCN-FLUSH-FASTFMA,GCN-FLUSH-FASTFMA-STRICT,SI %s
-; RUN: llc -verify-machineinstrs -mcpu=tahiti -mattr=+fp32-denormals,+fast-fmaf -fp-contract=on < %s | FileCheck -check-prefixes=GCN,GCN-DENORM-STRICT,GCN-DENORM,SI-DENORM,GCN-DENORM-FASTFMA,GCN-DENORM-FASTFMA-STRICT,SI %s
-; RUN: llc -verify-machineinstrs -mcpu=verde -mattr=-fp32-denormals,-fast-fmaf -fp-contract=on < %s | FileCheck -check-prefixes=GCN,GCN-FLUSH-STRICT,GCN-FLUSH,SI-FLUSH,GCN-FLUSH-SLOWFMA,GCN-FLUSH-SLOWFMA-STRICT,SI %s
-; RUN: llc -verify-machineinstrs -mcpu=verde -mattr=+fp32-denormals,-fast-fmaf -fp-contract=on < %s | FileCheck -check-prefixes=GCN,GCN-DENORM-STRICT,GCN-DENORM,SI-DENORM,GCN-DENORM-SLOWFMA,GCN-DENORM-SLOWFMA-STRICT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -mcpu=tahiti -mattr=-fp32-denormals,+fast-fmaf -fp-contract=on < %s | FileCheck -check-prefixes=GCN,GCN-FLUSH-STRICT,GCN-FLUSH,SI-FLUSH,GCN-FLUSH-FASTFMA,GCN-FLUSH-FASTFMA-STRICT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -mcpu=tahiti -mattr=+fp32-denormals,+fast-fmaf -fp-contract=on < %s | FileCheck -check-prefixes=GCN,GCN-DENORM-STRICT,GCN-DENORM,SI-DENORM,GCN-DENORM-FASTFMA,GCN-DENORM-FASTFMA-STRICT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -mcpu=verde -mattr=-fp32-denormals,-fast-fmaf -fp-contract=on < %s | FileCheck -check-prefixes=GCN,GCN-FLUSH-STRICT,GCN-FLUSH,SI-FLUSH,GCN-FLUSH-SLOWFMA,GCN-FLUSH-SLOWFMA-STRICT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -mcpu=verde -mattr=+fp32-denormals,-fast-fmaf -fp-contract=on < %s | FileCheck -check-prefixes=GCN,GCN-DENORM-STRICT,GCN-DENORM,SI-DENORM,GCN-DENORM-SLOWFMA,GCN-DENORM-SLOWFMA-STRICT,SI %s
-; RUN: llc -verify-machineinstrs -mcpu=tahiti -mattr=-fp32-denormals,+fast-fmaf -fp-contract=fast < %s | FileCheck -check-prefixes=GCN,GCN-FLUSH-CONTRACT,GCN-FLUSH,SI-FLUSH,GCN-FLUSH-FASTFMA,GCN-FLUSH-FASTFMA-CONTRACT,SI %s
-; RUN: llc -verify-machineinstrs -mcpu=tahiti -mattr=+fp32-denormals,+fast-fmaf -fp-contract=fast < %s | FileCheck -check-prefixes=GCN,GCN-DENORM-CONTRACT,GCN-DENORM,SI-DENORM,GCN-DENORM-FASTFMA,GCN-DENORM-FASTFMA-CONTRACT,SI %s
-; RUN: llc -verify-machineinstrs -mcpu=verde -mattr=-fp32-denormals,-fast-fmaf -fp-contract=fast < %s | FileCheck -check-prefixes=GCN,GCN-FLUSH-CONTRACT,GCN-FLUSH,SI-FLUSH,GCN-FLUSH-SLOWFMA,GCN-FLUSH-SLOWFMA-CONTRACT,SI %s
-; RUN: llc -verify-machineinstrs -mcpu=verde -mattr=+fp32-denormals,-fast-fmaf -fp-contract=fast < %s | FileCheck -check-prefixes=GCN,GCN-DENORM-CONTRACT,GCN-DENORM,SI-DENORM,GCN-DENORM-SLOWFMA,GCN-DENORM-SLOWFMA-CONTRACT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -mcpu=tahiti -mattr=-fp32-denormals,+fast-fmaf -fp-contract=fast < %s | FileCheck -check-prefixes=GCN,GCN-FLUSH-CONTRACT,GCN-FLUSH,SI-FLUSH,GCN-FLUSH-FASTFMA,GCN-FLUSH-FASTFMA-CONTRACT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -mcpu=tahiti -mattr=+fp32-denormals,+fast-fmaf -fp-contract=fast < %s | FileCheck -check-prefixes=GCN,GCN-DENORM-CONTRACT,GCN-DENORM,SI-DENORM,GCN-DENORM-FASTFMA,GCN-DENORM-FASTFMA-CONTRACT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -mcpu=verde -mattr=-fp32-denormals,-fast-fmaf -fp-contract=fast < %s | FileCheck -check-prefixes=GCN,GCN-FLUSH-CONTRACT,GCN-FLUSH,SI-FLUSH,GCN-FLUSH-SLOWFMA,GCN-FLUSH-SLOWFMA-CONTRACT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -mcpu=verde -mattr=+fp32-denormals,-fast-fmaf -fp-contract=fast < %s | FileCheck -check-prefixes=GCN,GCN-DENORM-CONTRACT,GCN-DENORM,SI-DENORM,GCN-DENORM-SLOWFMA,GCN-DENORM-SLOWFMA-CONTRACT,SI %s
; Test all permutations of: fp32 denormals, fast fp contract, fp contract enabled for fmuladd, fmaf fast/slow.
@@ -67,7 +67,7 @@ define amdgpu_kernel void @fmul_fadd_f32(float addrspace(1)* %out, float addrspa
; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -96,7 +96,7 @@ define amdgpu_kernel void @fmuladd_2.0_a_b_f32(float addrspace(1)* %out, float a
; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -125,10 +125,10 @@ define amdgpu_kernel void @fmuladd_a_2.0_b_f32(float addrspace(1)* %out, float a
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; GCN-DENORM-STRICT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-STRICT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; GCN-DENORM-STRICT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -160,10 +160,10 @@ define amdgpu_kernel void @fadd_a_a_b_f32(float addrspace(1)* %out,
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; GCN-DENORM-STRICT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-STRICT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; GCN-DENORM-STRICT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -192,7 +192,7 @@ define amdgpu_kernel void @fadd_b_a_a_f32(float addrspace(1)* %out,
; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]]
; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; GCN-DENORM-SLOWFMA: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -221,7 +221,7 @@ define amdgpu_kernel void @fmuladd_neg_2.0_a_b_f32(float addrspace(1)* %out, flo
; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], -[[R1]], -2.0, [[R2]]
; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -252,7 +252,7 @@ define amdgpu_kernel void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out,
; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], -[[R1]], 2.0, [[R2]]
; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; GCN-DENORM-SLOWFMA: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -282,7 +282,7 @@ define amdgpu_kernel void @fmuladd_2.0_neg_a_b_f32(float addrspace(1)* %out, flo
; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, -[[R2]]
; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; GCN-DENORM-SLOWFMA: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -310,11 +310,11 @@ define amdgpu_kernel void @fmuladd_2.0_a_neg_b_f32(float addrspace(1)* %out, flo
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -[[REGC]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
-; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
-; GCN-DENORM-STRICT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
+; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
+; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -345,11 +345,11 @@ define amdgpu_kernel void @mad_sub_f32(float addrspace(1)* noalias nocapture %ou
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], [[REGC]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
-; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
-; GCN-DENORM-STRICT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
+; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
+; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -379,10 +379,10 @@ define amdgpu_kernel void @mad_sub_inv_f32(float addrspace(1)* noalias nocapture
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -|[[REGC]]|
-; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e64 [[RESULT:v[0-9]+]], [[TMP]], |[[REGC]]|
-; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
+; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
; GCN-DENORM-STRICT: v_sub_f32_e64 [[RESULT:v[0-9]+]], [[TMP]], |[[REGC]]|
; SI: buffer_store_dword [[RESULT]]
@@ -414,10 +414,10 @@ define amdgpu_kernel void @mad_sub_fabs_f32(float addrspace(1)* noalias nocaptur
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], |[[REGC]]|
-; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e64 [[RESULT:v[0-9]+]], |[[REGC]]|, [[TMP]]
-; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
+; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
; GCN-DENORM-STRICT: v_sub_f32_e64 [[RESULT:v[0-9]+]], |[[REGC]]|, [[TMP]]
; SI: buffer_store_dword [[RESULT]]
@@ -446,17 +446,17 @@ define amdgpu_kernel void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias noca
; GCN: {{buffer|flat}}_load_dword [[REGB:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[REGC:v[0-9]+]]
-; GCN-FLUSH: v_mac_f32_e32 [[REGC]], [[REGB]], [[REGA]]
+; GCN-FLUSH: v_mac_f32_e32 [[REGC]], [[REGA]], [[REGB]]
; SI-FLUSH: buffer_store_dword [[REGC]]
; VI-FLUSH: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[REGC]]
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], [[REGC]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
-; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
-; GCN-DENORM-STRICT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
+; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]]
+; GCN-DENORM-STRICT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -489,10 +489,10 @@ define amdgpu_kernel void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[REGA]], |[[REGB]]|, -[[REGC]]
; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e64 [[TMP:v[0-9]+]], [[REGA]], |[[REGB]]|
-; GCN-DENORM-SLOWFMA-CONTRACT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
; GCN-DENORM-STRICT: v_mul_f32_e64 [[TMP:v[0-9]+]], [[REGA]], |[[REGB]]|
-; GCN-DENORM-STRICT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
+; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -525,10 +525,10 @@ define amdgpu_kernel void @mad_fabs_sub_f32(float addrspace(1)* noalias nocaptur
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]]
; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; GCN-DENORM-STRICT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-STRICT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -556,10 +556,10 @@ define amdgpu_kernel void @fsub_c_fadd_a_a_f32(float addrspace(1)* %out, float a
; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, -[[R2]]
; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-SLOWFMA-CONTRACT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; GCN-DENORM-STRICT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
-; GCN-DENORM-STRICT: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
diff --git a/test/CodeGen/AMDGPU/fmuladd.f64.ll b/test/CodeGen/AMDGPU/fmuladd.f64.ll
index 86e91e04b0fc..8d91a56ee421 100644
--- a/test/CodeGen/AMDGPU/fmuladd.f64.ll
+++ b/test/CodeGen/AMDGPU/fmuladd.f64.ll
@@ -1,9 +1,9 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,SI %s
-; RUN: llc -march=amdgcn -mcpu=verde -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,SI %s
-; RUN: llc -march=amdgcn -mcpu=tahiti -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,SI %s
-; RUN: llc -march=amdgcn -mcpu=verde -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI %s
; GCN-LABEL: {{^}}fmuladd_f64:
; GCN: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
diff --git a/test/CodeGen/AMDGPU/fmuladd.v2f16.ll b/test/CodeGen/AMDGPU/fmuladd.v2f16.ll
index 624610096cbc..b50a26c023ca 100644
--- a/test/CodeGen/AMDGPU/fmuladd.v2f16.ll
+++ b/test/CodeGen/AMDGPU/fmuladd.v2f16.ll
@@ -1,12 +1,12 @@
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=on -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-FLUSH,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=on -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-FLUSH,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-FLUSH,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-FLUSH,GFX9 %s
-
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=on -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-DENORM-STRICT,GFX9-DENORM,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=on -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-DENORM-STRICT,GFX9-DENORM,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-DENORM-CONTRACT,GFX9-DENORM,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-DENORM-CONTRACT,GFX9-DENORM,GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=on -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-FLUSH,GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=on -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-FLUSH,GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-FLUSH,GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-FLUSH,GFX9 %s
+
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=on -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-DENORM-STRICT,GFX9-DENORM,GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=on -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-DENORM-STRICT,GFX9-DENORM,GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-DENORM-CONTRACT,GFX9-DENORM,GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-DENORM-CONTRACT,GFX9-DENORM,GFX9 %s
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>) #1
diff --git a/test/CodeGen/AMDGPU/fneg-combines.ll b/test/CodeGen/AMDGPU/fneg-combines.ll
index 66bf9d0ffb00..002bc47fb96a 100644
--- a/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -9,7 +9,7 @@
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
; GCN-NSZ: v_sub_f32_e64 [[RESULT:v[0-9]+]], -[[A]], [[B]]
@@ -31,7 +31,7 @@ define amdgpu_kernel void @v_fneg_add_f32(float addrspace(1)* %out, float addrsp
; GCN-LABEL: {{^}}v_fneg_add_store_use_add_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_add_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-DAG: v_add_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-DAG: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
; GCN-NEXT: buffer_store_dword [[NEG_ADD]]
; GCN-NEXT: buffer_store_dword [[ADD]]
@@ -54,7 +54,7 @@ define amdgpu_kernel void @v_fneg_add_store_use_add_f32(float addrspace(1)* %out
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-SAFE: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
; GCN-SAFE: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[ADD]]
@@ -82,10 +82,10 @@ define amdgpu_kernel void @v_fneg_add_multi_use_add_f32(float addrspace(1)* %out
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-SAFE: v_subrev_f32_e32
+; GCN-SAFE: v_sub_f32_e32
; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000,
-; GCN-NSZ: v_subrev_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-NSZ: v_sub_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-NSZ-NEXT: buffer_store_dword [[ADD]]
define amdgpu_kernel void @v_fneg_add_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -106,10 +106,10 @@ define amdgpu_kernel void @v_fneg_add_fneg_x_f32(float addrspace(1)* %out, float
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-SAFE: v_subrev_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-SAFE: v_sub_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
-; GCN-NSZ: v_subrev_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
+; GCN-NSZ: v_sub_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; GCN-NSZ-NEXT: buffer_store_dword [[ADD]]
define amdgpu_kernel void @v_fneg_add_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -133,7 +133,7 @@ define amdgpu_kernel void @v_fneg_add_x_fneg_f32(float addrspace(1)* %out, float
; GCN-SAFE: v_sub_f32_e64 [[ADD:v[0-9]+]], -[[A]], [[B]]
; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
-; GCN-NSZ: v_add_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-NSZ: v_add_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-NSZ-NEXT: buffer_store_dword [[ADD]]
define amdgpu_kernel void @v_fneg_add_fneg_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -157,11 +157,11 @@ define amdgpu_kernel void @v_fneg_add_fneg_fneg_f32(float addrspace(1)* %out, fl
; GCN-SAFE: v_bfrev_b32_e32 [[SIGNBIT:v[0-9]+]], 1{{$}}
; GCN-SAFE: v_xor_b32_e32 [[NEG_A:v[0-9]+]], [[A]], [[SIGNBIT]]
-; GCN-SAFE: v_subrev_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
+; GCN-SAFE: v_sub_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; GCN-SAFE: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], [[ADD]], [[SIGNBIT]]
; GCN-NSZ-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN-NSZ-DAG: v_subrev_f32_e32 [[NEG_ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-NSZ-DAG: v_sub_f32_e32 [[NEG_ADD:v[0-9]+]], [[A]], [[B]]
; GCN-NSZ-NEXT: buffer_store_dword [[NEG_ADD]]
; GCN-NSZ-NEXT: buffer_store_dword [[NEG_A]]
define amdgpu_kernel void @v_fneg_add_store_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
@@ -185,10 +185,10 @@ define amdgpu_kernel void @v_fneg_add_store_use_fneg_x_f32(float addrspace(1)* %
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN-SAFE-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
-; GCN-SAFE-DAG: v_subrev_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
+; GCN-SAFE-DAG: v_sub_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
-; GCN-NSZ-DAG: v_subrev_f32_e32 [[NEG_ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-NSZ-DAG: v_sub_f32_e32 [[NEG_ADD:v[0-9]+]], [[A]], [[B]]
; GCN-NSZ-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
; GCN-NSZ-NEXT: buffer_store_dword [[NEG_ADD]]
; GCN-NSZ-NEXT: buffer_store_dword [[MUL]]
@@ -235,7 +235,7 @@ define amdgpu_kernel void @v_fneg_mul_f32(float addrspace(1)* %out, float addrsp
; GCN-LABEL: {{^}}v_fneg_mul_store_use_mul_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-DAG: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-DAG: v_xor_b32_e32 [[NEG_MUL:v[0-9]+]], 0x80000000, [[ADD]]
; GCN-NEXT: buffer_store_dword [[NEG_MUL]]
; GCN: buffer_store_dword [[ADD]]
@@ -280,7 +280,7 @@ define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(float addrspace(1)* %out
; GCN-LABEL: {{^}}v_fneg_mul_fneg_x_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-NEXT: buffer_store_dword [[ADD]]
define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -300,7 +300,7 @@ define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(float addrspace(1)* %out, float
; GCN-LABEL: {{^}}v_fneg_mul_x_fneg_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-NEXT: buffer_store_dword [[ADD]]
define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -342,7 +342,7 @@ define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(float addrspace(1)* %out, fl
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_MUL:v[0-9]+]], [[B]], [[A]]
+; GCN-DAG: v_mul_f32_e32 [[NEG_MUL:v[0-9]+]], [[A]], [[B]]
; GCN-NEXT: buffer_store_dword [[NEG_MUL]]
; GCN: buffer_store_dword [[NEG_A]]
define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
@@ -364,7 +364,7 @@ define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(float addrspace(1)* %
; GCN-LABEL: {{^}}v_fneg_mul_multi_use_fneg_x_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_MUL:v[0-9]+]], [[B]], [[A]]
+; GCN-DAG: v_mul_f32_e32 [[NEG_MUL:v[0-9]+]], [[A]], [[B]]
; GCN-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
; GCN-NEXT: buffer_store_dword [[NEG_MUL]]
; GCN: buffer_store_dword [[MUL]]
@@ -974,7 +974,7 @@ define amdgpu_kernel void @v_fneg_fma_multi_use_fneg_x_y_f32(float addrspace(1)*
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN-SAFE: v_mac_f32_e32 [[C]], [[B]], [[A]]
+; GCN-SAFE: v_mac_f32_e32 [[C]], [[A]], [[B]]
; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[C]]
; GCN-NSZ: v_mad_f32 [[RESULT:v[0-9]+]], [[A]], -[[B]], -[[C]]
@@ -1000,7 +1000,7 @@ define amdgpu_kernel void @v_fneg_fmad_f32(float addrspace(1)* %out, float addrs
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN-SAFE: v_mac_f32_e32 [[C]], [[B]], [[A]]
+; GCN-SAFE: v_mac_f32_e32 [[C]], [[A]], [[B]]
; GCN-SAFE: v_xor_b32_e32 [[NEG_MAD:v[0-9]+]], 0x80000000, [[C]]
; GCN-SAFE-NEXT: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[C]]
@@ -1449,7 +1449,7 @@ define amdgpu_kernel void @v_fneg_mul_legacy_f32(float addrspace(1)* %out, float
; GCN-LABEL: {{^}}v_fneg_mul_legacy_store_use_mul_legacy_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-DAG: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-DAG: v_xor_b32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], 0x80000000, [[ADD]]
; GCN-NEXT: buffer_store_dword [[NEG_MUL_LEGACY]]
; GCN: buffer_store_dword [[ADD]]
@@ -1494,7 +1494,7 @@ define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(float addr
; GCN-LABEL: {{^}}v_fneg_mul_legacy_fneg_x_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-NEXT: buffer_store_dword [[ADD]]
define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -1514,7 +1514,7 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(float addrspace(1)* %out
; GCN-LABEL: {{^}}v_fneg_mul_legacy_x_fneg_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-NEXT: buffer_store_dword [[ADD]]
define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -1556,7 +1556,7 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(float addrspace(1)* %
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN-DAG: v_mul_legacy_f32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], [[B]], [[A]]
+; GCN-DAG: v_mul_legacy_f32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], [[A]], [[B]]
; GCN-NEXT: buffer_store_dword [[NEG_MUL_LEGACY]]
; GCN: buffer_store_dword [[NEG_A]]
define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
@@ -1578,7 +1578,7 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(float addrspac
; GCN-LABEL: {{^}}v_fneg_mul_legacy_multi_use_fneg_x_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_legacy_f32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], [[B]], [[A]]
+; GCN-DAG: v_mul_legacy_f32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], [[A]], [[B]]
; GCN-DAG: v_mul_legacy_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
; GCN-NEXT: buffer_store_dword [[NEG_MUL_LEGACY]]
; GCN: buffer_store_dword [[MUL]]
@@ -1664,7 +1664,7 @@ define amdgpu_kernel void @v_fneg_trunc_f32(float addrspace(1)* %out, float addr
; GCN-LABEL: {{^}}v_fneg_round_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: v_trunc_f32_e32
-; GCN: v_subrev_f32_e32
+; GCN: v_sub_f32_e32
; GCN: v_cndmask_b32
; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
@@ -1782,11 +1782,11 @@ define amdgpu_kernel void @v_fneg_interp_p2_f32(float addrspace(1)* %out, float
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[MUL0:v[0-9]+]], [[B]], [[A]]
+; GCN: v_mul_f32_e32 [[MUL0:v[0-9]+]], [[A]], [[B]]
; GCN: s_cbranch_scc1
; GCN: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x80000000, [[MUL0]]
-; GCN: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[C]], [[XOR]]
+; GCN: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[XOR]], [[C]]
; GCN: buffer_store_dword [[MUL1]]
; GCN: buffer_store_dword [[MUL0]]
@@ -1851,7 +1851,7 @@ define amdgpu_kernel void @v_fneg_inlineasm_f32(float addrspace(1)* %out, float
; GCN-LABEL: {{^}}v_fneg_inlineasm_multi_use_src_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[B]], [[A]]
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[A]], [[B]]
; GCN: v_xor_b32_e32 [[NEG:v[0-9]+]], 0x80000000, [[MUL]]
; GCN: ; use [[NEG]]
; GCN: buffer_store_dword [[MUL]]
@@ -1984,8 +1984,8 @@ define amdgpu_kernel void @multiuse_fneg_vop2_vop3_users_f32(float addrspace(1)*
; GCN-SAFE-DAG: v_mul_f32_e64 [[MUL2:v[0-9]+]], -[[FMA0]], [[D]]
; GCN-NSZ: v_fma_f32 [[FMA0:v[0-9]+]], [[A]], -[[B]], -2.0
-; GCN-NSZ-DAG: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[C]], [[FMA0]]
-; GCN-NSZ-DAG: v_mul_f32_e32 [[MUL2:v[0-9]+]], [[D]], [[FMA0]]
+; GCN-NSZ-DAG: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[FMA0]], [[C]]
+; GCN-NSZ-DAG: v_mul_f32_e32 [[MUL2:v[0-9]+]], [[FMA0]], [[D]]
; GCN: buffer_store_dword [[MUL1]]
; GCN-NEXT: buffer_store_dword [[MUL2]]
@@ -2084,7 +2084,7 @@ define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(float addrspace(1)*
; GCN: {{buffer|flat}}_load_dword [[D:v[0-9]+]]
; GCN: v_trunc_f32_e32 [[TRUNC_A:v[0-9]+]], [[A]]
; GCN-DAG: v_fma_f32 [[FMA0:v[0-9]+]], -[[TRUNC_A]], [[B]], [[C]]
-; GCN-DAG: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[D]], [[TRUNC_A]]
+; GCN-DAG: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[TRUNC_A]], [[D]]
; GCN: buffer_store_dword [[FMA0]]
; GCN: buffer_store_dword [[MUL1]]
define amdgpu_kernel void @multi_use_cost_to_fold_into_src(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr, float addrspace(1)* %d.ptr) #0 {
diff --git a/test/CodeGen/AMDGPU/fneg-fabs.f16.ll b/test/CodeGen/AMDGPU/fneg-fabs.f16.ll
index f4afaca2b7a7..56aea641d16e 100644
--- a/test/CodeGen/AMDGPU/fneg-fabs.f16.ll
+++ b/test/CodeGen/AMDGPU/fneg-fabs.f16.ll
@@ -5,7 +5,7 @@
; GCN-LABEL: {{^}}fneg_fabs_fadd_f16:
; CI: v_cvt_f32_f16_e32
; CI: v_cvt_f32_f16_e64 [[CVT_ABS_X:v[0-9]+]], |v{{[0-9]+}}|
-; CI: v_subrev_f32_e32 v{{[0-9]+}}, [[CVT_ABS_X]], v{{[0-9]+}}
+; CI: v_sub_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[CVT_ABS_X]]
; GFX89-NOT: _and
; GFX89: v_sub_f16_e64 {{v[0-9]+}}, {{v[0-9]+}}, |{{v[0-9]+}}|
@@ -20,7 +20,7 @@ define amdgpu_kernel void @fneg_fabs_fadd_f16(half addrspace(1)* %out, half %x,
; GCN-LABEL: {{^}}fneg_fabs_fmul_f16:
; CI-DAG: v_cvt_f32_f16_e32
; CI-DAG: v_cvt_f32_f16_e64 [[CVT_NEG_ABS_X:v[0-9]+]], -|{{v[0-9]+}}|
-; CI: v_mul_f32_e32 {{v[0-9]+}}, [[CVT_NEG_ABS_X]], {{v[0-9]+}}
+; CI: v_mul_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, [[CVT_NEG_ABS_X]]
; CI: v_cvt_f16_f32_e32
; GFX89-NOT: _and
diff --git a/test/CodeGen/AMDGPU/fneg-fabs.ll b/test/CodeGen/AMDGPU/fneg-fabs.ll
index 0a7346f410c9..3f20ca73e922 100644
--- a/test/CodeGen/AMDGPU/fneg-fabs.ll
+++ b/test/CodeGen/AMDGPU/fneg-fabs.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}fneg_fabs_fadd_f32:
; SI-NOT: and
diff --git a/test/CodeGen/AMDGPU/fneg.f16.ll b/test/CodeGen/AMDGPU/fneg.f16.ll
index 2d94726cbe20..49d674252746 100644
--- a/test/CodeGen/AMDGPU/fneg.f16.ll
+++ b/test/CodeGen/AMDGPU/fneg.f16.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=kaveri -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=CIVI -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CIVI -check-prefix=GCN -check-prefix=GFX89 %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN -check-prefix=GFX89 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=kaveri -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=CIVI -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CIVI -check-prefix=GCN -check-prefix=GFX89 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN -check-prefix=GFX89 %s
; FIXME: Should be able to do scalar op
; GCN-LABEL: {{^}}s_fneg_f16:
@@ -46,7 +46,7 @@ define amdgpu_kernel void @fneg_free_f16(half addrspace(1)* %out, i16 %in) #0 {
; CI-DAG: v_cvt_f32_f16_e32 [[CVT_VAL:v[0-9]+]], [[NEG_VALUE]]
; CI-DAG: v_cvt_f32_f16_e64 [[NEG_CVT0:v[0-9]+]], -[[NEG_VALUE]]
-; CI: v_mul_f32_e32 [[MUL:v[0-9]+]], [[CVT_VAL]], [[NEG_CVT0]]
+; CI: v_mul_f32_e32 [[MUL:v[0-9]+]], [[NEG_CVT0]], [[CVT_VAL]]
; CI: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], [[MUL]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[CVT1]]
diff --git a/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir b/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
index 986c6b296c96..3155b7a8664f 100644
--- a/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
+++ b/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
@@ -1,26 +1,5 @@
# RUN: llc -march=amdgcn -run-pass peephole-opt -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
-
---- |
- define amdgpu_kernel void @no_fold_imm_madak_mac_clamp_f32() #0 {
- ret void
- }
-
- define amdgpu_kernel void @no_fold_imm_madak_mac_omod_f32() #0 {
- ret void
- }
-
- define amdgpu_kernel void @no_fold_imm_madak_mad_clamp_f32() #0 {
- ret void
- }
-
- define amdgpu_kernel void @no_fold_imm_madak_mad_omod_f32() #0 {
- ret void
- }
-
- attributes #0 = { nounwind }
-
...
----
# GCN-LABEL: name: no_fold_imm_madak_mac_clamp_f32
# GCN: %23 = V_MOV_B32_e32 1090519040, implicit %exec
# GCN-NEXT: %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
@@ -62,14 +41,14 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
+ %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
%27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%28 = REG_SEQUENCE %3, 1, %27, 2
%11 = S_MOV_B32 61440
@@ -133,14 +112,14 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
+ %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
%27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%28 = REG_SEQUENCE %3, 1, %27, 2
%11 = S_MOV_B32 61440
@@ -204,14 +183,14 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
+ %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
%27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%28 = REG_SEQUENCE %3, 1, %27, 2
%11 = S_MOV_B32 61440
@@ -275,14 +254,14 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
+ %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
%27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%28 = REG_SEQUENCE %3, 1, %27, 2
%11 = S_MOV_B32 61440
diff --git a/test/CodeGen/AMDGPU/fold-operands-order.mir b/test/CodeGen/AMDGPU/fold-operands-order.mir
index afde89d6b64b..51bb357fcf6e 100644
--- a/test/CodeGen/AMDGPU/fold-operands-order.mir
+++ b/test/CodeGen/AMDGPU/fold-operands-order.mir
@@ -1,10 +1,4 @@
# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=hawaii -verify-machineinstrs -run-pass si-fold-operands -o - %s | FileCheck -check-prefix=GCN %s
-
---- |
- define amdgpu_kernel void @mov_in_use_list_2x() {
- unreachable
- }
-
...
---
diff --git a/test/CodeGen/AMDGPU/fp32_to_fp16.ll b/test/CodeGen/AMDGPU/fp32_to_fp16.ll
index 2c6b1cb18f7e..579a1454dd9a 100644
--- a/test/CodeGen/AMDGPU/fp32_to_fp16.ll
+++ b/test/CodeGen/AMDGPU/fp32_to_fp16.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/fpext.f16.ll b/test/CodeGen/AMDGPU/fpext.f16.ll
index 15cc73b9ee53..ec19fd199b4e 100644
--- a/test/CodeGen/AMDGPU/fpext.f16.ll
+++ b/test/CodeGen/AMDGPU/fpext.f16.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 %s
; GCN-LABEL: {{^}}fpext_f16_to_f32
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
@@ -154,7 +154,7 @@ entry:
; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
; GCN-DAG: v_cvt_f32_f16_e64 [[CVTA_NEG:v[0-9]+]], -[[A]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVTA:v[0-9]+]], [[A]]
-; SI: v_mul_f32_e32 [[MUL_F32:v[0-9]+]], [[CVTA]], [[CVTA_NEG]]
+; SI: v_mul_f32_e32 [[MUL_F32:v[0-9]+]], [[CVTA_NEG]], [[CVTA]]
; SI: v_cvt_f16_f32_e32 [[MUL:v[0-9]+]], [[MUL_F32]]
; GFX89-DAG: v_cvt_f32_f16_e64 [[CVT_NEGA:v[0-9]+]], -[[A]]
diff --git a/test/CodeGen/AMDGPU/fptosi.f16.ll b/test/CodeGen/AMDGPU/fptosi.f16.ll
index f310618d8bdb..f593030764a9 100644
--- a/test/CodeGen/AMDGPU/fptosi.f16.ll
+++ b/test/CodeGen/AMDGPU/fptosi.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}fptosi_f16_to_i16
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
@@ -60,7 +60,7 @@ entry:
; SI: v_cvt_i32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
; SI: v_and_b32_e32 v[[R_I16_LO:[0-9]+]], 0xffff, v[[R_I16_0]]
; SI: v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
-; SI: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_HI]], v[[R_I16_LO]]
+; SI: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_LO]], v[[R_I16_HI]]
; VI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; VI: v_cvt_f32_f16_sdwa v[[A_F32_1:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
diff --git a/test/CodeGen/AMDGPU/fptoui.f16.ll b/test/CodeGen/AMDGPU/fptoui.f16.ll
index 7641c08e33c3..cebe3304d542 100644
--- a/test/CodeGen/AMDGPU/fptoui.f16.ll
+++ b/test/CodeGen/AMDGPU/fptoui.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}fptoui_f16_to_i16
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
@@ -60,7 +60,7 @@ entry:
; SI: v_cvt_u32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
; SI: v_cvt_u32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
; SI: v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
-; SI: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_HI]], v[[R_I16_0]]
+; SI: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_0]], v[[R_I16_HI]]
; VI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_cvt_f32_f16_sdwa v[[A_F32_0:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
diff --git a/test/CodeGen/AMDGPU/fptrunc.f16.ll b/test/CodeGen/AMDGPU/fptrunc.f16.ll
index bc72f4424c98..64df625d4bb5 100644
--- a/test/CodeGen/AMDGPU/fptrunc.f16.ll
+++ b/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=SIVI %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global,-fp64-fp16-denormals -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=SIVI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global,-fp64-fp16-denormals -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
; GCN-LABEL: {{^}}fptrunc_f32_to_f16:
; GCN: buffer_load_dword v[[A_F32:[0-9]+]]
@@ -38,10 +38,10 @@ entry:
; GCN-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[A_F32_0]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_cvt_f16_f32_sdwa v[[R_F16_1:[0-9]+]], v[[A_F32_1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GFX9-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
; GFX9: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
@@ -68,7 +68,7 @@ entry:
; VI: v_cvt_f16_f32_sdwa v[[R_F16_HI:[0-9]+]], v[[A_F32_1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; SIVI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SIVI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GFX9-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
; GFX9: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
diff --git a/test/CodeGen/AMDGPU/fract.f64.ll b/test/CodeGen/AMDGPU/fract.f64.ll
index 9a56cbe983cd..1314dfe3c7ca 100644
--- a/test/CodeGen/AMDGPU/fract.f64.ll
+++ b/test/CodeGen/AMDGPU/fract.f64.ll
@@ -1,9 +1,9 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=SI-UNSAFE -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=VI-UNSAFE -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=SI-UNSAFE -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=VI-UNSAFE -check-prefix=FUNC %s
declare double @llvm.fabs.f64(double) #0
declare double @llvm.floor.f64(double) #0
diff --git a/test/CodeGen/AMDGPU/fract.ll b/test/CodeGen/AMDGPU/fract.ll
index 207fe280c9a6..2217f67da7d3 100644
--- a/test/CodeGen/AMDGPU/fract.ll
+++ b/test/CodeGen/AMDGPU/fract.ll
@@ -1,15 +1,15 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SAFE -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SAFE -check-prefix=GCN -check-prefix=CI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SAFE -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SAFE -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SAFE -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SAFE -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=GCN %s
declare float @llvm.fabs.f32(float) #0
declare float @llvm.floor.f32(float) #0
; GCN-LABEL: {{^}}fract_f32:
; GCN-SAFE: v_floor_f32_e32 [[FLR:v[0-9]+]], [[INPUT:v[0-9]+]]
-; GCN-SAFE: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[FLR]], [[INPUT]]
+; GCN-SAFE: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[INPUT]], [[FLR]]
; GCN-UNSAFE: v_fract_f32_e32 [[RESULT:v[0-9]+]], [[INPUT:v[0-9]+]]
diff --git a/test/CodeGen/AMDGPU/frem.ll b/test/CodeGen/AMDGPU/frem.ll
index 9778069d0477..3b8f58cc18a7 100644
--- a/test/CodeGen/AMDGPU/frem.ll
+++ b/test/CodeGen/AMDGPU/frem.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}frem_f32:
; GCN-DAG: buffer_load_dword [[X:v[0-9]+]], {{.*$}}
@@ -29,7 +29,7 @@ define amdgpu_kernel void @frem_f32(float addrspace(1)* %out, float addrspace(1)
; GCN: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:16
; GCN: buffer_load_dword [[X:v[0-9]+]], {{.*}}
; GCN: v_rcp_f32_e32 [[INVY:v[0-9]+]], [[Y]]
-; GCN: v_mul_f32_e32 [[DIV:v[0-9]+]], [[INVY]], [[X]]
+; GCN: v_mul_f32_e32 [[DIV:v[0-9]+]], [[X]], [[INVY]]
; GCN: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[DIV]]
; GCN: v_mad_f32 [[RESULT:v[0-9]+]], -[[TRUNC]], [[Y]], [[X]]
; GCN: buffer_store_dword [[RESULT]]
diff --git a/test/CodeGen/AMDGPU/fsqrt.f64.ll b/test/CodeGen/AMDGPU/fsqrt.f64.ll
index 453d8fb37f2f..186757e4c5d8 100644
--- a/test/CodeGen/AMDGPU/fsqrt.f64.ll
+++ b/test/CodeGen/AMDGPU/fsqrt.f64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}v_safe_fsqrt_f64:
; GCN: v_sqrt_f64_e32 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
diff --git a/test/CodeGen/AMDGPU/fsqrt.ll b/test/CodeGen/AMDGPU/fsqrt.ll
index a0fd3411ca05..6bd9a0db14f6 100644
--- a/test/CodeGen/AMDGPU/fsqrt.ll
+++ b/test/CodeGen/AMDGPU/fsqrt.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; Run with unsafe-fp-math to make sure nothing tries to turn this into 1 / rsqrt(x)
diff --git a/test/CodeGen/AMDGPU/fsub.f16.ll b/test/CodeGen/AMDGPU/fsub.f16.ll
index fa00c06546db..15a4ce2d88f7 100644
--- a/test/CodeGen/AMDGPU/fsub.f16.ll
+++ b/test/CodeGen/AMDGPU/fsub.f16.ll
@@ -1,15 +1,15 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=VI -check-prefix=SIVI %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=VI -check-prefix=SIVI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
; GCN-LABEL: {{^}}fsub_f16:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_subrev_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_sub_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; GFX89: v_subrev_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; GFX89: v_sub_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @fsub_f16(
@@ -70,16 +70,16 @@ entry:
; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI-DAG: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_subrev_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_subrev_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_sub_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_sub_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
-; VI-DAG: v_subrev_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_sub_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]]
; VI-DAG: v_sub_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GFX9: v_pk_add_f16 v[[R_V2_F16:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]] neg_lo:[0,1] neg_hi:[0,1]
@@ -109,12 +109,12 @@ entry:
; SI: v_sub_f32_e32 v[[R_F32_1:[0-9]+]], 2.0, v[[B_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_mov_b32_e32 [[CONST2:v[0-9]+]], 0x4000
; VI-DAG: v_sub_f16_sdwa v[[R_F16_HI:[0-9]+]], [[CONST2]], v[[B_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-DAG: v_sub_f16_e32 v[[R_F16_0:[0-9]+]], 1.0, v[[B_V2_F16]]
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GFX9: s_mov_b32 [[K:s[0-9]+]], 0x40003c00
; GFX9: v_pk_add_f16 v[[R_V2_F16:[0-9]+]], v[[B_V2_F16]], [[K]] neg_lo:[1,0] neg_hi:[1,0]
@@ -143,12 +143,12 @@ entry:
; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], -1.0, v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_mov_b32_e32 [[CONSTM1:v[0-9]+]], 0xbc00
; VI-DAG: v_add_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[A_V2_F16]], [[CONSTM1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-DAG: v_add_f16_e32 v[[R_F16_0:[0-9]+]], -2.0, v[[A_V2_F16]]
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GFX9: s_mov_b32 [[K:s[0-9]+]], 0xbc00c000
; GFX9: v_pk_add_f16 v[[R_V2_F16:[0-9]+]], v[[A_V2_F16]], [[K]]{{$}}
diff --git a/test/CodeGen/AMDGPU/fsub.ll b/test/CodeGen/AMDGPU/fsub.ll
index e7a92d95d485..48647a2cdb89 100644
--- a/test/CodeGen/AMDGPU/fsub.ll
+++ b/test/CodeGen/AMDGPU/fsub.ll
@@ -1,9 +1,9 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}v_fsub_f32:
-; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define amdgpu_kernel void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%a = load float, float addrspace(1)* %in, align 4
@@ -41,10 +41,10 @@ define amdgpu_kernel void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define amdgpu_kernel void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float>, <4 x float> addrspace(1)* %in, align 16
@@ -67,7 +67,7 @@ define amdgpu_kernel void @s_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x flo
}
; FUNC-LABEL: {{^}}v_fneg_fsub_f32:
-; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
define amdgpu_kernel void @v_fneg_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
@@ -80,7 +80,7 @@ define amdgpu_kernel void @v_fneg_fsub_f32(float addrspace(1)* %out, float addrs
}
; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_f32:
-; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI-NOT: xor
define amdgpu_kernel void @v_fneg_fsub_nsz_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
@@ -93,7 +93,7 @@ define amdgpu_kernel void @v_fneg_fsub_nsz_f32(float addrspace(1)* %out, float a
}
; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_attribute_f32:
-; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI-NOT: xor
define amdgpu_kernel void @v_fneg_fsub_nsz_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
@@ -109,7 +109,7 @@ define amdgpu_kernel void @v_fneg_fsub_nsz_attribute_f32(float addrspace(1)* %ou
; make sure it is disabled and the fneg is not folded if it is not
; "true".
; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_false_attribute_f32:
-; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
define amdgpu_kernel void @v_fneg_fsub_nsz_false_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/AMDGPU/fsub64.ll b/test/CodeGen/AMDGPU/fsub64.ll
index dc332414a152..73f1a69eeb9d 100644
--- a/test/CodeGen/AMDGPU/fsub64.ll
+++ b/test/CodeGen/AMDGPU/fsub64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare double @llvm.fabs.f64(double) #0
diff --git a/test/CodeGen/AMDGPU/ftrunc.f64.ll b/test/CodeGen/AMDGPU/ftrunc.f64.ll
index 1f72ec65588e..bb2a6ba8e348 100644
--- a/test/CodeGen/AMDGPU/ftrunc.f64.ll
+++ b/test/CodeGen/AMDGPU/ftrunc.f64.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
declare double @llvm.trunc.f64(double) nounwind readnone
declare <2 x double> @llvm.trunc.v2f64(<2 x double>) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/global-extload-i16.ll b/test/CodeGen/AMDGPU/global-extload-i16.ll
index 19e592f50bea..4e50f995d27e 100644
--- a/test/CodeGen/AMDGPU/global-extload-i16.ll
+++ b/test/CodeGen/AMDGPU/global-extload-i16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; XUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FIXME: cypress is broken because the bigger testcases spill and it's not implemented
diff --git a/test/CodeGen/AMDGPU/global-smrd-unknown.ll b/test/CodeGen/AMDGPU/global-smrd-unknown.ll
new file mode 100644
index 000000000000..8a576e6480a1
--- /dev/null
+++ b/test/CodeGen/AMDGPU/global-smrd-unknown.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -memdep-block-scan-limit=1 -amdgpu-scalarize-global-loads -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}unknown_memdep_analysis:
+; GCN: flat_load_dword
+; GCN: flat_load_dword
+; GCN: flat_store_dword
+define amdgpu_kernel void @unknown_memdep_analysis(float addrspace(1)* nocapture readonly %arg) #0 {
+bb:
+ %tmp53 = load float, float addrspace(1)* undef, align 4
+ %tmp54 = getelementptr inbounds float, float addrspace(1)* %arg, i32 31
+ %tmp55 = load float, float addrspace(1)* %tmp54, align 4
+ %tmp56 = tail call float @llvm.fmuladd.f32(float undef, float %tmp53, float %tmp55)
+ store float %tmp56, float addrspace(1)* undef, align 4
+ ret void
+}
+
+declare float @llvm.fmuladd.f32(float, float, float) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/half.ll b/test/CodeGen/AMDGPU/half.ll
index 41ae5a4a0b00..43745d4b3da3 100644
--- a/test/CodeGen/AMDGPU/half.ll
+++ b/test/CodeGen/AMDGPU/half.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; half args should be promoted to float for SI and lower.
@@ -17,7 +17,7 @@ define amdgpu_kernel void @load_f16_arg(half addrspace(1)* %out, half %arg) #0 {
; GCN-DAG: buffer_load_ushort [[V0:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:44
; GCN-DAG: buffer_load_ushort [[V1:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:46
; GCN: v_lshlrev_b32_e32 [[HI:v[0-9]+]], 16, [[V1]]
-; GCN: v_or_b32_e32 [[PACKED:v[0-9]+]], [[HI]], [[V0]]
+; GCN: v_or_b32_e32 [[PACKED:v[0-9]+]], [[V0]], [[HI]]
; GCN: buffer_store_dword [[PACKED]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN: s_endpgm
define amdgpu_kernel void @load_v2f16_arg(<2 x half> addrspace(1)* %out, <2 x half> %arg) #0 {
@@ -471,10 +471,10 @@ define amdgpu_kernel void @global_truncstore_f32_to_f16(half addrspace(1)* %out,
; SI-DAG: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], v[[HI]]
; SI-DAG: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[CVT1]]
-; SI: v_or_b32_e32 [[PACKED:v[0-9]+]], [[SHL]], [[CVT0]]
+; SI: v_or_b32_e32 [[PACKED:v[0-9]+]], [[CVT0]], [[SHL]]
; VI-DAG: v_cvt_f16_f32_sdwa [[CVT1:v[0-9]+]], v[[HI]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; VI: v_or_b32_e32 [[PACKED:v[0-9]+]], [[CVT1]], [[CVT0]]
+; VI: v_or_b32_e32 [[PACKED:v[0-9]+]], [[CVT0]], [[CVT1]]
; GCN-DAG: buffer_store_dword [[PACKED]]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/imm.ll b/test/CodeGen/AMDGPU/imm.ll
index c2668a077b09..8cda01a10f76 100644
--- a/test/CodeGen/AMDGPU/imm.ll
+++ b/test/CodeGen/AMDGPU/imm.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; Use a 64-bit value with lo bits that can be represented as an inline constant
; GCN-LABEL: {{^}}i64_imm_inline_lo:
diff --git a/test/CodeGen/AMDGPU/immv216.ll b/test/CodeGen/AMDGPU/immv216.ll
index cd3502baee7b..fe86a5872968 100644
--- a/test/CodeGen/AMDGPU/immv216.ll
+++ b/test/CodeGen/AMDGPU/immv216.ll
@@ -1,6 +1,6 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
; FIXME: Merge into imm.ll
; GCN-LABEL: {{^}}store_inline_imm_neg_0.0_v2i16:
@@ -305,7 +305,7 @@ define amdgpu_kernel void @commute_add_inline_imm_0.5_v2f16(<2 x half> addrspace
; VI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x6400{{$}}
; VI-DAG: buffer_load_dword
; VI-NOT: and
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, [[K]], v{{[0-9]+}}
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]]
; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, [[K]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: buffer_store_dword
diff --git a/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 0d20c32a4770..62200b988bea 100644
--- a/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -1,7 +1,7 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-vgpr-index-mode -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-vgpr-index-mode -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s
; Tests for indirect addressing on SI, which is implemented using dynamic
; indexing of vectors.
diff --git a/test/CodeGen/AMDGPU/inline-asm.ll b/test/CodeGen/AMDGPU/inline-asm.ll
index c0f5218efc16..75826d530cb0 100644
--- a/test/CodeGen/AMDGPU/inline-asm.ll
+++ b/test/CodeGen/AMDGPU/inline-asm.ll
@@ -222,9 +222,9 @@ entry:
; FIXME: Should be scheduled to shrink vcc
; CHECK-LABEL: {{^}}i1_input_phys_vgpr_x2:
; CHECK: v_cmp_eq_u32_e32 vcc, 1, v0
-; CHECK: v_cmp_eq_u32_e64 s[0:1], 1, v1
; CHECK: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK: v_cndmask_b32_e64 v1, 0, -1, s[0:1]
+; CHECK: v_cmp_eq_u32_e32 vcc, 1, v1
+; CHECK: v_cndmask_b32_e64 v1, 0, -1, vcc
define amdgpu_kernel void @i1_input_phys_vgpr_x2() {
entry:
%val0 = load volatile i1, i1 addrspace(1)* undef
diff --git a/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll b/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll
index 5cd965d2fa9c..eea26192ed32 100644
--- a/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll
+++ b/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -amdgpu-load-store-vectorizer=0 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -amdgpu-load-store-vectorizer=0 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GatherAllAliases gives up on trying to analyze cases where the
; pointer may have been loaded from an aliased store, so make sure
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll
index f08d4b6c7915..06dc2cc8b90e 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.fabs.f16(half %a)
declare i1 @llvm.amdgcn.class.f16(half %a, i32 %b)
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll
index 1fcdac537fba..f71b9752e9a1 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i1 @llvm.amdgcn.class.f32(float, i32) #1
declare i1 @llvm.amdgcn.class.f64(double, i32) #1
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
index 2cc63ae74bf1..1b3e09a81e5a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN -check-prefix=SI %s
; XUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN -check-prefix=VI %s
; FIXME: Enable for VI.
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
index fe211d356070..7068f4559055 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.amdgcn.ldexp.f16(half %a, i32 %b)
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll
index 593c95856811..871b8c4f99b9 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}bfe_i32_arg_arg_arg:
; GCN: v_bfe_i32
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll
index 495e36b09f8f..39370e41e8aa 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
declare i32 @llvm.amdgcn.sffbh.i32(i32) #1
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll
index e0cec2134e70..8468aa3a7b3e 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare double @llvm.amdgcn.trig.preop.f64(double, i32) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
index 92e3a1099da0..68fd08f778c4 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}bfe_u32_arg_arg_arg:
; GCN: v_bfe_u32
diff --git a/test/CodeGen/AMDGPU/llvm.ceil.f16.ll b/test/CodeGen/AMDGPU/llvm.ceil.f16.ll
index 0604a49372a2..071f2a6de4cd 100644
--- a/test/CodeGen/AMDGPU/llvm.ceil.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.ceil.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.ceil.f16(half %a)
declare <2 x half> @llvm.ceil.v2f16(<2 x half> %a)
@@ -33,12 +33,12 @@ entry:
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; SI-NOT: and
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_ceil_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_ceil_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NOT: and
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/llvm.cos.f16.ll b/test/CodeGen/AMDGPU/llvm.cos.f16.ll
index d836ea36ef63..8931de63e74b 100644
--- a/test/CodeGen/AMDGPU/llvm.cos.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.cos.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.cos.f16(half %a)
declare <2 x half> @llvm.cos.v2f16(<2 x half> %a)
@@ -29,8 +29,8 @@ entry:
; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; SI-DAG: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[HALF_PIE]], v[[A_F32_0]]
-; SI-DAG: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[HALF_PIE]], v[[A_F32_1]]
+; SI-DAG: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[A_F32_0]], v[[HALF_PIE]]
+; SI-DAG: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[A_F32_1]], v[[HALF_PIE]]
; VI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_cvt_f32_f16_sdwa v[[A_F32_1:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
@@ -48,8 +48,8 @@ entry:
; GCN-NOT: and
; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @cos_v2f16(
diff --git a/test/CodeGen/AMDGPU/llvm.exp2.f16.ll b/test/CodeGen/AMDGPU/llvm.exp2.f16.ll
index 5757142b9e95..4e96a7619716 100644
--- a/test/CodeGen/AMDGPU/llvm.exp2.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.exp2.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.exp2.f16(half %a)
declare <2 x half> @llvm.exp2.v2f16(<2 x half> %a)
@@ -33,12 +33,12 @@ entry:
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; SI-NOT: and
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_exp_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_exp_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NOT: and
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/llvm.floor.f16.ll b/test/CodeGen/AMDGPU/llvm.floor.f16.ll
index 6a18141d8035..74d1e694ffbe 100644
--- a/test/CodeGen/AMDGPU/llvm.floor.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.floor.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.floor.f16(half %a)
declare <2 x half> @llvm.floor.v2f16(<2 x half> %a)
@@ -33,12 +33,12 @@ entry:
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; SI-NOT: and
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_floor_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_floor_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NOT: and
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/llvm.fma.f16.ll b/test/CodeGen/AMDGPU/llvm.fma.f16.ll
index 3f4fba7d8ead..a379b18ffb8b 100644
--- a/test/CodeGen/AMDGPU/llvm.fma.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.fma.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.fma.f16(half %a, half %b, half %c)
declare <2 x half> @llvm.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
@@ -128,7 +128,7 @@ define amdgpu_kernel void @fma_f16_imm_c(
; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN-NOT: and
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @fma_v2f16(
@@ -167,7 +167,7 @@ define amdgpu_kernel void @fma_v2f16(
; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN-NOT: and
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @fma_v2f16_imm_a(
@@ -210,7 +210,7 @@ define amdgpu_kernel void @fma_v2f16_imm_a(
; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN-NOT: and
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @fma_v2f16_imm_b(
@@ -253,7 +253,7 @@ define amdgpu_kernel void @fma_v2f16_imm_b(
; VI-DAG: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16]]
; GCN-NOT: and
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @fma_v2f16_imm_c(
diff --git a/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll b/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll
index 806723e5136c..2d4fe08d8bde 100644
--- a/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll
@@ -1,7 +1,7 @@
-; RUN: llc -march=amdgcn -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SI-FLUSH %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-FLUSH %s
-; RUN: llc -march=amdgcn -mattr=+fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SI-DENORM %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-DENORM %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SI-FLUSH %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-FLUSH %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=+fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SI-DENORM %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=+fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-DENORM %s
declare half @llvm.fmuladd.f16(half %a, half %b, half %c)
declare <2 x half> @llvm.fmuladd.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
@@ -13,11 +13,11 @@ declare <2 x half> @llvm.fmuladd.v2f16(<2 x half> %a, <2 x half> %b, <2 x half>
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
-; SI: v_mac_f32_e32 v[[C_F32]], v[[B_F32]], v[[A_F32]]
+; SI: v_mac_f32_e32 v[[C_F32]], v[[A_F32]], v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[C_F32]]
; SI: buffer_store_short v[[R_F16]]
-; VI-FLUSH: v_mac_f16_e32 v[[C_F16]], v[[B_F16]], v[[A_F16]]
+; VI-FLUSH: v_mac_f16_e32 v[[C_F16]], v[[A_F16]], v[[B_F16]]
; VI-FLUSH: buffer_store_short v[[C_F16]]
; VI-DENORM: v_fma_f16 [[RESULT:v[0-9]+]], v[[A_F16]], v[[B_F16]], v[[C_F16]]
@@ -110,19 +110,19 @@ define amdgpu_kernel void @fmuladd_f16_imm_b(
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
-; SI: v_mac_f32_e32 v[[C_F32_0]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_mac_f32_e32 v[[C_F32_1]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_mac_f32_e32 v[[C_F32_0]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_mac_f32_e32 v[[C_F32_1]], v[[A_F32_1]], v[[B_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[C_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_LO:[0-9]+]], v[[C_F32_0]]
; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_LO]], v[[R_F16_HI]]
; VI-FLUSH: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; VI-FLUSH-DAG: v_mac_f16_sdwa v[[A_F16_1]], v[[B_V2_F16]], v[[C_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-FLUSH-DAG: v_mac_f16_e32 v[[A_V2_F16]], v[[C_V2_F16]], v[[B_V2_F16]]
+; VI-FLUSH-DAG: v_mac_f16_e32 v[[A_V2_F16]], v[[B_V2_F16]], v[[C_V2_F16]]
; VI-FLUSH-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[A_F16_1]]
; VI-FLUSH-NOT: v_and_b32
-; VI-FLUSH: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[A_V2_F16]]
+; VI-FLUSH: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[A_V2_F16]], v[[R_F16_HI]]
; VI-DENORM: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; VI-DENORM: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
@@ -131,7 +131,7 @@ define amdgpu_kernel void @fmuladd_f16_imm_b(
; VI-DENORM-DAG: v_fma_f16 v[[RES1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16_1]]
; VI-DENORM-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[RES1]]
; VI-DENORM-NOT: v_and_b32
-; VI-DENORM: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[RES0]]
+; VI-DENORM: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[RES0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/llvm.log2.f16.ll b/test/CodeGen/AMDGPU/llvm.log2.f16.ll
index 773eb55283e4..277195c53208 100644
--- a/test/CodeGen/AMDGPU/llvm.log2.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.log2.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.log2.f16(half %a)
declare <2 x half> @llvm.log2.v2f16(<2 x half> %a)
@@ -33,12 +33,12 @@ entry:
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; SI-NOT: and
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_log_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_log_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NOT: and
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll b/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
index 8f4b314ffabb..c72716439a76 100644
--- a/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.maxnum.f16(half %a, half %b)
declare <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b)
@@ -9,9 +9,9 @@ declare <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b)
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_max_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_max_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_max_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; VI: v_max_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @maxnum_f16(
@@ -73,18 +73,18 @@ entry:
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI-DAG: v_max_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI-DAG: v_max_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; SI-NOT: and
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
-; VI-DAG: v_max_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_max_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]]
; VI-DAG: v_max_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NOT: and
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
@@ -115,7 +115,7 @@ entry:
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN-NOT: and
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @maxnum_v2f16_imm_a(
@@ -143,7 +143,7 @@ entry:
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN-NOT: and
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @maxnum_v2f16_imm_b(
diff --git a/test/CodeGen/AMDGPU/llvm.minnum.f16.ll b/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
index 1a86286f7136..0e93acc27dc5 100644
--- a/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.minnum.f16(half %a, half %b)
declare <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b)
@@ -9,9 +9,9 @@ declare <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b)
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @minnum_f16(
@@ -72,18 +72,18 @@ entry:
; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI-DAG: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI-DAG: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; SI-NOT: and
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
-; VI-DAG: v_min_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_min_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]]
; VI-DAG: v_min_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NOT: and
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
@@ -116,7 +116,7 @@ entry:
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN-NOT: and
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @minnum_v2f16_imm_a(
@@ -144,7 +144,7 @@ entry:
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN-NOT: and
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @minnum_v2f16_imm_b(
diff --git a/test/CodeGen/AMDGPU/llvm.rint.f16.ll b/test/CodeGen/AMDGPU/llvm.rint.f16.ll
index 30cb969a76e5..92282083984b 100644
--- a/test/CodeGen/AMDGPU/llvm.rint.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.rint.f16.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SIVI -check-prefix=VI -check-prefix=GFX89 %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SIVI -check-prefix=VI -check-prefix=GFX89 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
declare half @llvm.rint.f16(half %a)
declare <2 x half> @llvm.rint.v2f16(<2 x half> %a)
@@ -34,12 +34,12 @@ entry:
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; SI-NOT: v_and_b32
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_rndne_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_rndne_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NOT: v_and_b32
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GFX9: v_rndne_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
; GFX9: v_rndne_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
diff --git a/test/CodeGen/AMDGPU/llvm.round.ll b/test/CodeGen/AMDGPU/llvm.round.ll
index ffe87977870b..7e29147571f2 100644
--- a/test/CodeGen/AMDGPU/llvm.round.ll
+++ b/test/CodeGen/AMDGPU/llvm.round.ll
@@ -12,7 +12,7 @@
; GCN: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[K]], 1.0, [[VX]]
; GCN: v_cmp_ge_f32_e64 vcc, |[[SUB]]|, 0.5
; GCN: v_cndmask_b32_e32 [[SEL:v[0-9]+]], 0, [[VX]]
-; GCN: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SEL]], [[TRUNC]]
+; GCN: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TRUNC]], [[SEL]]
; GCN: buffer_store_dword [[RESULT]]
; R600: TRUNC {{.*}}, [[ARG:KC[0-9]\[[0-9]+\]\.[XYZW]]]
@@ -70,7 +70,7 @@ define amdgpu_kernel void @round_v8f32(<8 x float> addrspace(1)* %out, <8 x floa
; GFX89: v_sub_f16_e32 [[SUB:v[0-9]+]], [[SX]], [[TRUNC]]
; GFX89: v_cmp_ge_f16_e64 vcc, |[[SUB]]|, 0.5
; GFX89: v_cndmask_b32_e32 [[SEL:v[0-9]+]], 0, [[COPYSIGN]]
-; GFX89: v_add_f16_e32 [[RESULT:v[0-9]+]], [[SEL]], [[TRUNC]]
+; GFX89: v_add_f16_e32 [[RESULT:v[0-9]+]], [[TRUNC]], [[SEL]]
; GFX89: buffer_store_short [[RESULT]]
define amdgpu_kernel void @round_f16(half addrspace(1)* %out, i32 %x.arg) #0 {
%x.arg.trunc = trunc i32 %x.arg to i16
diff --git a/test/CodeGen/AMDGPU/llvm.sin.f16.ll b/test/CodeGen/AMDGPU/llvm.sin.f16.ll
index eb1f32c981f8..08b9d9d873b4 100644
--- a/test/CodeGen/AMDGPU/llvm.sin.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.sin.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.sin.f16(half %a)
declare <2 x half> @llvm.sin.v2f16(<2 x half> %a)
@@ -29,9 +29,9 @@ entry:
; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; SI-DAG: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[HALF_PIE]], v[[A_F32_0]]
+; SI-DAG: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[A_F32_0]], v[[HALF_PIE]]
; SI-DAG: v_fract_f32_e32 v[[F_F32_0:[0-9]+]], v[[M_F32_0]]
-; SI-DAG: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[HALF_PIE]], v[[A_F32_1]]
+; SI-DAG: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[A_F32_1]], v[[HALF_PIE]]
; SI-DAG: v_fract_f32_e32 v[[F_F32_1:[0-9]+]], v[[M_F32_1]]
; VI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
@@ -47,10 +47,10 @@ entry:
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_cvt_f16_f32_sdwa v[[R_F16_1:[0-9]+]], v[[R_F32_1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll b/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll
index 46ee6526aca2..0e1358ecca22 100644
--- a/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.sqrt.f16(half %a)
declare <2 x half> @llvm.sqrt.v2f16(<2 x half> %a)
@@ -33,12 +33,12 @@ entry:
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; SI-NOT: v_and_b32
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_sqrt_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_sqrt_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NOT: v_and_b32
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/llvm.trunc.f16.ll b/test/CodeGen/AMDGPU/llvm.trunc.f16.ll
index dc7182aa0d89..37ee4e92c637 100644
--- a/test/CodeGen/AMDGPU/llvm.trunc.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.trunc.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
declare half @llvm.trunc.f16(half %a)
declare <2 x half> @llvm.trunc.v2f16(<2 x half> %a)
@@ -33,12 +33,12 @@ entry:
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; SI-NOT: v_and_b32
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_HI]]
; VI-DAG: v_trunc_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
; VI-DAG: v_trunc_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NOT: v_and_b32
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_0]], v[[R_F16_1]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/load-global-f32.ll b/test/CodeGen/AMDGPU/load-global-f32.ll
index bd6fea587b42..77557a584093 100644
--- a/test/CodeGen/AMDGPU/load-global-f32.ll
+++ b/test/CodeGen/AMDGPU/load-global-f32.ll
@@ -1,9 +1,9 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}global_load_f32:
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}
diff --git a/test/CodeGen/AMDGPU/load-global-f64.ll b/test/CodeGen/AMDGPU/load-global-f64.ll
index 5b772e1fe5ee..84214b7dbc10 100644
--- a/test/CodeGen/AMDGPU/load-global-f64.ll
+++ b/test/CodeGen/AMDGPU/load-global-f64.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}global_load_f64:
; GCN-NOHSA: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
diff --git a/test/CodeGen/AMDGPU/load-global-i16.ll b/test/CodeGen/AMDGPU/load-global-i16.ll
index e3415b9c47de..cb2495d5fdcf 100644
--- a/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -1,8 +1,8 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-SI,FUNC %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-HSA,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-VI,FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=EGCM -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=CM -check-prefix=EGCM -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-SI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-HSA,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-VI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=EGCM -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=CM -check-prefix=EGCM -check-prefix=FUNC %s
; FIXME: r600 is broken because the bigger testcases spill and it's not implemented
diff --git a/test/CodeGen/AMDGPU/load-global-i32.ll b/test/CodeGen/AMDGPU/load-global-i32.ll
index 5df32c1e3120..6360d39666c7 100644
--- a/test/CodeGen/AMDGPU/load-global-i32.ll
+++ b/test/CodeGen/AMDGPU/load-global-i32.ll
@@ -1,7 +1,7 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}global_load_i32:
diff --git a/test/CodeGen/AMDGPU/load-global-i64.ll b/test/CodeGen/AMDGPU/load-global-i64.ll
index de16b6c8997e..c71db0b7357c 100644
--- a/test/CodeGen/AMDGPU/load-global-i64.ll
+++ b/test/CodeGen/AMDGPU/load-global-i64.ll
@@ -1,9 +1,9 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}global_load_i64:
; GCN-NOHSA: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
diff --git a/test/CodeGen/AMDGPU/load-global-i8.ll b/test/CodeGen/AMDGPU/load-global-i8.ll
index fc0cbf916b52..3fe6bd26be14 100644
--- a/test/CodeGen/AMDGPU/load-global-i8.ll
+++ b/test/CodeGen/AMDGPU/load-global-i8.ll
@@ -1,8 +1,8 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,SI,FUNC %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-HSA,SI,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,VI,FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,SI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-HSA,SI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,VI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}global_load_i8:
diff --git a/test/CodeGen/AMDGPU/load-weird-sizes.ll b/test/CodeGen/AMDGPU/load-weird-sizes.ll
index d6162c388b5b..f9ba6241fe06 100644
--- a/test/CodeGen/AMDGPU/load-weird-sizes.ll
+++ b/test/CodeGen/AMDGPU/load-weird-sizes.ll
@@ -1,8 +1,8 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NOHSA -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=CI-HSA -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NOHSA -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=R600 -check-prefix=CM -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NOHSA -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=CI-HSA -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NOHSA -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=R600 -check-prefix=CM -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}load_i24:
; SI: {{flat|buffer}}_load_ubyte
diff --git a/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll b/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll
index 74564f387ede..e1a2af6c7ef9 100644
--- a/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll
+++ b/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll
@@ -1,4 +1,5 @@
; RUN: opt -S -amdgpu-lower-intrinsics %s | FileCheck -check-prefix=OPT %s
+; RUN: opt -S -amdgpu-lower-intrinsics -use-wide-memcpy-loop-lowering=true %s | FileCheck -check-prefix=WOPT %s
declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture readonly, i64, i32, i1) #1
declare void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(3)* nocapture readonly, i32, i32, i1) #1
@@ -21,6 +22,17 @@ define amdgpu_kernel void @max_size_small_static_memcpy_caller0(i8 addrspace(1)*
; OPT-NEXT: load i8
; OPT: getelementptr
; OPT-NEXT: store i8
+
+; WOPT-LABEL: @min_size_large_static_memcpy_caller0(
+; WOPT-NOT: call
+; WOPT: br label %load-store-loop
+; WOPT: [[T1:%[0-9]+]] = getelementptr inbounds i8, i8 addrspace(1)* %src, i64 %loop-index
+; WOPT-NEXT: [[T2:%[0-9]+]] = load i8, i8 addrspace(1)* [[T1]]
+; WOPT-NEXT: [[T3:%[0-9]+]] = getelementptr inbounds i8, i8 addrspace(1)* %dst, i64 %loop-index
+; WOPT-NEXT: store i8 [[T2]], i8 addrspace(1)* [[T3]]
+; WOPT-NEXT: [[T4:%[0-9]+]] = add i64 %loop-index, 1
+; WOPT-NEXT: [[T5:%[0-9]+]] = icmp ult i64 [[T4]], 1025
+; WOPT-NEXT: br i1 [[T5]], label %load-store-loop, label %memcpy-split
define amdgpu_kernel void @min_size_large_static_memcpy_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) #0 {
call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1025, i32 1, i1 false)
ret void
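
The WOPT lines added above spell out the loop that -use-wide-memcpy-loop-lowering is expected to emit for the 1025-byte memcpy. As a rough, hedged sketch only (not part of the patch; the function name and the %src.gep/%val/%dst.gep/%next/%cond value names are made up for readability), the lowered form those checks describe is approximately this LLVM IR:

; Sketch of the load/store loop the WOPT checks describe.
define void @sketch_lowered_memcpy(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) {
entry:
  br label %load-store-loop

load-store-loop:                                   ; byte-wise copy loop
  %loop-index = phi i64 [ 0, %entry ], [ %next, %load-store-loop ]
  %src.gep = getelementptr inbounds i8, i8 addrspace(1)* %src, i64 %loop-index
  %val = load i8, i8 addrspace(1)* %src.gep
  %dst.gep = getelementptr inbounds i8, i8 addrspace(1)* %dst, i64 %loop-index
  store i8 %val, i8 addrspace(1)* %dst.gep
  %next = add i64 %loop-index, 1
  %cond = icmp ult i64 %next, 1025                 ; 1025 bytes copied in total
  br i1 %cond, label %load-store-loop, label %memcpy-split

memcpy-split:
  ret void
}
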
diff --git a/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir b/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
new file mode 100644
index 000000000000..768acf35eeae
--- /dev/null
+++ b/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
@@ -0,0 +1,227 @@
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass machine-scheduler -o - %s | FileCheck -check-prefix=GCN %s
+
+# GCN-LABEL: name: cluster_add_addc
+# GCN: S_NOP 0, implicit-def %vcc
+# GCN: dead %2, %3 = V_ADD_I32_e64 %0, %1, implicit %exec
+# GCN: dead %4, dead %5 = V_ADDC_U32_e64 %6, %7, %3, implicit %exec
+name: cluster_add_addc
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: sreg_64 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: sreg_64 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 0, implicit %exec
+ %1 = V_MOV_B32_e32 0, implicit %exec
+ %2, %3 = V_ADD_I32_e64 %0, %1, implicit %exec
+ %6 = V_MOV_B32_e32 0, implicit %exec
+ %7 = V_MOV_B32_e32 0, implicit %exec
+ S_NOP 0, implicit def %vcc
+ %4, %5 = V_ADDC_U32_e64 %6, %7, %3, implicit %exec
+...
+
+# GCN-LABEL: name: interleave_add64s
+# GCN: dead %8, %9 = V_ADD_I32_e64 %0, %1, implicit %exec
+# GCN-NEXT: dead %12, dead %13 = V_ADDC_U32_e64 %4, %5, %9, implicit %exec
+# GCN-NEXT: dead %10, %11 = V_ADD_I32_e64 %2, %3, implicit %exec
+# GCN-NEXT: dead %14, dead %15 = V_ADDC_U32_e64 %6, %7, %11, implicit %exec
+name: interleave_add64s
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: vgpr_32 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+ - { id: 8, class: vgpr_32 }
+ - { id: 9, class: sreg_64 }
+ - { id: 10, class: vgpr_32 }
+ - { id: 11, class: sreg_64 }
+ - { id: 12, class: vgpr_32 }
+ - { id: 13, class: sreg_64 }
+ - { id: 14, class: vgpr_32 }
+ - { id: 15, class: sreg_64 }
+
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 0, implicit %exec
+ %1 = V_MOV_B32_e32 0, implicit %exec
+ %2 = V_MOV_B32_e32 0, implicit %exec
+ %3 = V_MOV_B32_e32 0, implicit %exec
+ %4 = V_MOV_B32_e32 0, implicit %exec
+ %5 = V_MOV_B32_e32 0, implicit %exec
+ %6 = V_MOV_B32_e32 0, implicit %exec
+ %7 = V_MOV_B32_e32 0, implicit %exec
+
+ %8, %9 = V_ADD_I32_e64 %0, %1, implicit %exec
+ %10, %11 = V_ADD_I32_e64 %2, %3, implicit %exec
+
+
+ %12, %13 = V_ADDC_U32_e64 %4, %5, %9, implicit %exec
+ %14, %15 = V_ADDC_U32_e64 %6, %7, %11, implicit %exec
+...
+
+# GCN-LABEL: name: cluster_mov_addc
+# GCN: S_NOP 0, implicit-def %vcc
+# GCN-NEXT: %2 = S_MOV_B64 0
+# GCN-NEXT: dead %3, dead %4 = V_ADDC_U32_e64 %0, %1, %2, implicit %exec
+name: cluster_mov_addc
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: sreg_64 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 0, implicit %exec
+ %1 = V_MOV_B32_e32 0, implicit %exec
+ %2 = S_MOV_B64 0
+ S_NOP 0, implicit def %vcc
+ %3, %4 = V_ADDC_U32_e64 %0, %1, %2, implicit %exec
+...
+
+# GCN-LABEL: name: no_cluster_add_addc_diff_sgpr
+# GCN: dead %2, dead %3 = V_ADD_I32_e64 %0, %1, implicit %exec
+# GCN-NEXT: %6 = V_MOV_B32_e32 0, implicit %exec
+# GCN-NEXT: %7 = V_MOV_B32_e32 0, implicit %exec
+# GCN-NEXT: S_NOP 0, implicit-def %vcc
+# GCN-NEXT: %8 = S_MOV_B64 0
+# GCN-NEXT: dead %4, dead %5 = V_ADDC_U32_e64 %6, %7, %8, implicit %exec
+name: no_cluster_add_addc_diff_sgpr
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: sreg_64 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: sreg_64 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+ - { id: 8, class: sreg_64 }
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 0, implicit %exec
+ %1 = V_MOV_B32_e32 0, implicit %exec
+ %8 = S_MOV_B64 0
+ %2, %3 = V_ADD_I32_e64 %0, %1, implicit %exec
+ %6 = V_MOV_B32_e32 0, implicit %exec
+ %7 = V_MOV_B32_e32 0, implicit %exec
+ S_NOP 0, implicit def %vcc
+ %4, %5 = V_ADDC_U32_e64 %6, %7, %8, implicit %exec
+...
+# GCN-LABEL: name: cluster_sub_subb
+# GCN: S_NOP 0, implicit-def %vcc
+# GCN: dead %2, %3 = V_SUB_I32_e64 %0, %1, implicit %exec
+# GCN: dead %4, dead %5 = V_SUBB_U32_e64 %6, %7, %3, implicit %exec
+name: cluster_sub_subb
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: sreg_64 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: sreg_64 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 0, implicit %exec
+ %1 = V_MOV_B32_e32 0, implicit %exec
+ %2, %3 = V_SUB_I32_e64 %0, %1, implicit %exec
+ %6 = V_MOV_B32_e32 0, implicit %exec
+ %7 = V_MOV_B32_e32 0, implicit %exec
+ S_NOP 0, implicit def %vcc
+ %4, %5 = V_SUBB_U32_e64 %6, %7, %3, implicit %exec
+...
+
+# GCN-LABEL: name: cluster_cmp_cndmask
+# GCN: S_NOP 0, implicit-def %vcc
+# GCN-NEXT: %3 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
+# GCN-NEXT: dead %4 = V_CNDMASK_B32_e64 %0, %1, %3, implicit %exec
+name: cluster_cmp_cndmask
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: sreg_64 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: sreg_64 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 0, implicit %exec
+ %1 = V_MOV_B32_e32 0, implicit %exec
+ %3 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
+ S_NOP 0, implicit def %vcc
+ %4 = V_CNDMASK_B32_e64 %0, %1, %3, implicit %exec
+...
+
+# GCN-LABEL: name: cluster_multi_use_cmp_cndmask
+# GCN: %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
+# GCN-NEXT: dead %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
+# GCN-NEXT: dead %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+name: cluster_multi_use_cmp_cndmask
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64 }
+ - { id: 5, class: vgpr_32 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 0, implicit %exec
+ %1 = V_MOV_B32_e32 0, implicit %exec
+ %2 = V_MOV_B32_e32 0, implicit %exec
+ %3 = V_MOV_B32_e32 0, implicit %exec
+
+ %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
+ S_NOP 0, implicit def %vcc
+ %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
+ %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+...
+
+# GCN-LABEL: name: cluster_multi_use_cmp_cndmask2
+# GCN: %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
+# GCN-NEXT: dead %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
+# GCN-NEXT: %3 = V_MOV_B32_e32 0, implicit %exec
+# GCN-NEXT: dead %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+name: cluster_multi_use_cmp_cndmask2
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64 }
+ - { id: 5, class: vgpr_32 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 0, implicit %exec
+ %1 = V_MOV_B32_e32 0, implicit %exec
+ %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
+ %2 = V_MOV_B32_e32 0, implicit %exec
+ %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
+ %3 = V_MOV_B32_e32 0, implicit %exec
+ %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+...
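
The new MIR test above checks that the machine scheduler's clustering keeps each carry- or condition-producing instruction (V_ADD_I32_e64, V_SUB_I32_e64, V_CMP_EQ_I32_e64) next to the instruction that consumes that output (V_ADDC_U32_e64, V_SUBB_U32_e64, V_CNDMASK_B32_e64), so the S_NOP that clobbers %vcc is scheduled outside the pair rather than between its two halves. As a hedged illustration only (the function name, the volatile loads, and the kernel signature are assumptions, not taken from the test), such add/addc pairs typically come from an ordinary 64-bit addition, which is split into a low 32-bit add that defines a carry and a high add-with-carry that consumes it:

; Illustrative origin of a V_ADD_I32 / V_ADDC_U32 pair: an i64 add of loaded values.
define amdgpu_kernel void @sketch_add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %a = load volatile i64, i64 addrspace(1)* %in
  %b = load volatile i64, i64 addrspace(1)* %in
  %sum = add i64 %a, %b        ; low add defines the carry, high add consumes it
  store i64 %sum, i64 addrspace(1)* %out
  ret void
}
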
diff --git a/test/CodeGen/AMDGPU/mad-combine.ll b/test/CodeGen/AMDGPU/mad-combine.ll
index b855fc500c6b..8a6bf853a7c6 100644
--- a/test/CodeGen/AMDGPU/mad-combine.ll
+++ b/test/CodeGen/AMDGPU/mad-combine.ll
@@ -19,15 +19,15 @@ declare float @llvm.fmuladd.f32(float, float, float) #0
; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
-; SI-STD: v_mac_f32_e32 [[C]], [[B]], [[A]]
+; SI-STD: v_mac_f32_e32 [[C]], [[A]], [[B]]
; SI-DENORM-FASTFMAF: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[C]]
; SI-DENORM-SLOWFMAF-NOT: v_fma
; SI-DENORM-SLOWFMAF-NOT: v_mad
-; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
-; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[A]], [[B]]
+; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; SI-STD: buffer_store_dword [[C]]
@@ -55,15 +55,15 @@ define amdgpu_kernel void @combine_to_mad_f32_0(float addrspace(1)* noalias %out
; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
-; SI-STD-DAG: v_mac_f32_e32 [[C]], [[B]], [[A]]
-; SI-STD-DAG: v_mac_f32_e32 [[D]], [[B]], [[A]]
+; SI-STD-DAG: v_mac_f32_e32 [[C]], [[A]], [[B]]
+; SI-STD-DAG: v_mac_f32_e32 [[D]], [[A]], [[B]]
; SI-DENORM-FASTFMAF-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], [[A]], [[B]], [[C]]
; SI-DENORM-FASTFMAF-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[A]], [[B]], [[D]]
-; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
-; SI-DENORM-SLOWFMAF-DAG: v_add_f32_e32 [[RESULT0:v[0-9]+]], [[C]], [[TMP]]
-; SI-DENORM-SLOWFMAF-DAG: v_add_f32_e32 [[RESULT1:v[0-9]+]], [[D]], [[TMP]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[A]], [[B]]
+; SI-DENORM-SLOWFMAF-DAG: v_add_f32_e32 [[RESULT0:v[0-9]+]], [[TMP]], [[C]]
+; SI-DENORM-SLOWFMAF-DAG: v_add_f32_e32 [[RESULT1:v[0-9]+]], [[TMP]], [[D]]
; SI-DENORM-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DENORM-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
@@ -99,11 +99,11 @@ define amdgpu_kernel void @combine_to_mad_f32_0_2use(float addrspace(1)* noalias
; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
-; SI-STD: v_mac_f32_e32 [[C]], [[B]], [[A]]
+; SI-STD: v_mac_f32_e32 [[C]], [[A]], [[B]]
; SI-DENORM-FASTFMAF: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[C]]
-; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
-; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[A]], [[B]]
+; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; SI-STD: buffer_store_dword [[C]]
@@ -133,8 +133,8 @@ define amdgpu_kernel void @combine_to_mad_f32_1(float addrspace(1)* noalias %out
; SI-STD: v_mad_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], -[[C]]
; SI-DENORM-FASTFMAF: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], -[[C]]
-; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
-; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[A]], [[B]]
+; SI-DENORM-SLOWFMAF: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
; SI: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
@@ -167,9 +167,9 @@ define amdgpu_kernel void @combine_to_mad_fsub_0_f32(float addrspace(1)* noalias
; SI-DENORM-FASTFMAF-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], [[A]], [[B]], -[[C]]
; SI-DENORM-FASTFMAF-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[A]], [[B]], -[[D]]
-; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
-; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT0:v[0-9]+]], [[C]], [[TMP]]
-; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT1:v[0-9]+]], [[D]], [[TMP]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[A]], [[B]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e32 [[RESULT0:v[0-9]+]], [[TMP]], [[C]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e32 [[RESULT1:v[0-9]+]], [[TMP]], [[D]]
; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
@@ -205,8 +205,8 @@ define amdgpu_kernel void @combine_to_mad_fsub_0_f32_2use(float addrspace(1)* no
; SI-STD: v_mad_f32 [[RESULT:v[0-9]+]], -[[A]], [[B]], [[C]]
; SI-DENORM-FASTFMAF: v_fma_f32 [[RESULT:v[0-9]+]], -[[A]], [[B]], [[C]]
-; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
-; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[A]], [[B]]
+; SI-DENORM-SLOWFMAF: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
; SI: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
@@ -238,9 +238,9 @@ define amdgpu_kernel void @combine_to_mad_fsub_1_f32(float addrspace(1)* noalias
; SI-DENORM-FASTFMAF-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], -[[A]], [[B]], [[C]]
; SI-DENORM-FASTFMAF-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], -[[A]], [[B]], [[D]]
-; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
-; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT0:v[0-9]+]], [[TMP]], [[C]]
-; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT1:v[0-9]+]], [[TMP]], [[D]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[A]], [[B]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e32 [[RESULT0:v[0-9]+]], [[C]], [[TMP]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e32 [[RESULT1:v[0-9]+]], [[D]], [[TMP]]
; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
@@ -278,7 +278,7 @@ define amdgpu_kernel void @combine_to_mad_fsub_1_f32_2use(float addrspace(1)* no
; SI-DENORM-FASTFMAF: v_fma_f32 [[RESULT:v[0-9]+]], -[[A]], [[B]], -[[C]]
; SI-DENORM-SLOWFMAF: v_mul_f32_e64 [[TMP:v[0-9]+]], [[A]], -[[B]]
-; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
+; SI-DENORM-SLOWFMAF: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
; SI: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
@@ -313,8 +313,8 @@ define amdgpu_kernel void @combine_to_mad_fsub_2_f32(float addrspace(1)* noalias
; SI-DENORM-FASTFMAF-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], -[[A]], [[B]], -[[D]]
; SI-DENORM-SLOWFMAF: v_mul_f32_e64 [[TMP:v[0-9]+]], [[A]], -[[B]]
-; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT0:v[0-9]+]], [[C]], [[TMP]]
-; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT1:v[0-9]+]], [[D]], [[TMP]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e32 [[RESULT0:v[0-9]+]], [[TMP]], [[C]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e32 [[RESULT1:v[0-9]+]], [[TMP]], [[D]]
; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
@@ -355,9 +355,9 @@ define amdgpu_kernel void @combine_to_mad_fsub_2_f32_2uses_neg(float addrspace(1
; SI-DENORM-FASTFMAF-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], -[[A]], [[B]], -[[C]]
; SI-DENORM-FASTFMAF-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[A]], [[B]], -[[D]]
-; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[A]], [[B]]
; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e64 [[RESULT0:v[0-9]+]], -[[TMP]], [[C]]
-; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT1:v[0-9]+]], [[D]], [[TMP]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e32 [[RESULT1:v[0-9]+]], [[TMP]], [[D]]
; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
@@ -395,13 +395,13 @@ define amdgpu_kernel void @combine_to_mad_fsub_2_f32_2uses_mul(float addrspace(1
; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
; SI-DAG: buffer_load_dword [[E:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
-; SI-STD: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-STD: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
; SI-STD: v_fma_f32 [[TMP1:v[0-9]+]], [[A]], [[B]], [[TMP0]]
-; SI-STD: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP1]]
+; SI-STD: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP1]], [[C]]
-; SI-DENORM: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-DENORM: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
; SI-DENORM: v_fma_f32 [[TMP1:v[0-9]+]], [[A]], [[B]], [[TMP0]]
-; SI-DENORM: v_subrev_f32_e32 [[RESULT1:v[0-9]+]], [[C]], [[TMP1]]
+; SI-DENORM: v_sub_f32_e32 [[RESULT1:v[0-9]+]], [[TMP1]], [[C]]
; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
@@ -437,13 +437,13 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(float addrspace(
; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
; SI-DAG: buffer_load_dword [[E:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
-; SI-STD: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-STD: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
; SI-STD: v_fma_f32 [[TMP1:v[0-9]+]], [[B]], [[C]], [[TMP0]]
-; SI-STD: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP1]], [[A]]
+; SI-STD: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[A]], [[TMP1]]
-; SI-DENORM: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-DENORM: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
; SI-DENORM: v_fma_f32 [[TMP1:v[0-9]+]], [[B]], [[C]], [[TMP0]]
-; SI-DENORM: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP1]], [[A]]
+; SI-DENORM: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[A]], [[TMP1]]
; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: s_endpgm
@@ -479,21 +479,21 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_1_f32(float addrspace(
; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
; SI-DAG: buffer_load_dword [[E:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
-; SI-STD-SAFE: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
-; SI-STD-SAFE: v_mac_f32_e32 [[TMP0]], [[B]], [[A]]
-; SI-STD-SAFE: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP0]]
+; SI-STD-SAFE: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
+; SI-STD-SAFE: v_mac_f32_e32 [[TMP0]], [[A]], [[B]]
+; SI-STD-SAFE: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP0]], [[C]]
; SI-STD-UNSAFE: v_mad_f32 [[RESULT:v[0-9]+]], [[D]], [[E]], -[[C]]
-; SI-STD-UNSAFE: v_mac_f32_e32 [[RESULT]], [[B]], [[A]]
+; SI-STD-UNSAFE: v_mac_f32_e32 [[RESULT]], [[A]], [[B]]
-; SI-DENORM-FASTFMAF: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-DENORM-FASTFMAF: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
; SI-DENORM-FASTFMAF: v_fma_f32 [[TMP1:v[0-9]+]], [[A]], [[B]], [[TMP0]]
-; SI-DENORM-FASTFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP1]]
+; SI-DENORM-FASTFMAF: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP1]], [[C]]
-; SI-DENORM-SLOWFMAF-DAG: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
-; SI-DENORM-SLOWFMAF-DAG: v_mul_f32_e32 [[TMP1:v[0-9]+]], [[B]], [[A]]
-; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[TMP2:v[0-9]+]], [[TMP0]], [[TMP1]]
-; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP2]]
+; SI-DENORM-SLOWFMAF-DAG: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
+; SI-DENORM-SLOWFMAF-DAG: v_mul_f32_e32 [[TMP1:v[0-9]+]], [[A]], [[B]]
+; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[TMP2:v[0-9]+]], [[TMP1]], [[TMP0]]
+; SI-DENORM-SLOWFMAF: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP2]], [[C]]
; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: s_endpgm
@@ -530,21 +530,21 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_2_f32(float addrspace(
; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
; SI-DAG: buffer_load_dword [[E:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
-; SI-STD-SAFE: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
-; SI-STD-SAFE: v_mac_f32_e32 [[TMP0]], [[C]], [[B]]
-; SI-STD-SAFE: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP0]], [[A]]
+; SI-STD-SAFE: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
+; SI-STD-SAFE: v_mac_f32_e32 [[TMP0]], [[B]], [[C]]
+; SI-STD-SAFE: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[A]], [[TMP0]]
; SI-STD-UNSAFE: v_mad_f32 [[TMP:v[0-9]+]], -[[D]], [[E]], [[A]]
; SI-STD-UNSAFE: v_mad_f32 [[RESULT:v[0-9]+]], -[[B]], [[C]], [[TMP]]
-; SI-DENORM-FASTFMAF: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-DENORM-FASTFMAF: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
; SI-DENORM-FASTFMAF: v_fma_f32 [[TMP1:v[0-9]+]], [[B]], [[C]], [[TMP0]]
-; SI-DENORM-FASTFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP1]], [[A]]
+; SI-DENORM-FASTFMAF: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[A]], [[TMP1]]
-; SI-DENORM-SLOWFMAF-DAG: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
-; SI-DENORM-SLOWFMAF-DAG: v_mul_f32_e32 [[TMP1:v[0-9]+]], [[C]], [[B]]
-; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[TMP2:v[0-9]+]], [[TMP0]], [[TMP1]]
-; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP2]], [[A]]
+; SI-DENORM-SLOWFMAF-DAG: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[D]], [[E]]
+; SI-DENORM-SLOWFMAF-DAG: v_mul_f32_e32 [[TMP1:v[0-9]+]], [[B]], [[C]]
+; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[TMP2:v[0-9]+]], [[TMP1]], [[TMP0]]
+; SI-DENORM-SLOWFMAF: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[A]], [[TMP2]]
; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: s_endpgm
diff --git a/test/CodeGen/AMDGPU/madak.ll b/test/CodeGen/AMDGPU/madak.ll
index 8e0014911def..77c35fac8b5d 100644
--- a/test/CodeGen/AMDGPU/madak.ll
+++ b/test/CodeGen/AMDGPU/madak.ll
@@ -34,8 +34,8 @@ define amdgpu_kernel void @madak_f32(float addrspace(1)* noalias %out, float add
; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; GCN-DAG: buffer_load_dword [[VC:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
-; GCN-DAG: v_mad_f32 {{v[0-9]+}}, [[VB]], [[VA]], [[VK]]
-; GCN-DAG: v_mac_f32_e32 [[VK]], [[VC]], [[VA]]
+; GCN-DAG: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VB]], [[VK]]
+; GCN-DAG: v_mac_f32_e32 [[VK]], [[VA]], [[VC]]
; GCN: s_endpgm
define amdgpu_kernel void @madak_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -199,7 +199,7 @@ define amdgpu_kernel void @no_madak_src1_modifier_f32(float addrspace(1)* noalia
; GCN: v_mov_b32_e32 [[SGPR0_VCOPY:v[0-9]+]], [[SGPR0]]
; GCN: buffer_load_dword [[VGPR:v[0-9]+]]
; GCN: v_madak_f32 [[MADAK:v[0-9]+]], 0.5, [[SGPR0_VCOPY]], 0x42280000
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[VGPR]], [[MADAK]]
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[MADAK]], [[VGPR]]
; GCN: buffer_store_dword [[MUL]]
define amdgpu_kernel void @madak_constant_bus_violation(i32 %arg1, float %sgpr0, float %sgpr1) #0 {
bb:
diff --git a/test/CodeGen/AMDGPU/madmk.ll b/test/CodeGen/AMDGPU/madmk.ll
index 6bc40e82459b..b78d65ae1e1a 100644
--- a/test/CodeGen/AMDGPU/madmk.ll
+++ b/test/CodeGen/AMDGPU/madmk.ll
@@ -32,8 +32,8 @@ define amdgpu_kernel void @madmk_f32(float addrspace(1)* noalias %out, float add
; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; GCN-DAG: buffer_load_dword [[VC:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
-; GCN-DAG: v_mac_f32_e32 [[VB]], [[VK]], [[VA]]
-; GCN-DAG: v_mac_f32_e32 [[VC]], [[VK]], [[VA]]
+; GCN-DAG: v_mac_f32_e32 [[VB]], [[VA]], [[VK]]
+; GCN-DAG: v_mac_f32_e32 [[VC]], [[VA]], [[VK]]
; GCN: s_endpgm
define amdgpu_kernel void @madmk_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
diff --git a/test/CodeGen/AMDGPU/max.ll b/test/CodeGen/AMDGPU/max.ll
index ffcdac03bc74..6387c9ff6dfa 100644
--- a/test/CodeGen/AMDGPU/max.ll
+++ b/test/CodeGen/AMDGPU/max.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}v_test_imax_sge_i32:
diff --git a/test/CodeGen/AMDGPU/merge-stores.ll b/test/CodeGen/AMDGPU/merge-stores.ll
index dfd5b97fcc86..6b0ec483247c 100644
--- a/test/CodeGen/AMDGPU/merge-stores.ll
+++ b/test/CodeGen/AMDGPU/merge-stores.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
; This test is mostly to test DAG store merging, so disable the vectorizer.
; Run with devices with different unaligned load restrictions.
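
The comment above states the intent: with -amdgpu-load-store-vectorizer=0 the IR-level vectorizer is out of the picture, so any wide stores in the output must come from DAG store merging. As a hedged sketch only (the function and value names are made up, not taken from the test), the kind of input being exercised is a run of adjacent scalar stores that the DAG combiner may merge into a single wide store:

; Four adjacent i32 stores that DAG store merging can combine.
define amdgpu_kernel void @sketch_merge_stores(i32 addrspace(1)* %out) {
  %p1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
  %p2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
  %p3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
  store i32 1, i32 addrspace(1)* %out
  store i32 2, i32 addrspace(1)* %p1
  store i32 3, i32 addrspace(1)* %p2
  store i32 4, i32 addrspace(1)* %p3
  ret void
}
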
diff --git a/test/CodeGen/AMDGPU/mubuf.ll b/test/CodeGen/AMDGPU/mubuf.ll
index b23b21118aaa..97666492e376 100644
--- a/test/CodeGen/AMDGPU/mubuf.ll
+++ b/test/CodeGen/AMDGPU/mubuf.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -show-mc-encoding -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -show-mc-encoding -verify-machineinstrs < %s | FileCheck %s
declare i32 @llvm.amdgcn.workitem.id.x() readnone
diff --git a/test/CodeGen/AMDGPU/mul.ll b/test/CodeGen/AMDGPU/mul.ll
index 57c50c9804e5..a0290789175d 100644
--- a/test/CodeGen/AMDGPU/mul.ll
+++ b/test/CodeGen/AMDGPU/mul.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s -check-prefix=FUNC
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s -check-prefix=FUNC
; mul24 and mad24 are affected
diff --git a/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll b/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
index 82c27f204a47..ba3ff0b08bc9 100644
--- a/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
+++ b/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
@@ -66,9 +66,9 @@
; FIXME: Why is this compare essentially repeated?
; GCN: v_cmp_eq_u32_e32 vcc, 1, [[REG:v[0-9]+]]
-; GCN-NEXT: v_cmp_ne_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, 1, [[REG]]
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, vcc
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1
+; GCN: v_cmp_ne_u32_e32 vcc, 1, [[REG]]
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, vcc
; GCN: ; %Flow1
; GCN-NEXT: s_or_b64 exec, exec
diff --git a/test/CodeGen/AMDGPU/no-shrink-extloads.ll b/test/CodeGen/AMDGPU/no-shrink-extloads.ll
index 8a7bf6db5b8d..500e4cb3cc73 100644
--- a/test/CodeGen/AMDGPU/no-shrink-extloads.ll
+++ b/test/CodeGen/AMDGPU/no-shrink-extloads.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
diff --git a/test/CodeGen/AMDGPU/or.ll b/test/CodeGen/AMDGPU/or.ll
index eb082843fb82..8e6885c4fc5e 100644
--- a/test/CodeGen/AMDGPU/or.ll
+++ b/test/CodeGen/AMDGPU/or.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}or_v2i32:
diff --git a/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll b/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
index f83eb56dc6ed..776b151e3017 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) #0
declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) #0
diff --git a/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll b/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
index ecb513cd80b6..d8c7438e4d0d 100644
--- a/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
+++ b/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}reduce_i64_load_align_4_width_to_i32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
diff --git a/test/CodeGen/AMDGPU/regcoal-subrange-join.mir b/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
new file mode 100644
index 000000000000..bac348aaed70
--- /dev/null
+++ b/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
@@ -0,0 +1,162 @@
+# RUN: llc -march=amdgcn -run-pass simple-register-coalescing -o - %s | FileCheck --check-prefix=GCN %s
+#
+# See bug http://llvm.org/PR33524 for details of the problem being checked here
+# This test will provoke a subrange join (see annotations below) during simple register coalescing
+# Without a fix for PR33524 this causes an unreachable in SubRange Join
+#
+# GCN-DAG: undef %[[REG0:[0-9]+]].sub0 = COPY %sgpr5
+# GCN-DAG: undef %[[REG1:[0-9]+]].sub0 = COPY %sgpr2
+# GCN-DAG: %[[REG0]].sub1 = S_MOV_B32 1
+# GCN-DAG: %[[REG1]].sub1 = S_MOV_B32 1
+
+--- |
+ define amdgpu_vs void @regcoal-subrange-join(i32 inreg %arg, i32 inreg %arg1, i32 inreg %arg2, i32 inreg %arg3, i32 inreg %arg4, i32 inreg %arg5, i32 %arg6) local_unnamed_addr #0 {
+ ret void
+ }
+
+...
+---
+name: regcoal-subrange-join
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sreg_64 }
+ - { id: 1, class: vreg_128 }
+ - { id: 2, class: vreg_128 }
+ - { id: 3, class: vreg_128 }
+ - { id: 4, class: sreg_32_xm0 }
+ - { id: 5, class: sreg_32_xm0 }
+ - { id: 6, class: sreg_32_xm0, preferred-register: '%8' }
+ - { id: 7, class: vreg_128 }
+ - { id: 8, class: sreg_32_xm0, preferred-register: '%6' }
+ - { id: 9, class: vreg_128 }
+ - { id: 10, class: sgpr_32 }
+ - { id: 11, class: sgpr_32 }
+ - { id: 12, class: sgpr_32 }
+ - { id: 13, class: sgpr_32 }
+ - { id: 14, class: sgpr_32 }
+ - { id: 15, class: sgpr_32 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: sreg_32_xm0 }
+ - { id: 18, class: sreg_64 }
+ - { id: 19, class: sreg_32_xm0 }
+ - { id: 20, class: sreg_32_xm0 }
+ - { id: 21, class: sreg_64 }
+ - { id: 22, class: sreg_32_xm0_xexec }
+ - { id: 23, class: sreg_32_xm0 }
+ - { id: 24, class: sreg_64_xexec }
+ - { id: 25, class: sreg_128 }
+ - { id: 26, class: sreg_64_xexec }
+ - { id: 27, class: sreg_32_xm0_xexec }
+ - { id: 28, class: sreg_32_xm0 }
+ - { id: 29, class: vgpr_32 }
+ - { id: 30, class: vgpr_32 }
+ - { id: 31, class: vgpr_32 }
+ - { id: 32, class: vgpr_32 }
+ - { id: 33, class: vgpr_32 }
+ - { id: 34, class: vgpr_32 }
+ - { id: 35, class: vgpr_32 }
+ - { id: 36, class: vgpr_32 }
+ - { id: 37, class: vgpr_32 }
+ - { id: 38, class: sreg_128 }
+ - { id: 39, class: sreg_64_xexec }
+ - { id: 40, class: sreg_32_xm0_xexec }
+ - { id: 41, class: sreg_32_xm0 }
+ - { id: 42, class: vgpr_32 }
+ - { id: 43, class: vgpr_32 }
+ - { id: 44, class: vgpr_32 }
+ - { id: 45, class: vgpr_32 }
+ - { id: 46, class: vgpr_32 }
+ - { id: 47, class: vgpr_32 }
+ - { id: 48, class: vgpr_32 }
+ - { id: 49, class: vgpr_32 }
+ - { id: 50, class: vgpr_32 }
+ - { id: 51, class: sreg_128 }
+ - { id: 52, class: vgpr_32 }
+ - { id: 53, class: vgpr_32 }
+ - { id: 54, class: vgpr_32 }
+ - { id: 55, class: vgpr_32 }
+ - { id: 56, class: vreg_128 }
+ - { id: 57, class: vreg_128 }
+ - { id: 58, class: vreg_128 }
+ - { id: 59, class: sreg_32_xm0 }
+ - { id: 60, class: sreg_32_xm0 }
+ - { id: 61, class: vreg_128 }
+liveins:
+ - { reg: '%sgpr2', virtual-reg: '%12' }
+ - { reg: '%sgpr5', virtual-reg: '%15' }
+body: |
+ bb.0:
+ liveins: %sgpr2, %sgpr5
+
+ %15 = COPY killed %sgpr5
+ %12 = COPY killed %sgpr2
+ %17 = S_MOV_B32 1
+ undef %18.sub1 = COPY %17
+ %0 = COPY %18
+ %0.sub0 = COPY killed %12
+ %21 = COPY killed %18
+ %21.sub0 = COPY killed %15
+ %22 = S_LOAD_DWORD_IMM killed %21, 2, 0
+ %23 = S_MOV_B32 491436
+ undef %24.sub0 = COPY killed %22
+ %24.sub1 = COPY killed %23
+ %25 = S_LOAD_DWORDX4_IMM killed %24, 0, 0
+ %1 = COPY killed %25
+ %26 = S_LOAD_DWORDX2_IMM %0, 2, 0
+ dead %27 = S_LOAD_DWORD_IMM killed %26, 0, 0
+ S_CBRANCH_SCC0 %bb.1, implicit undef %scc
+
+ bb.5:
+ %58 = COPY killed %1
+ %59 = COPY killed %17
+ S_BRANCH %bb.2
+
+ bb.1:
+ %30 = V_MOV_B32_e32 1036831949, implicit %exec
+ %31 = V_ADD_F32_e32 %30, %1.sub3, implicit %exec
+ %33 = V_ADD_F32_e32 %30, %1.sub2, implicit %exec
+ %35 = V_ADD_F32_e32 %30, %1.sub1, implicit %exec
+ %37 = V_ADD_F32_e32 killed %30, killed %1.sub0, implicit %exec
+ undef %56.sub0 = COPY killed %37
+ %56.sub1 = COPY killed %35
+ %56.sub2 = COPY killed %33
+ %56.sub3 = COPY killed %31
+ %28 = S_MOV_B32 0
+ %2 = COPY killed %56
+ %58 = COPY killed %2
+ %59 = COPY killed %28
+
+ bb.2:
+ %4 = COPY killed %59
+ %3 = COPY killed %58
+ %39 = S_LOAD_DWORDX2_IMM killed %0, 6, 0
+ %40 = S_LOAD_DWORD_IMM killed %39, 0, 0
+ %43 = V_MOV_B32_e32 -1102263091, implicit %exec
+ %60 = COPY killed %4
+ %61 = COPY killed %3
+
+ bb.3:
+ successors: %bb.3, %bb.4
+
+ %7 = COPY killed %61
+ %6 = COPY killed %60
+ %8 = S_ADD_I32 killed %6, 1, implicit-def dead %scc
+ %44 = V_ADD_F32_e32 %43, %7.sub3, implicit %exec
+ %46 = V_ADD_F32_e32 %43, %7.sub2, implicit %exec
+ %48 = V_ADD_F32_e32 %43, %7.sub1, implicit %exec
+ %50 = V_ADD_F32_e32 %43, killed %7.sub0, implicit %exec
+ undef %57.sub0 = COPY killed %50
+ %57.sub1 = COPY killed %48
+ %57.sub2 = COPY %46
+ %57.sub3 = COPY killed %44
+ S_CMP_LT_I32 %8, %40, implicit-def %scc
+ %60 = COPY killed %8
+ %61 = COPY killed %57
+ S_CBRANCH_SCC1 %bb.3, implicit killed %scc
+ S_BRANCH %bb.4
+
+ bb.4:
+ EXP 32, undef %53, undef %54, killed %46, undef %55, 0, 0, 15, implicit %exec
+ S_ENDPGM
+
+...
diff --git a/test/CodeGen/AMDGPU/reorder-stores.ll b/test/CodeGen/AMDGPU/reorder-stores.ll
index ff4069226a62..260b32ed3406 100644
--- a/test/CodeGen/AMDGPU/reorder-stores.ll
+++ b/test/CodeGen/AMDGPU/reorder-stores.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}no_reorder_v2f64_global_load_store:
; SI: buffer_load_dwordx4
diff --git a/test/CodeGen/AMDGPU/rotl.i64.ll b/test/CodeGen/AMDGPU/rotl.i64.ll
index 266490718dd1..fa29d789cebe 100644
--- a/test/CodeGen/AMDGPU/rotl.i64.ll
+++ b/test/CodeGen/AMDGPU/rotl.i64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=BOTH %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=BOTH %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=BOTH %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=BOTH %s
; BOTH-LABEL: {{^}}s_rotl_i64:
; BOTH-DAG: s_lshl_b64
diff --git a/test/CodeGen/AMDGPU/rotr.i64.ll b/test/CodeGen/AMDGPU/rotr.i64.ll
index 9eda479cd25c..af58b404ca6c 100644
--- a/test/CodeGen/AMDGPU/rotr.i64.ll
+++ b/test/CodeGen/AMDGPU/rotr.i64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=BOTH %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=BOTH %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=BOTH %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=BOTH %s
; BOTH-LABEL: {{^}}s_rotr_i64:
; BOTH-DAG: s_sub_i32
diff --git a/test/CodeGen/AMDGPU/rsq.ll b/test/CodeGen/AMDGPU/rsq.ll
index 9462683efe0e..204eeb998386 100644
--- a/test/CodeGen/AMDGPU/rsq.ll
+++ b/test/CodeGen/AMDGPU/rsq.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mattr=-fp32-denormals -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=-fp32-denormals -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI %s
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
declare float @llvm.sqrt.f32(float) nounwind readnone
@@ -48,8 +48,8 @@ define amdgpu_kernel void @rsq_f32_sgpr(float addrspace(1)* noalias %out, float
; SI-UNSAFE-DAG: v_rsq_f32_e32 [[RSQA:v[0-9]+]], [[A]]
; SI-UNSAFE-DAG: v_rcp_f32_e32 [[RCPB:v[0-9]+]], [[B]]
-; SI-UNSAFE-DAG: v_mul_f32_e32 [[TMP:v[0-9]+]], [[RCPB]], [[RSQA]]
-; SI-UNSAFE: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
+; SI-UNSAFE-DAG: v_mul_f32_e32 [[TMP:v[0-9]+]], [[RSQA]], [[RCPB]]
+; SI-UNSAFE: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
; SI-UNSAFE: buffer_store_dword [[RESULT]]
; SI-SAFE-NOT: v_rsq_f32
diff --git a/test/CodeGen/AMDGPU/s_movk_i32.ll b/test/CodeGen/AMDGPU/s_movk_i32.ll
index a131aaa3dfb4..797fbc2712b0 100644
--- a/test/CodeGen/AMDGPU/s_movk_i32.ll
+++ b/test/CodeGen/AMDGPU/s_movk_i32.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}s_movk_i32_k0:
; SI-DAG: s_mov_b32 [[LO_S_IMM:s[0-9]+]], 0xffff{{$}}
diff --git a/test/CodeGen/AMDGPU/sad.ll b/test/CodeGen/AMDGPU/sad.ll
index f7a1c65881d0..ee56e9053fd3 100644
--- a/test/CodeGen/AMDGPU/sad.ll
+++ b/test/CodeGen/AMDGPU/sad.ll
@@ -134,8 +134,8 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(i32 addrspace(1)* %out,
; GCN-LABEL: {{^}}v_sad_u32_multi_use_select_pat2:
; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: v_cmp_gt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_cmp_gt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DAG: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
%icmp0 = icmp ugt i32 %a, %b
%sub0 = sub i32 %a, %b
diff --git a/test/CodeGen/AMDGPU/saddo.ll b/test/CodeGen/AMDGPU/saddo.ll
index 586a455b2b91..09e87d524419 100644
--- a/test/CodeGen/AMDGPU/saddo.ll
+++ b/test/CodeGen/AMDGPU/saddo.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs< %s
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/salu-to-valu.ll b/test/CodeGen/AMDGPU/salu-to-valu.ll
index 6e1dd1638333..d5b2fa0b6754 100644
--- a/test/CodeGen/AMDGPU/salu-to-valu.ll
+++ b/test/CodeGen/AMDGPU/salu-to-valu.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=CI %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI --check-prefix=GCN-HSA %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=CI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI --check-prefix=GCN-HSA %s
declare i32 @llvm.amdgcn.workitem.id.x() #0
declare i32 @llvm.amdgcn.workitem.id.y() #0
diff --git a/test/CodeGen/AMDGPU/scalar_to_vector.ll b/test/CodeGen/AMDGPU/scalar_to_vector.ll
index 62d0d9367885..0f09fa17423e 100644
--- a/test/CodeGen/AMDGPU/scalar_to_vector.ll
+++ b/test/CodeGen/AMDGPU/scalar_to_vector.ll
@@ -1,12 +1,12 @@
-; RUN: llc -march=amdgcn -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; XXX - Why the packing?
; GCN-LABEL: {{^}}scalar_to_vector_v2i32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]],
; GCN: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 16, [[VAL]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[SHR]]
-; GCN: v_or_b32_e32 v[[OR:[0-9]+]], [[SHL]], [[SHR]]
+; GCN: v_or_b32_e32 v[[OR:[0-9]+]], [[SHR]], [[SHL]]
; GCN: v_mov_b32_e32 v[[COPY:[0-9]+]], v[[OR]]
; GCN: buffer_store_dwordx2 v{{\[}}[[OR]]:[[COPY]]{{\]}}
define amdgpu_kernel void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
diff --git a/test/CodeGen/AMDGPU/schedule-global-loads.ll b/test/CodeGen/AMDGPU/schedule-global-loads.ll
index 44d46086f02a..2dddba8bccc7 100644
--- a/test/CodeGen/AMDGPU/schedule-global-loads.ll
+++ b/test/CodeGen/AMDGPU/schedule-global-loads.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
; FIXME: This currently doesn't do a great job of clustering the
; loads, which end up with extra moves between them. Right now, it
diff --git a/test/CodeGen/AMDGPU/scratch-buffer.ll b/test/CodeGen/AMDGPU/scratch-buffer.ll
index 6b1e85915a11..4ae9871865f5 100644
--- a/test/CodeGen/AMDGPU/scratch-buffer.ll
+++ b/test/CodeGen/AMDGPU/scratch-buffer.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs -march=amdgcn < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -march=amdgcn < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=GCN %s
; When a frame index offset is more than 12-bits, make sure we don't store
; it in mubuf's offset field.
diff --git a/test/CodeGen/AMDGPU/scratch-simple.ll b/test/CodeGen/AMDGPU/scratch-simple.ll
index abd15f1fb47f..6ed730ad60f4 100644
--- a/test/CodeGen/AMDGPU/scratch-simple.ll
+++ b/test/CodeGen/AMDGPU/scratch-simple.ll
@@ -12,10 +12,8 @@
; GCN-DAG: v_lshlrev_b32_e32 [[BYTES:v[0-9]+]], 2, v0
; GCN-DAG: v_and_b32_e32 [[CLAMP_IDX:v[0-9]+]], 0x1fc, [[BYTES]]
-; GCN-DAG: v_mov_b32_e32 [[C200:v[0-9]+]], 0x200
-; GCN-DAG: v_mov_b32_e32 [[C400:v[0-9]+]], 0x400
-; GCN-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], [[C200]], [[CLAMP_IDX]]
-; GCN-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], [[C400]], [[CLAMP_IDX]]
+; GCN-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], 0x200, [[CLAMP_IDX]]
+; GCN-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], 0x400, [[CLAMP_IDX]]
; GCN: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
; GCN: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
diff --git a/test/CodeGen/AMDGPU/sdiv.ll b/test/CodeGen/AMDGPU/sdiv.ll
index 7ec6ca809b68..305107f690fb 100644
--- a/test/CodeGen/AMDGPU/sdiv.ll
+++ b/test/CodeGen/AMDGPU/sdiv.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; The code generated by sdiv is long and complex and may frequently change.
; The goal of this test is to make sure the ISel doesn't fail.
diff --git a/test/CodeGen/AMDGPU/sdwa-peephole.ll b/test/CodeGen/AMDGPU/sdwa-peephole.ll
index 0dc7cc309f7c..0d181c2c34b8 100644
--- a/test/CodeGen/AMDGPU/sdwa-peephole.ll
+++ b/test/CodeGen/AMDGPU/sdwa-peephole.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole=0 -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=NOSDWA -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=SDWA -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sdwa-peephole -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=SDWA -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole=0 -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=NOSDWA -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=SDWA -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -amdgpu-sdwa-peephole -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=SDWA -check-prefix=GCN %s
; GCN-LABEL: {{^}}add_shr_i32:
; NOSDWA: v_lshrrev_b32_e32 v[[DST:[0-9]+]], 16, v{{[0-9]+}}
@@ -35,7 +35,7 @@ define amdgpu_kernel void @sub_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)*
; GCN-LABEL: {{^}}mul_shr_i32:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
-; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v[[DST1]], v[[DST0]]
+; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v[[DST0]], v[[DST1]]
; NOSDWA-NOT: v_mul_u32_u24_sdwa
; SDWA: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
@@ -68,9 +68,9 @@ entry:
; GCN-LABEL: {{^}}mul_v2i16:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
-; NOSDWA: v_mul_u32_u24_e32 v[[DST_MUL:[0-9]+]], v[[DST1]], v[[DST0]]
+; NOSDWA: v_mul_u32_u24_e32 v[[DST_MUL:[0-9]+]], v[[DST0]], v[[DST1]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MUL]]
-; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v[[DST_SHL]], v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mul_u32_u24_sdwa
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL_LO:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
@@ -168,14 +168,14 @@ entry:
; GCN-LABEL: {{^}}mul_v2half:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
-; NOSDWA: v_mul_f16_e32 v[[DST_MUL:[0-9]+]], v[[DST1]], v[[DST0]]
+; NOSDWA: v_mul_f16_e32 v[[DST_MUL:[0-9]+]], v[[DST0]], v[[DST1]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MUL]]
-; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v[[DST_SHL]], v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mul_f16_sdwa
; VI-DAG: v_mul_f16_sdwa v[[DST_MUL_HI:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_e32 v[[DST_MUL_LO:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL_HI]], v[[DST_MUL_LO]]
+; VI: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL_LO]], v[[DST_MUL_HI]]
; GFX9: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
@@ -362,9 +362,9 @@ entry:
; GCN-LABEL: {{^}}mac_v2half:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
-; NOSDWA: v_mac_f16_e32 v[[DST_MAC:[0-9]+]], v[[DST1]], v[[DST0]]
+; NOSDWA: v_mac_f16_e32 v[[DST_MAC:[0-9]+]], v[[DST0]], v[[DST1]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MAC]]
-; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v[[DST_SHL]], v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mac_f16_sdwa
; VI: v_mac_f16_sdwa v[[DST_MAC:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
@@ -491,7 +491,7 @@ entry:
%tmp17 = shufflevector <2 x i8> %tmp10, <2 x i8> %tmp12, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%tmp18 = shufflevector <2 x i8> %tmp14, <2 x i8> %tmp16, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%tmp19 = shufflevector <4 x i8> %tmp17, <4 x i8> %tmp18, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-
+
%arrayidx5 = getelementptr inbounds <8 x i8>, <8 x i8> addrspace(1)* %destValues, i64 %idxprom
store <8 x i8> %tmp19, <8 x i8> addrspace(1)* %arrayidx5, align 8
ret void
diff --git a/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll b/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
index 3417eb02b361..e0619251f920 100644
--- a/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
+++ b/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
@@ -103,7 +103,7 @@ define amdgpu_kernel void @add_select_multi_use_rhs_fabs_fabs_f32(i32 %c) #0 {
; GCN: v_and_b32_e32 [[X_ABS:v[0-9]+]], 0x7fffffff, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X_ABS]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
define amdgpu_kernel void @add_select_fabs_var_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -122,7 +122,7 @@ define amdgpu_kernel void @add_select_fabs_var_f32(i32 %c) #0 {
; GCN: v_and_b32_e32 [[FABS_X:v[0-9]+]], 0x7fffffff, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -1.0, [[FABS_X]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
define amdgpu_kernel void @add_select_fabs_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -154,7 +154,7 @@ define amdgpu_kernel void @add_select_fabs_negk_negk_f32(i32 %c) #0 {
; GCN: buffer_load_dword [[X:v[0-9]+]]
; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], 1.0, 2.0, s
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[X]]
define amdgpu_kernel void @add_select_posk_posk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -171,7 +171,7 @@ define amdgpu_kernel void @add_select_posk_posk_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[FABS_X:v[0-9]+]], 0x7fffffff, [[X]]
; GCN-DAG: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -1.0, [[FABS_X]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
define amdgpu_kernel void @add_select_negk_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -191,7 +191,7 @@ define amdgpu_kernel void @add_select_negk_fabs_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[FABS_X:v[0-9]+]], 0x7fffffff, [[X]]
; GCN-DAG: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[K]], [[FABS_X]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
define amdgpu_kernel void @add_select_negliteralk_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -245,7 +245,7 @@ define amdgpu_kernel void @add_select_posk_fabs_f32(i32 %c) #0 {
; GCN: buffer_load_dword [[Z:v[0-9]+]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
define amdgpu_kernel void @add_select_fneg_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -266,8 +266,8 @@ define amdgpu_kernel void @add_select_fneg_fneg_f32(i32 %c) #0 {
; GCN: buffer_load_dword [[W:v[0-9]+]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
-; GCN-DAG: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
-; GCN-DAG: v_subrev_f32_e32 v{{[0-9]+}}, [[X]], [[W]]
+; GCN-DAG: v_sub_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
+; GCN-DAG: v_sub_f32_e32 v{{[0-9]+}}, [[W]], [[X]]
define amdgpu_kernel void @add_select_multi_use_lhs_fneg_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -291,7 +291,7 @@ define amdgpu_kernel void @add_select_multi_use_lhs_fneg_fneg_f32(i32 %c) #0 {
; GCN-DAG: v_xor_b32_e32 [[NEG_X:v[0-9]+]], 0x80000000, [[X]]
; GCN-DAG: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
-; GCN-DAG: v_subrev_f32_e32 [[ADD:v[0-9]+]], [[SELECT]], [[Z]]
+; GCN-DAG: v_sub_f32_e32 [[ADD:v[0-9]+]], [[Z]], [[SELECT]]
; GCN: buffer_store_dword [[ADD]]
; GCN: buffer_store_dword [[NEG_X]]
@@ -316,8 +316,8 @@ define amdgpu_kernel void @add_select_multi_store_use_lhs_fneg_fneg_f32(i32 %c)
; GCN: buffer_load_dword [[W:v[0-9]+]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
-; GCN-DAG: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
-; GCN-DAG: v_subrev_f32_e32 v{{[0-9]+}}, [[Y]], [[W]]
+; GCN-DAG: v_sub_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
+; GCN-DAG: v_sub_f32_e32 v{{[0-9]+}}, [[W]], [[Y]]
define amdgpu_kernel void @add_select_multi_use_rhs_fneg_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -341,7 +341,7 @@ define amdgpu_kernel void @add_select_multi_use_rhs_fneg_fneg_f32(i32 %c) #0 {
; GCN: v_xor_b32_e32 [[X_NEG:v[0-9]+]], 0x80000000, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X_NEG]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
define amdgpu_kernel void @add_select_fneg_var_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -359,7 +359,7 @@ define amdgpu_kernel void @add_select_fneg_var_f32(i32 %c) #0 {
; GCN: buffer_load_dword [[Y:v[0-9]+]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 1.0, [[X]], vcc
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
define amdgpu_kernel void @add_select_fneg_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -377,7 +377,7 @@ define amdgpu_kernel void @add_select_fneg_negk_f32(i32 %c) #0 {
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0xbe22f983
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[K]], [[X]], vcc
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
define amdgpu_kernel void @add_select_fneg_inv2pi_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -397,7 +397,7 @@ define amdgpu_kernel void @add_select_fneg_inv2pi_f32(i32 %c) #0 {
; SI: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[K]], [[X]], vcc
; VI: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 0.15915494, [[X]], vcc
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
define amdgpu_kernel void @add_select_fneg_neginv2pi_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -414,7 +414,7 @@ define amdgpu_kernel void @add_select_fneg_neginv2pi_f32(i32 %c) #0 {
; GCN: v_cmp_eq_u32_e64
; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, -2.0, s
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[X]]
define amdgpu_kernel void @add_select_negk_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -431,7 +431,7 @@ define amdgpu_kernel void @add_select_negk_negk_f32(i32 %c) #0 {
; GCN: v_cmp_eq_u32_e64
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[K1]], [[K0]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[X]]
define amdgpu_kernel void @add_select_negliteralk_negliteralk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -445,7 +445,7 @@ define amdgpu_kernel void @add_select_negliteralk_negliteralk_f32(i32 %c) #0 {
; GCN: buffer_load_dword [[X:v[0-9]+]]
; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, -2.0, s
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[X]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
define amdgpu_kernel void @add_select_fneg_negk_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -462,7 +462,7 @@ define amdgpu_kernel void @add_select_fneg_negk_negk_f32(i32 %c) #0 {
; GCN: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 1.0, [[X]], vcc
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
define amdgpu_kernel void @add_select_negk_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -479,7 +479,7 @@ define amdgpu_kernel void @add_select_negk_fneg_f32(i32 %c) #0 {
; GCN: buffer_load_dword [[Y:v[0-9]+]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -1.0, [[X]], vcc
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
define amdgpu_kernel void @add_select_fneg_posk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -497,7 +497,7 @@ define amdgpu_kernel void @add_select_fneg_posk_f32(i32 %c) #0 {
; GCN: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -1.0, [[X]], vcc
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
define amdgpu_kernel void @add_select_posk_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -517,7 +517,7 @@ define amdgpu_kernel void @add_select_posk_fneg_f32(i32 %c) #0 {
; GCN-DAG: v_or_b32_e32 [[X_NEG_ABS:v[0-9]+]], 0x80000000, [[X]]
; GCN-DAG: v_and_b32_e32 [[Y_ABS:v[0-9]+]], 0x7fffffff, [[Y]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_ABS]], [[X_NEG_ABS]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
define amdgpu_kernel void @add_select_negfabs_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -540,7 +540,7 @@ define amdgpu_kernel void @add_select_negfabs_fabs_f32(i32 %c) #0 {
; GCN-DAG: v_or_b32_e32 [[Y_NEG_ABS:v[0-9]+]], 0x80000000, [[Y]]
; GCN-DAG: v_and_b32_e32 [[X_ABS:v[0-9]+]], 0x7fffffff, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_NEG_ABS]], [[X_ABS]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
define amdgpu_kernel void @add_select_fabs_negfabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -563,7 +563,7 @@ define amdgpu_kernel void @add_select_fabs_negfabs_f32(i32 %c) #0 {
; GCN-DAG: v_xor_b32_e32 [[X_NEG:v[0-9]+]], 0x80000000, [[X]]
; GCN-DAG: v_and_b32_e32 [[Y_ABS:v[0-9]+]], 0x7fffffff, [[Y]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_ABS]], [[X_NEG]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
define amdgpu_kernel void @add_select_neg_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -585,7 +585,7 @@ define amdgpu_kernel void @add_select_neg_fabs_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[X_ABS:v[0-9]+]], 0x7fffffff, [[X]]
; GCN-DAG: v_xor_b32_e32 [[Y_NEG:v[0-9]+]], 0x80000000, [[Y]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_NEG]], [[X_ABS]], vcc
-; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
define amdgpu_kernel void @add_select_fabs_neg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -606,7 +606,7 @@ define amdgpu_kernel void @add_select_fabs_neg_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[Y_ABS:v[0-9]+]], 0x7fffffff, [[Y]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_ABS]], [[X]], vcc
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
define amdgpu_kernel void @add_select_neg_negfabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -628,7 +628,7 @@ define amdgpu_kernel void @add_select_neg_negfabs_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[X_ABS:v[0-9]+]], 0x7fffffff, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[X_ABS]], [[Y]], vcc
-; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
define amdgpu_kernel void @add_select_negfabs_neg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
diff --git a/test/CodeGen/AMDGPU/select-vectors.ll b/test/CodeGen/AMDGPU/select-vectors.ll
index ebbc675b2bab..b77ebcf5bf52 100644
--- a/test/CodeGen/AMDGPU/select-vectors.ll
+++ b/test/CodeGen/AMDGPU/select-vectors.ll
@@ -1,6 +1,6 @@
-; RUN: llc -verify-machineinstrs -march=amdgcn < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -march=amdgcn < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -verify-machineinstrs -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
; Test expansion of scalar selects on vectors.
; Evergreen not enabled since it seems to be having problems with doubles.
diff --git a/test/CodeGen/AMDGPU/select.f16.ll b/test/CodeGen/AMDGPU/select.f16.ll
index 92ee2eb7f403..e79ce3af0cf9 100644
--- a/test/CodeGen/AMDGPU/select.f16.ll
+++ b/test/CodeGen/AMDGPU/select.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}select_f16:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
@@ -104,8 +104,8 @@ entry:
; SI: v_cndmask_b32_e32 v[[R_F32:[0-9]+]], 0.5, v[[D_F32]], vcc
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_cmp_nlt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
; VI: v_mov_b32_e32 v[[C_F16:[0-9]+]], 0x3800{{$}}
+; VI: v_cmp_nlt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
; VI: v_cndmask_b32_e32 v[[R_F16:[0-9]+]], v[[C_F16]], v[[D_F16]], vcc
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
@@ -134,8 +134,8 @@ entry:
; SI: v_cmp_lt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
; SI: v_cndmask_b32_e32 v[[R_F32:[0-9]+]], 0.5, v[[C_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_cmp_lt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
; VI: v_mov_b32_e32 v[[D_F16:[0-9]+]], 0x3800{{$}}
+; VI: v_cmp_lt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
; VI: v_cndmask_b32_e32 v[[R_F16:[0-9]+]], v[[D_F16]], v[[C_F16]], vcc
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
@@ -159,16 +159,16 @@ entry:
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI: v_cmp_lt_f32_e64
; SI: v_cmp_lt_f32_e32
; SI: v_cndmask_b32_e32
-; SI: v_cndmask_b32_e64
+; SI: v_cmp_lt_f32_e32
+; SI: v_cndmask_b32_e32
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
-; VI: v_cmp_lt_f16_e64
; VI: v_cmp_lt_f16_e32
-; VI: v_cndmask_b32_e64
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_lt_f16_e32
; VI: v_cndmask_b32_e32
; GCN: s_endpgm
@@ -196,13 +196,17 @@ entry:
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI-DAG: v_cmp_gt_f32_e64
-; SI-DAG: v_cmp_lt_f32_e32 vcc, 0.5
-; VI: v_cmp_lt_f16_e32
-; VI: v_cmp_gt_f16_e64
-; GCN: v_cndmask_b32_e32
-; GCN: v_cndmask_b32_e64
+; SI: v_cmp_lt_f32_e32 vcc, 0.5
+; SI: v_cndmask_b32_e32
+; SI: v_cmp_gt_f32_e32
+; SI: v_cndmask_b32_e32
+
+; VI: v_cmp_lt_f16_e32
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_gt_f16_e32
+; VI: v_cndmask_b32_e32
+
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
; GCN: s_endpgm
@@ -228,13 +232,16 @@ entry:
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI-DAG: v_cmp_lt_f32_e64
-; SI-DAG: v_cmp_gt_f32_e32 vcc, 0.5
-; VI: v_cmp_gt_f16_e32
-; VI: v_cmp_lt_f16_e64
-; GCN: v_cndmask_b32_e32
-; GCN: v_cndmask_b32_e64
+; SI: v_cmp_gt_f32_e32 vcc, 0.5
+; SI: v_cndmask_b32_e32
+; SI: v_cmp_lt_f32_e32
+; SI: v_cndmask_b32_e32
+
+; VI: v_cmp_gt_f16_e32
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_lt_f16_e32
+; VI: v_cndmask_b32_e32
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
@@ -263,8 +270,8 @@ entry:
; SI: v_cvt_f32_f16_e32
; SI: v_cmp_nlt_f32_e32
-; SI: v_cmp_nlt_f32_e64
-; SI: v_cndmask_b32_e64
+; SI: v_cndmask_b32_e32
+; SI: v_cmp_nlt_f32_e32
; SI: v_cndmask_b32_e32
; VI: v_cmp_nlt_f16_e32
@@ -298,13 +305,17 @@ entry:
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI: v_cmp_lt_f32_e64
+
; SI: v_cmp_lt_f32_e32
+; SI: v_cndmask_b32
+; SI: v_cmp_lt_f32_e32
+; SI: v_cndmask_b32
; VI: v_cmp_lt_f16_e32
-; VI: v_cmp_lt_f16_e64
-; GCN: v_cndmask_b32
-; GCN: v_cndmask_b32
+; VI: v_cndmask_b32
+; VI: v_cmp_lt_f16_e32
+; VI: v_cndmask_b32
+
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/setcc-fneg-constant.ll b/test/CodeGen/AMDGPU/setcc-fneg-constant.ll
index 8d455d84bf9e..bcaa1aa54c15 100644
--- a/test/CodeGen/AMDGPU/setcc-fneg-constant.ll
+++ b/test/CodeGen/AMDGPU/setcc-fneg-constant.ll
@@ -7,7 +7,7 @@
; GCN: buffer_load_dword [[B:v[0-9]+]]
; GCN: buffer_load_dword [[C:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[B]], [[A]]
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[A]], [[B]]
; GCN: v_cmp_eq_f32_e32 vcc, -4.0, [[MUL]]
; GCN: buffer_store_dword [[MUL]]
define amdgpu_kernel void @multi_use_fneg_src() #0 {
@@ -30,7 +30,7 @@ define amdgpu_kernel void @multi_use_fneg_src() #0 {
; GCN: buffer_load_dword [[B:v[0-9]+]]
; GCN: buffer_load_dword [[C:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[B]], [[A]]
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[A]], [[B]]
; GCN: v_cmp_eq_f32_e32 vcc, -4.0, [[A]]
; GCN: v_mul_f32_e64 [[USE1:v[0-9]+]], [[MUL]], -[[MUL]]
define amdgpu_kernel void @multi_foldable_use_fneg_src() #0 {
@@ -78,7 +78,7 @@ define amdgpu_kernel void @multi_use_fneg() #0 {
; GCN: buffer_load_dword [[A:v[0-9]+]]
; GCN: buffer_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[MUL0:v[0-9]+]], [[B]], [[A]]
+; GCN: v_mul_f32_e32 [[MUL0:v[0-9]+]], [[A]], [[B]]
; GCN: v_cmp_eq_f32_e32 vcc, -4.0, [[MUL0]]
; GCN: v_mul_f32_e64 [[MUL1:v[0-9]+]], -[[MUL0]], [[MUL0]]
; GCN: buffer_store_dword [[MUL1]]
diff --git a/test/CodeGen/AMDGPU/setcc.ll b/test/CodeGen/AMDGPU/setcc.ll
index f63719d62a84..a3bf167e756a 100644
--- a/test/CodeGen/AMDGPU/setcc.ll
+++ b/test/CodeGen/AMDGPU/setcc.ll
@@ -7,8 +7,8 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[3].X, KC0[3].Z
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[2].W, KC0[3].Y
-; GCN-DAG: v_cmp_eq_u32_e32
-; GCN-DAG: v_cmp_eq_u32_e64
+; GCN: v_cmp_eq_u32_e32
+; GCN: v_cmp_eq_u32_e32
define amdgpu_kernel void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
%result = icmp eq <2 x i32> %a, %b
%sext = sext <2 x i1> %result to <2 x i32>
@@ -23,9 +23,9 @@ define amdgpu_kernel void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; GCN: v_cmp_eq_u32_e32
-; GCN: v_cmp_eq_u32_e64
-; GCN: v_cmp_eq_u32_e64
-; GCN: v_cmp_eq_u32_e64
+; GCN: v_cmp_eq_u32_e32
+; GCN: v_cmp_eq_u32_e32
+; GCN: v_cmp_eq_u32_e32
define amdgpu_kernel void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1)* %in
diff --git a/test/CodeGen/AMDGPU/sext-in-reg.ll b/test/CodeGen/AMDGPU/sext-in-reg.ll
index 160fb6a038fe..5b4d9ed259b6 100644
--- a/test/CodeGen/AMDGPU/sext-in-reg.ll
+++ b/test/CodeGen/AMDGPU/sext-in-reg.ll
@@ -1,7 +1,7 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FIXME: i16 promotion pass ruins the scalar cases when legal.
; FIXME: r600 fails verifier
diff --git a/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll b/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll
index fb0bbaa9cbf2..8250bad7b0a1 100644
--- a/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll
+++ b/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
; Copy VGPR -> SGPR used twice as an instruction operand, which is then
; used in an REG_SEQUENCE that also needs to be handled.
diff --git a/test/CodeGen/AMDGPU/sgpr-copy.ll b/test/CodeGen/AMDGPU/sgpr-copy.ll
index 931051102cd5..3b24cf82d783 100644
--- a/test/CodeGen/AMDGPU/sgpr-copy.ll
+++ b/test/CodeGen/AMDGPU/sgpr-copy.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}phi1:
; CHECK: s_buffer_load_dword [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0x0
diff --git a/test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll b/test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll
index 4f7b61adc91d..2f9eed457ab6 100644
--- a/test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll
+++ b/test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; Extract the high bit of the 1st quarter
; GCN-LABEL: {{^}}v_uextract_bit_31_i128:
@@ -98,7 +98,7 @@ define amdgpu_kernel void @v_uextract_bit_127_i128(i128 addrspace(1)* %out, i128
; GCN-DAG: v_lshrrev_b32_e32 v[[ELT1PART:[0-9]+]], 2, v{{[[0-9]+}}
; GCN-DAG: v_bfe_u32 v[[ELT2PART:[0-9]+]], v[[VAL3]], 2, 2{{$}}
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; GCN-DAG: v_or_b32_e32 v[[OR0:[0-9]+]], v[[SHLLO]], v[[ELT1PART]]
+; GCN-DAG: v_or_b32_e32 v[[OR0:[0-9]+]], v[[ELT1PART]], v[[SHLLO]]
; GCN-DAG: v_mov_b32_e32 v[[ZERO1:[0-9]+]], v[[ZERO]]{{$}}
; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[OR0]]:[[ZERO1]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
diff --git a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
index c70eb9b9c4a5..670287ba7937 100644
--- a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
+++ b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; Make sure 64-bit BFE pattern does a 32-bit BFE on the relevant half.
diff --git a/test/CodeGen/AMDGPU/shift-i64-opts.ll b/test/CodeGen/AMDGPU/shift-i64-opts.ll
index 5306e190a4f9..f3faa39c64e6 100644
--- a/test/CodeGen/AMDGPU/shift-i64-opts.ll
+++ b/test/CodeGen/AMDGPU/shift-i64-opts.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=FAST64 -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=SLOW64 -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=FAST64 -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=SLOW64 -check-prefix=GCN %s
; lshr (i64 x), c: c > 32 => reg_sequence lshr (i32 hi_32(x)), (c - 32), 0
diff --git a/test/CodeGen/AMDGPU/shl.ll b/test/CodeGen/AMDGPU/shl.ll
index edc313ee323b..13ac9140b827 100644
--- a/test/CodeGen/AMDGPU/shl.ll
+++ b/test/CodeGen/AMDGPU/shl.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
; XUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() #0
diff --git a/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir b/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
index 6248d8a46daf..767118eb8d11 100644
--- a/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
+++ b/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
@@ -6,92 +6,7 @@
# that the post-RA run does manage to shrink it, but right now the
# resume crashes
---- |
- define amdgpu_kernel void @shrink_add_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %tid.ext = sext i32 %tid to i64
- %a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
- %b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
- %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
- %a = load volatile i32, i32 addrspace(1)* %a.ptr
- %b = load volatile i32, i32 addrspace(1)* %b.ptr
- %result = add i32 %a, %b
- store volatile i32 %result, i32 addrspace(1)* %out.gep
- ret void
- }
-
- define amdgpu_kernel void @shrink_sub_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %tid.ext = sext i32 %tid to i64
- %a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
- %b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
- %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
- %a = load volatile i32, i32 addrspace(1)* %a.ptr
- %b = load volatile i32, i32 addrspace(1)* %b.ptr
- %result = sub i32 %a, %b
- store volatile i32 %result, i32 addrspace(1)* %out.gep
- ret void
- }
-
- define amdgpu_kernel void @shrink_subrev_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %tid.ext = sext i32 %tid to i64
- %a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
- %b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
- %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
- %a = load volatile i32, i32 addrspace(1)* %a.ptr
- %b = load volatile i32, i32 addrspace(1)* %b.ptr
- %result = sub i32 %a, %b
- store volatile i32 %result, i32 addrspace(1)* %out.gep
- ret void
- }
-
- define amdgpu_kernel void @check_addc_src2_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %tid.ext = sext i32 %tid to i64
- %a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
- %b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
- %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
- %a = load volatile i32, i32 addrspace(1)* %a.ptr
- %b = load volatile i32, i32 addrspace(1)* %b.ptr
- %result = add i32 %a, %b
- store volatile i32 %result, i32 addrspace(1)* %out.gep
- ret void
- }
-
- define amdgpu_kernel void @shrink_addc_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %tid.ext = sext i32 %tid to i64
- %a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
- %b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
- %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
- %a = load volatile i32, i32 addrspace(1)* %a.ptr
- %b = load volatile i32, i32 addrspace(1)* %b.ptr
- %result = add i32 %a, %b
- store volatile i32 %result, i32 addrspace(1)* %out.gep
- ret void
- }
-
- define amdgpu_kernel void @shrink_addc_undef_vcc(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %tid.ext = sext i32 %tid to i64
- %a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
- %b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
- %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
- %a = load volatile i32, i32 addrspace(1)* %a.ptr
- %b = load volatile i32, i32 addrspace(1)* %b.ptr
- %result = add i32 %a, %b
- store volatile i32 %result, i32 addrspace(1)* %out.gep
- ret void
- }
-
- declare i32 @llvm.amdgcn.workitem.id.x() #1
-
- attributes #0 = { nounwind }
- attributes #1 = { nounwind readnone }
-
...
----
# GCN-LABEL: name: shrink_add_vop3{{$}}
# GCN: %29, %9 = V_ADD_I32_e64 %19, %17, implicit %exec
# GCN: %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
@@ -151,13 +66,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -166,11 +81,11 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
- %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
- %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%29, %9 = V_ADD_I32_e64 %19, %17, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
- BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
+ BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -235,13 +150,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -250,11 +165,11 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
- %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
- %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%29, %9 = V_SUB_I32_e64 %19, %17, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
- BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
+ BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -319,13 +234,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -334,11 +249,11 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
- %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
- %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%29, %9 = V_SUBREV_I32_e64 %19, %17, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
- BUFFER_STORE_DWORD_ADDR64 %29, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
+ BUFFER_STORE_DWORD_ADDR64 %29, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -402,13 +317,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -417,18 +332,18 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
- %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
- %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%9 = S_MOV_B64 0
%29, %vcc = V_ADDC_U32_e64 %19, %17, %9, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
- BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
+ BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
---
# GCN-LABEL: name: shrink_addc_vop3{{$}}
-# GCN: %29 = V_ADDC_U32_e32 %17, %19, implicit-def %vcc, implicit %vcc, implicit %exec
+# GCN: %29 = V_ADDC_U32_e32 %19, %17, implicit-def %vcc, implicit %vcc, implicit %exec
# GCN %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
name: shrink_addc_vop3
@@ -487,13 +402,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -502,19 +417,19 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
- %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
- %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%vcc = S_MOV_B64 0
%29, %vcc = V_ADDC_U32_e64 %19, %17, %vcc, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
- BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
+ BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
---
# GCN-LABEL: name: shrink_addc_undef_vcc{{$}}
-# GCN: %29 = V_ADDC_U32_e32 %17, %19, implicit-def %vcc, implicit undef %vcc, implicit %exec
+# GCN: %29 = V_ADDC_U32_e32 %19, %17, implicit-def %vcc, implicit undef %vcc, implicit %exec
# GCN: %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
name: shrink_addc_undef_vcc
alignment: 0
@@ -572,13 +487,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
- bb.0 (%ir-block.0):
+ bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
- %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
- %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -587,11 +502,11 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
- %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
- %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%29, %vcc = V_ADDC_U32_e64 %19, %17, undef %vcc, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
- BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
+ BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
diff --git a/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll b/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll
index 348c7200c0bc..17109187d538 100644
--- a/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll
+++ b/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=bonaire -enable-amdgpu-aa=0 -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -check-prefix=FUNC -check-prefix=CI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -enable-amdgpu-aa=0 -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -check-prefix=FUNC -check-prefix=CI %s
declare void @llvm.amdgcn.tbuffer.store.i32(i32, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1)
declare void @llvm.amdgcn.tbuffer.store.v4i32(<4 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1)
diff --git a/test/CodeGen/AMDGPU/sign_extend.ll b/test/CodeGen/AMDGPU/sign_extend.ll
index 3e452c214e98..c80945f390be 100644
--- a/test/CodeGen/AMDGPU/sign_extend.ll
+++ b/test/CodeGen/AMDGPU/sign_extend.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
; GCN-LABEL: {{^}}s_sext_i1_to_i32:
; GCN: v_cndmask_b32_e64
diff --git a/test/CodeGen/AMDGPU/sitofp.f16.ll b/test/CodeGen/AMDGPU/sitofp.f16.ll
index 574d1c0b2c78..0bcef99df39f 100644
--- a/test/CodeGen/AMDGPU/sitofp.f16.ll
+++ b/test/CodeGen/AMDGPU/sitofp.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}sitofp_i16_to_f16
; GCN: buffer_load_{{sshort|ushort}} v[[A_I16:[0-9]+]]
diff --git a/test/CodeGen/AMDGPU/sminmax.ll b/test/CodeGen/AMDGPU/sminmax.ll
index 827d672022eb..41430715f347 100644
--- a/test/CodeGen/AMDGPU/sminmax.ll
+++ b/test/CodeGen/AMDGPU/sminmax.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}s_abs_i32:
; GCN: s_abs_i32
@@ -18,7 +18,7 @@ define amdgpu_kernel void @s_abs_i32(i32 addrspace(1)* %out, i32 %val) nounwind
; FUNC-LABEL: {{^}}v_abs_i32:
; GCN: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SRC:v[0-9]+]]
-; GCN: v_max_i32_e32 {{v[0-9]+}}, [[NEG]], [[SRC]]
+; GCN: v_max_i32_e32 {{v[0-9]+}}, [[SRC]], [[NEG]]
; GCN: v_add_i32
; EG: MAX_INT
@@ -34,7 +34,7 @@ define amdgpu_kernel void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %
; GCN-LABEL: {{^}}v_abs_i32_repeat_user:
; GCN: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SRC:v[0-9]+]]
-; GCN: v_max_i32_e32 [[MAX:v[0-9]+]], [[NEG]], [[SRC]]
+; GCN: v_max_i32_e32 [[MAX:v[0-9]+]], [[SRC]], [[NEG]]
; GCN: v_mul_lo_i32 v{{[0-9]+}}, [[MAX]], [[MAX]]
define amdgpu_kernel void @v_abs_i32_repeat_user(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
%val = load i32, i32 addrspace(1)* %src, align 4
@@ -71,8 +71,8 @@ define amdgpu_kernel void @s_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
; GCN-DAG: v_sub_i32_e32 [[NEG0:v[0-9]+]], vcc, 0, [[SRC0:v[0-9]+]]
; GCN-DAG: v_sub_i32_e32 [[NEG1:v[0-9]+]], vcc, 0, [[SRC1:v[0-9]+]]
-; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[NEG0]], [[SRC0]]
-; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[NEG1]], [[SRC1]]
+; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC0]], [[NEG0]]
+; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC1]], [[NEG1]]
; GCN: v_add_i32
; GCN: v_add_i32
@@ -132,10 +132,10 @@ define amdgpu_kernel void @s_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %
; GCN-DAG: v_sub_i32_e32 [[NEG2:v[0-9]+]], vcc, 0, [[SRC2:v[0-9]+]]
; GCN-DAG: v_sub_i32_e32 [[NEG3:v[0-9]+]], vcc, 0, [[SRC3:v[0-9]+]]
-; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[NEG0]], [[SRC0]]
-; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[NEG1]], [[SRC1]]
-; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[NEG2]], [[SRC2]]
-; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[NEG3]], [[SRC3]]
+; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC0]], [[NEG0]]
+; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC1]], [[NEG1]]
+; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC2]], [[NEG2]]
+; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC3]], [[NEG3]]
; GCN: v_add_i32
; GCN: v_add_i32
@@ -184,8 +184,8 @@ define amdgpu_kernel void @s_min_max_i32(i32 addrspace(1)* %out0, i32 addrspace(
; GCN: {{buffer|flat}}_load_dword [[VAL0:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[VAL1:v[0-9]+]]
-; GCN-DAG: v_min_i32_e32 v{{[0-9]+}}, [[VAL1]], [[VAL0]]
-; GCN-DAG: v_max_i32_e32 v{{[0-9]+}}, [[VAL1]], [[VAL0]]
+; GCN-DAG: v_min_i32_e32 v{{[0-9]+}}, [[VAL0]], [[VAL1]]
+; GCN-DAG: v_max_i32_e32 v{{[0-9]+}}, [[VAL0]], [[VAL1]]
define amdgpu_kernel void @v_min_max_i32(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr0, i32 addrspace(1)* %ptr1) nounwind {
%val0 = load volatile i32, i32 addrspace(1)* %ptr0
%val1 = load volatile i32, i32 addrspace(1)* %ptr1
diff --git a/test/CodeGen/AMDGPU/sminmax.v2i16.ll b/test/CodeGen/AMDGPU/sminmax.v2i16.ll
index a9aac2d8abb7..27263429650d 100644
--- a/test/CodeGen/AMDGPU/sminmax.v2i16.ll
+++ b/test/CodeGen/AMDGPU/sminmax.v2i16.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CIVI -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=CIVI -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CIVI -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=CIVI -check-prefix=GCN %s
; GCN-LABEL: {{^}}s_abs_v2i16:
; GFX9: s_load_dword [[VAL:s[0-9]+]]
diff --git a/test/CodeGen/AMDGPU/spill-cfg-position.ll b/test/CodeGen/AMDGPU/spill-cfg-position.ll
index 1ca0919258a8..cbf9f37e29ef 100644
--- a/test/CodeGen/AMDGPU/spill-cfg-position.ll
+++ b/test/CodeGen/AMDGPU/spill-cfg-position.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -stress-regalloc=6 < %s | FileCheck %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -stress-regalloc=6 < %s | FileCheck %s
; Inline spiller can decide to move a spill as early as possible in the basic block.
; It will skip phis and labels, but we also need to make sure it skips instructions
diff --git a/test/CodeGen/AMDGPU/sra.ll b/test/CodeGen/AMDGPU/sra.ll
index 44cfdf6398ae..74618b263bad 100644
--- a/test/CodeGen/AMDGPU/sra.ll
+++ b/test/CodeGen/AMDGPU/sra.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() #0
diff --git a/test/CodeGen/AMDGPU/srem.ll b/test/CodeGen/AMDGPU/srem.ll
index e06725892089..51eaf9a960b0 100644
--- a/test/CodeGen/AMDGPU/srem.ll
+++ b/test/CodeGen/AMDGPU/srem.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s
define amdgpu_kernel void @srem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/AMDGPU/srl.ll b/test/CodeGen/AMDGPU/srl.ll
index cb40ecf2de1c..8878b4538555 100644
--- a/test/CodeGen/AMDGPU/srl.ll
+++ b/test/CodeGen/AMDGPU/srl.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
; XUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() #0
diff --git a/test/CodeGen/AMDGPU/ssubo.ll b/test/CodeGen/AMDGPU/ssubo.ll
index 135632343f90..d65c2adc7e20 100644
--- a/test/CodeGen/AMDGPU/ssubo.ll
+++ b/test/CodeGen/AMDGPU/ssubo.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs< %s
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/sub.i16.ll b/test/CodeGen/AMDGPU/sub.i16.ll
index 1d407ea9bcda..14bedceed6ee 100644
--- a/test/CodeGen/AMDGPU/sub.i16.ll
+++ b/test/CodeGen/AMDGPU/sub.i16.ll
@@ -5,7 +5,7 @@
; GCN-LABEL: {{^}}v_test_sub_i16:
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_subrev_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; VI: v_sub_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; VI-NEXT: buffer_store_short [[ADD]]
define amdgpu_kernel void @v_test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -68,7 +68,7 @@ define amdgpu_kernel void @v_test_sub_i16_inline_63(i16 addrspace(1)* %out, i16
; GCN-LABEL: {{^}}v_test_sub_i16_zext_to_i32:
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_subrev_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; VI: v_sub_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; VI-NEXT: buffer_store_dword [[ADD]]
define amdgpu_kernel void @v_test_sub_i16_zext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -88,7 +88,7 @@ define amdgpu_kernel void @v_test_sub_i16_zext_to_i32(i32 addrspace(1)* %out, i1
; VI: v_mov_b32_e32 v[[VZERO:[0-9]+]], 0
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI-DAG: v_subrev_u16_e32 v[[ADD:[0-9]+]], [[B]], [[A]]
+; VI-DAG: v_sub_u16_e32 v[[ADD:[0-9]+]], [[A]], [[B]]
; VI: buffer_store_dwordx2 v{{\[}}[[ADD]]:[[VZERO]]{{\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
define amdgpu_kernel void @v_test_sub_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -107,7 +107,7 @@ define amdgpu_kernel void @v_test_sub_i16_zext_to_i64(i64 addrspace(1)* %out, i1
; GCN-LABEL: {{^}}v_test_sub_i16_sext_to_i32:
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_subrev_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; VI: v_sub_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; VI-NEXT: v_bfe_i32 [[SEXT:v[0-9]+]], [[ADD]], 0, 16
; VI-NEXT: buffer_store_dword [[SEXT]]
define amdgpu_kernel void @v_test_sub_i16_sext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
@@ -127,7 +127,7 @@ define amdgpu_kernel void @v_test_sub_i16_sext_to_i32(i32 addrspace(1)* %out, i1
; GCN-LABEL: {{^}}v_test_sub_i16_sext_to_i64:
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_subrev_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; VI: v_sub_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; VI-NEXT: v_bfe_i32 v[[LO:[0-9]+]], [[ADD]], 0, 16
; VI-NEXT: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; VI-NEXT: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
diff --git a/test/CodeGen/AMDGPU/sub.ll b/test/CodeGen/AMDGPU/sub.ll
index e7655df15520..46f1b120f212 100644
--- a/test/CodeGen/AMDGPU/sub.ll
+++ b/test/CodeGen/AMDGPU/sub.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() readnone
diff --git a/test/CodeGen/AMDGPU/sub.v2i16.ll b/test/CodeGen/AMDGPU/sub.v2i16.ll
index ee923e2b8b61..8d5c8b64efb8 100644
--- a/test/CodeGen/AMDGPU/sub.v2i16.ll
+++ b/test/CodeGen/AMDGPU/sub.v2i16.ll
@@ -6,7 +6,7 @@
; GFX9: v_pk_sub_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_sub_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI: v_subrev_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_sub_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
@@ -165,10 +165,10 @@ define amdgpu_kernel void @v_test_sub_v2i16_inline_fp_split(<2 x i16> addrspace(
; VI: flat_load_ushort v[[B_HI:[0-9]+]]
; VI: flat_load_ushort v[[B_LO:[0-9]+]]
-; VI: v_subrev_u16_e32 v[[ADD_HI:[0-9]+]], v[[B_HI]], v[[A_HI]]
+; VI: v_sub_u16_e32 v[[ADD_HI:[0-9]+]], v[[A_HI]], v[[B_HI]]
; VI-NOT: and
; VI-NOT: shl
-; VI: v_subrev_u16_e32 v[[ADD_LO:[0-9]+]], v[[B_LO]], v[[A_LO]]
+; VI: v_sub_u16_e32 v[[ADD_LO:[0-9]+]], v[[A_LO]], v[[B_LO]]
; VI-NOT: and
; VI-NOT: shl
; VI: buffer_store_dwordx2 v{{\[}}[[ADD_LO]]:[[ADD_HI]]{{\]}}
@@ -201,8 +201,8 @@ define amdgpu_kernel void @v_test_sub_v2i16_zext_to_v2i32(<2 x i32> addrspace(1)
; VI: flat_load_ushort v[[B_LO:[0-9]+]]
; VI: flat_load_ushort v[[B_HI:[0-9]+]]
-; VI-DAG: v_subrev_u16_e32
-; VI-DAG: v_subrev_u16_e32
+; VI: v_sub_u16_e32
+; VI: v_sub_u16_e32
; VI: buffer_store_dwordx4
define amdgpu_kernel void @v_test_sub_v2i16_zext_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
@@ -228,8 +228,8 @@ define amdgpu_kernel void @v_test_sub_v2i16_zext_to_v2i64(<2 x i64> addrspace(1)
; GFX9-DAG: v_ashrrev_i32_e32 v[[ELT1:[0-9]+]], 16, [[ADD]]
; GFX9: buffer_store_dwordx2 v{{\[}}[[ELT0]]:[[ELT1]]{{\]}}
-; VI: v_subrev_u16_e32
-; VI: v_subrev_u16_e32
+; VI: v_sub_u16_e32
+; VI: v_sub_u16_e32
; VI: buffer_store_dwordx2
define amdgpu_kernel void @v_test_sub_v2i16_sext_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -253,7 +253,7 @@ define amdgpu_kernel void @v_test_sub_v2i16_sext_to_v2i32(<2 x i32> addrspace(1)
; GFX9: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; VI: v_sub_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI: v_subrev_u16_e32
+; VI: v_sub_u16_e32
; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
diff --git a/test/CodeGen/AMDGPU/syncscopes.ll b/test/CodeGen/AMDGPU/syncscopes.ll
new file mode 100644
index 000000000000..3741ce788993
--- /dev/null
+++ b/test/CodeGen/AMDGPU/syncscopes.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -stop-before=si-debugger-insert-nops < %s | FileCheck --check-prefix=GCN %s
+
+; GCN-LABEL: name: syncscopes
+; GCN: FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out)
+; GCN: FLAT_STORE_DWORD killed %vgpr4_vgpr5, killed %vgpr3, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out)
+; GCN: FLAT_STORE_DWORD killed %vgpr7_vgpr8, killed %vgpr6, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out)
+define void @syncscopes(
+ i32 %agent,
+ i32 addrspace(4)* %agent_out,
+ i32 %workgroup,
+ i32 addrspace(4)* %workgroup_out,
+ i32 %wavefront,
+ i32 addrspace(4)* %wavefront_out) {
+entry:
+ store atomic i32 %agent, i32 addrspace(4)* %agent_out syncscope("agent") seq_cst, align 4
+ store atomic i32 %workgroup, i32 addrspace(4)* %workgroup_out syncscope("workgroup") seq_cst, align 4
+ store atomic i32 %wavefront, i32 addrspace(4)* %wavefront_out syncscope("wavefront") seq_cst, align 4
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll b/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll
index f90040385f75..77a6820713d6 100644
--- a/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll
+++ b/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=VI %s
; CHECK-LABEL: {{^}}trunc_i64_bitcast_v2i32:
; CHECK: buffer_load_dword v
diff --git a/test/CodeGen/AMDGPU/trunc.ll b/test/CodeGen/AMDGPU/trunc.ll
index 0c91d52df0c0..da038f4b0597 100644
--- a/test/CodeGen/AMDGPU/trunc.ll
+++ b/test/CodeGen/AMDGPU/trunc.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs< %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -verify-machineinstrs< %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
diff --git a/test/CodeGen/AMDGPU/uaddo.ll b/test/CodeGen/AMDGPU/uaddo.ll
index 632ccaa7e612..5754bd9bb913 100644
--- a/test/CodeGen/AMDGPU/uaddo.ll
+++ b/test/CodeGen/AMDGPU/uaddo.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
; FUNC-LABEL: {{^}}s_uaddo_i64_zext:
; GCN: s_add_u32
@@ -58,8 +58,8 @@ define amdgpu_kernel void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_uaddo_i32_novcc:
-; GCN: v_add_i32_e64 v{{[0-9]+}}, [[COND:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[COND]]
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG: ADDC_UINT
; EG: ADD_INT
diff --git a/test/CodeGen/AMDGPU/udiv.ll b/test/CodeGen/AMDGPU/udiv.ll
index d9dab0d40acf..1d683776bfd5 100644
--- a/test/CodeGen/AMDGPU/udiv.ll
+++ b/test/CodeGen/AMDGPU/udiv.ll
@@ -1,9 +1,9 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -mattr=-fp32-denormals < %s | FileCheck -check-prefix=SI -check-prefix=FUNC -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -mattr=-fp32-denormals < %s | FileCheck -check-prefix=SI -check-prefix=FUNC -check-prefix=VI %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+fp32-denormals < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+fp32-denormals < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}udiv_i32:
; EG-NOT: SETGE_INT
diff --git a/test/CodeGen/AMDGPU/uitofp.f16.ll b/test/CodeGen/AMDGPU/uitofp.f16.ll
index 0c3b0fcaf854..eaa1d073cafb 100644
--- a/test/CodeGen/AMDGPU/uitofp.f16.ll
+++ b/test/CodeGen/AMDGPU/uitofp.f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}uitofp_i16_to_f16
; GCN: buffer_load_ushort v[[A_I16:[0-9]+]]
diff --git a/test/CodeGen/AMDGPU/urem.ll b/test/CodeGen/AMDGPU/urem.ll
index fb4eab43a2d6..823c918dcda7 100644
--- a/test/CodeGen/AMDGPU/urem.ll
+++ b/test/CodeGen/AMDGPU/urem.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; The code generated by urem is long and complex and may frequently
; change. The goal of this test is to make sure the ISel doesn't fail
diff --git a/test/CodeGen/AMDGPU/usubo.ll b/test/CodeGen/AMDGPU/usubo.ll
index d1f454f0bc65..f01bf498e0d8 100644
--- a/test/CodeGen/AMDGPU/usubo.ll
+++ b/test/CodeGen/AMDGPU/usubo.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
; FUNC-LABEL: {{^}}s_usubo_i64_zext:
; GCN: s_sub_u32
@@ -58,8 +58,8 @@ define amdgpu_kernel void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_usubo_i32_novcc:
-; GCN: v_sub_i32_e64 v{{[0-9]+}}, [[COND:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[COND]]
+; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG-DAG: SUBB_UINT
; EG-DAG: SUB_INT
@@ -120,7 +120,7 @@ define amdgpu_kernel void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_usubo_i16:
-; VI: v_subrev_u16_e32
+; VI: v_sub_u16_e32
; VI: v_cmp_gt_u16_e32
define amdgpu_kernel void @v_usubo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
diff --git a/test/CodeGen/AMDGPU/v_cndmask.ll b/test/CodeGen/AMDGPU/v_cndmask.ll
index d4a68a418ee4..5cbfae34e1bb 100644
--- a/test/CodeGen/AMDGPU/v_cndmask.ll
+++ b/test/CodeGen/AMDGPU/v_cndmask.ll
@@ -200,9 +200,9 @@ define amdgpu_kernel void @icmp_vgprX_k0_select_k1_vgprZ_i32(i32 addrspace(1)* %
; SI-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v[[Z_HI]], vcc
; SI-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v[[Z_LO]], vcc
-; VI-DAG: v_cmp_lt_i64_e64 s{{\[[0-9]+:[0-9]+\]}}, -1, v{{\[}}[[X_LO]]:[[X_HI]]{{\]}}
-; VI-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 0, v[[Z_HI]], s
-; VI-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 2, v[[Z_LO]], s
+; VI-DAG: v_cmp_lt_i64_e32 vcc, -1, v{{\[}}[[X_LO]]:[[X_HI]]{{\]}}
+; VI-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v[[Z_HI]], vcc
+; VI-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v[[Z_LO]], vcc
define amdgpu_kernel void @icmp_vgprX_k0_select_k1_vgprZ_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %x.ptr, i64 addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
@@ -292,10 +292,10 @@ define amdgpu_kernel void @fcmp_k0_vgprX_select_k1_vgprZ_v4f32(<4 x float> addrs
; GCN-LABEL: {{^}}icmp_vgprX_k0_select_k1_vgprZ_i1:
; GCN: load_dword
; GCN: load_ubyte
-; GCN-DAG: v_cmp_gt_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, 0, v
+; GCN-DAG: v_cmp_gt_i32_e32 vcc, 0, v
; GCN-DAG: v_and_b32_e32 v{{[0-9]+}}, 1,
-; GCN-DAG: v_cmp_eq_u32_e32 vcc, 1, v
-; GCN-DAG: s_or_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, vcc
+; GCN-DAG: v_cmp_eq_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, 1, v
+; GCN-DAG: s_or_b64 s{{\[[0-9]+:[0-9]+\]}}, vcc, s{{\[[0-9]+:[0-9]+\]}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, s
; GCN: store_byte
define amdgpu_kernel void @icmp_vgprX_k0_select_k1_vgprZ_i1(i1 addrspace(1)* %out, i32 addrspace(1)* %x.ptr, i1 addrspace(1)* %z.ptr) #0 {
diff --git a/test/CodeGen/AMDGPU/v_mac.ll b/test/CodeGen/AMDGPU/v_mac.ll
index 2b96f7d50076..da57155f33ef 100644
--- a/test/CodeGen/AMDGPU/v_mac.ll
+++ b/test/CodeGen/AMDGPU/v_mac.ll
@@ -1,12 +1,12 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=VI-FLUSH -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=VI-DENORM -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=VI-FLUSH -check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=+fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=VI-DENORM -check-prefix=GCN %s
; GCN-LABEL: {{^}}mac_vvv:
; GCN: buffer_load_dword [[A:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0{{$}}
; GCN: buffer_load_dword [[B:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0 offset:4
; GCN: buffer_load_dword [[C:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0 offset:8
-; GCN: v_mac_f32_e32 [[C]], [[B]], [[A]]
+; GCN: v_mac_f32_e32 [[C]], [[A]], [[B]]
; GCN: buffer_store_dword [[C]]
define amdgpu_kernel void @mac_vvv(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
@@ -135,7 +135,7 @@ entry:
; GCN-LABEL: {{^}}safe_mad_sub0_src0:
; GCN: v_sub_f32_e32 [[SUB0:v[0-9]+]], 0,
-; GCN: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[SUB0]]
+; GCN: v_mac_f32_e32 v{{[0-9]+}}, [[SUB0]], v{{[0-9]+}}
define amdgpu_kernel void @safe_mad_sub0_src0(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/AMDGPU/v_mac_f16.ll b/test/CodeGen/AMDGPU/v_mac_f16.ll
index ce4a69db3506..46c9b7ee1a3d 100644
--- a/test/CodeGen/AMDGPU/v_mac_f16.ll
+++ b/test/CodeGen/AMDGPU/v_mac_f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}mac_f16:
; GCN: {{buffer|flat}}_load_ushort v[[A_F16:[0-9]+]]
@@ -8,10 +8,10 @@
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
-; SI: v_mac_f32_e32 v[[C_F32]], v[[B_F32]], v[[A_F32]]
+; SI: v_mac_f32_e32 v[[C_F32]], v[[A_F32]], v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[C_F32]]
; SI: buffer_store_short v[[R_F16]]
-; VI: v_mac_f16_e32 v[[C_F16]], v[[B_F16]], v[[A_F16]]
+; VI: v_mac_f16_e32 v[[C_F16]], v[[A_F16]], v[[B_F16]]
; VI: buffer_store_short v[[C_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @mac_f16(
@@ -147,9 +147,9 @@ entry:
; GCN-LABEL: {{^}}mac_f16_neg_a_safe_fp_math:
; SI: v_sub_f32_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
-; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A]]
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A]], v{{[0-9]+}}
; VI: v_sub_f16_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
-; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A]]
+; VI: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A]], v{{[0-9]+}}
; GCN: s_endpgm
define amdgpu_kernel void @mac_f16_neg_a_safe_fp_math(
half addrspace(1)* %r,
@@ -171,9 +171,9 @@ entry:
; GCN-LABEL: {{^}}mac_f16_neg_b_safe_fp_math:
; SI: v_sub_f32_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
-; SI: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A]], v{{[0-9]+}}
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A]]
; VI: v_sub_f16_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
-; VI: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A]], v{{[0-9]+}}
+; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A]]
; GCN: s_endpgm
define amdgpu_kernel void @mac_f16_neg_b_safe_fp_math(
half addrspace(1)* %r,
@@ -312,20 +312,20 @@ entry:
; SI: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
; SI-DAG: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
-; SI-DAG: v_mac_f32_e32 v[[C_F32_0]], v[[B_F32_0]], v[[A_F32_0]]
+; SI-DAG: v_mac_f32_e32 v[[C_F32_0]], v[[A_F32_0]], v[[B_F32_0]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_LO:[0-9]+]], v[[C_F32_0]]
-; SI-DAG: v_mac_f32_e32 v[[C_F32_1]], v[[B_F32_1]], v[[A_F32_1]]
+; SI-DAG: v_mac_f32_e32 v[[C_F32_1]], v[[A_F32_1]], v[[B_F32_1]]
; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[C_F32_1]]
; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; VI-NOT: and
-; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_LO]], v[[R_F16_HI]]
; VI-DAG: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
; VI-DAG: v_mac_f16_sdwa v[[C_F16_1]], v[[A_V2_F16]], v[[B_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-DAG: v_mac_f16_e32 v[[C_V2_F16]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_mac_f16_e32 v[[C_V2_F16]], v[[A_V2_F16]], v[[B_V2_F16]]
; VI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[C_F16_1]]
; VI-NOT: and
-; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[C_V2_F16]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[C_V2_F16]], v[[R_F16_HI]]
; GCN: {{buffer|flat}}_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
@@ -481,14 +481,14 @@ entry:
; SI: v_sub_f32_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
; SI: v_sub_f32_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
-; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A0]]
-; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
+; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A0]], v{{[0-9]+}}
+; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
; VI-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0
; VI-DAG: v_sub_f16_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
; VI-DAG: v_sub_f16_sdwa v[[NEG_A0:[0-9]+]], [[ZERO]], v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-DAG: v_mac_f16_sdwa v{{[0-9]+}}, v[[NEG_A0]], v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-DAG: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
+; VI-DAG: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
; GCN: s_endpgm
define amdgpu_kernel void @mac_v2f16_neg_a_safe_fp_math(
@@ -513,14 +513,14 @@ entry:
; SI: v_sub_f32_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
; SI: v_sub_f32_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
-; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A0]], v{{[0-9]+}}
-; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
+; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A0]]
+; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
; VI: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0
; VI: v_sub_f16_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
; VI: v_sub_f16_sdwa v[[NEG_A0:[0-9]+]], [[ZERO]], v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-DAG: v_mac_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A0]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-DAG: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
+; VI-DAG: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
; GCN: s_endpgm
define amdgpu_kernel void @mac_v2f16_neg_b_safe_fp_math(
diff --git a/test/CodeGen/AMDGPU/vectorize-global-local.ll b/test/CodeGen/AMDGPU/vectorize-global-local.ll
index 90cf34e609f6..381ff5b1b518 100644
--- a/test/CodeGen/AMDGPU/vectorize-global-local.ll
+++ b/test/CodeGen/AMDGPU/vectorize-global-local.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; CHECK-DAG: flat_load_dwordx4
; CHECK-DAG: flat_load_dwordx4
; CHECK-DAG: flat_load_dwordx4
diff --git a/test/CodeGen/AMDGPU/vop-shrink-frame-index.mir b/test/CodeGen/AMDGPU/vop-shrink-frame-index.mir
new file mode 100644
index 000000000000..f8a2339626cf
--- /dev/null
+++ b/test/CodeGen/AMDGPU/vop-shrink-frame-index.mir
@@ -0,0 +1,161 @@
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-shrink-instructions -o - %s | FileCheck -check-prefix=GCN %s
+--- |
+
+ define amdgpu_kernel void @fold_fi_vgpr() {
+ %alloca = alloca [4 x i32]
+ ret void
+ }
+
+ define amdgpu_kernel void @fold_vgpr_fi() {
+ %alloca = alloca [4 x i32]
+ ret void
+ }
+
+ define amdgpu_kernel void @fold_sgpr_fi() {
+ %alloca = alloca [4 x i32]
+ ret void
+ }
+
+ define amdgpu_kernel void @fold_fi_sgpr() {
+ %alloca = alloca [4 x i32]
+ ret void
+ }
+
+ define amdgpu_kernel void @fold_fi_imm() {
+ %alloca = alloca [4 x i32]
+ ret void
+ }
+
+ define amdgpu_kernel void @fold_imm_fi() {
+ %alloca = alloca [4 x i32]
+ ret void
+ }
+
+...
+# GCN-LABEL: name: fold_fi_vgpr{{$}}
+# GCN: %1 = IMPLICIT_DEF
+
+# GCN: %2 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def %vcc, implicit %exec
+name: fold_fi_vgpr
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+stack:
+ - { id: 0, name: alloca, type: default, offset: 0, size: 128, alignment: 8,
+ callee-saved-register: '', local-offset: 0, di-variable: '', di-expression: '',
+ di-location: '' }
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+ %1 = IMPLICIT_DEF
+ %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec
+ S_ENDPGM
+
+...
+# GCN-LABEL: name: fold_vgpr_fi{{$}}
+# GCN: %1 = IMPLICIT_DEF
+# GCN: %2 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def %vcc, implicit %exec
+name: fold_vgpr_fi
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+stack:
+ - { id: 0, name: alloca, type: default, offset: 0, size: 128, alignment: 8,
+ callee-saved-register: '', local-offset: 0, di-variable: '', di-expression: '',
+ di-location: '' }
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+ %1 = IMPLICIT_DEF
+ %2, %vcc = V_ADD_I32_e64 %1, %0, implicit %exec
+ S_ENDPGM
+
+...
+# GCN-LABEL: name: fold_sgpr_fi{{$}}
+# GCN: %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+# GCN: %1 = IMPLICIT_DEF
+# GCN: %2 = V_ADD_I32_e32 %1, %0, implicit-def %vcc, implicit %exec
+name: fold_sgpr_fi
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: sgpr_32 }
+ - { id: 2, class: vgpr_32 }
+stack:
+ - { id: 0, name: alloca, type: default, offset: 0, size: 128, alignment: 8,
+ callee-saved-register: '', local-offset: 0, di-variable: '', di-expression: '',
+ di-location: '' }
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+ %1 = IMPLICIT_DEF
+ %2, %vcc = V_ADD_I32_e64 %1, %0, implicit %exec
+ S_ENDPGM
+
+...
+# GCN-LABEL: name: fold_fi_sgpr{{$}}
+# GCN: %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+# GCN: %1 = IMPLICIT_DEF
+# GCN: %2 = V_ADD_I32_e32 %1, %0, implicit-def %vcc, implicit %exec
+name: fold_fi_sgpr
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: sgpr_32 }
+ - { id: 2, class: vgpr_32 }
+stack:
+ - { id: 0, name: alloca, type: default, offset: 0, size: 128, alignment: 8,
+ callee-saved-register: '', local-offset: 0, di-variable: '', di-expression: '',
+ di-location: '' }
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+ %1 = IMPLICIT_DEF
+ %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec
+ S_ENDPGM
+...
+# TODO: Should probably prefer folding immediate first
+# GCN-LABEL: name: fold_fi_imm{{$}}
+# GCN: %1 = V_MOV_B32_e32 999, implicit %exec
+# GCN: %2 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def %vcc, implicit %exec
+name: fold_fi_imm
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+stack:
+ - { id: 0, name: alloca, type: default, offset: 0, size: 128, alignment: 8,
+ callee-saved-register: '', local-offset: 0, di-variable: '', di-expression: '',
+ di-location: '' }
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+ %1 = V_MOV_B32_e32 999, implicit %exec
+ %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec
+ S_ENDPGM
+
+...
+# GCN-LABEL: name: fold_imm_fi{{$}}
+# GCN: %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+# GCN: %2 = V_ADD_I32_e32 999, %0, implicit-def %vcc, implicit %exec
+name: fold_imm_fi
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+stack:
+ - { id: 0, name: alloca, type: default, offset: 0, size: 128, alignment: 8,
+ callee-saved-register: '', local-offset: 0, di-variable: '', di-expression: '',
+ di-location: '' }
+body: |
+ bb.0:
+ %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+ %1 = V_MOV_B32_e32 999, implicit %exec
+ %2, %vcc = V_ADD_I32_e64 %1, %0, implicit %exec
+ S_ENDPGM
diff --git a/test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir b/test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir
new file mode 100644
index 000000000000..b4c0c93347c2
--- /dev/null
+++ b/test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir
@@ -0,0 +1,40 @@
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-shrink-instructions -o - %s | FileCheck -check-prefix=GCN %s
+...
+# GCN-LABEL: name: fold_imm_non_ssa{{$}}
+# GCN: %0 = V_MOV_B32_e32 123, implicit %exec
+# GCN: %2 = V_ADD_I32_e32 456, %0, implicit-def %vcc, implicit %exec
+
+name: fold_imm_non_ssa
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: sreg_64 }
+body: |
+ bb.0:
+ %0 = COPY undef %0
+ %0 = V_MOV_B32_e32 123, implicit %exec
+ %1 = V_MOV_B32_e32 456, implicit %exec
+ %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec
+ S_ENDPGM
+
+...
+# GCN-LABEL: name: fold_partially_defined_superreg{{$}}
+# GCN: %1 = V_MOV_B32_e32 456, implicit %exec
+# GCN: %2 = V_ADD_I32_e32 123, %1, implicit-def %vcc, implicit %exec
+name: fold_partially_defined_superreg
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: vreg_64 }
+body: |
+ bb.0:
+ undef %3.sub0 = V_MOV_B32_e32 123, implicit %exec, implicit-def %3
+ %1 = V_MOV_B32_e32 456, implicit %exec
+ %2, %vcc = V_ADD_I32_e64 %3.sub0, %1, implicit %exec
+ S_ENDPGM
+
+...
diff --git a/test/CodeGen/AMDGPU/vselect.ll b/test/CodeGen/AMDGPU/vselect.ll
index bb6234729f90..02ffd30be5fd 100644
--- a/test/CodeGen/AMDGPU/vselect.ll
+++ b/test/CodeGen/AMDGPU/vselect.ll
@@ -7,7 +7,9 @@
; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Z
; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Y
-; SI: v_cndmask_b32_e64
+; SI: v_cmp_gt_i32_e32 vcc
+; SI: v_cndmask_b32_e32
+; SI: v_cmp_gt_i32_e32 vcc
; SI: v_cndmask_b32_e32
define amdgpu_kernel void @test_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1, <2 x i32> %val) {
@@ -25,8 +27,11 @@ entry:
; EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI: v_cndmask_b32_e64
-;SI: v_cndmask_b32_e32
+
+; SI: v_cmp_neq_f32_e32 vcc
+; SI: v_cndmask_b32_e32
+; SI: v_cmp_neq_f32_e32 vcc
+; SI: v_cndmask_b32_e32
define amdgpu_kernel void @test_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in0, <2 x float> addrspace(1)* %in1) {
entry:
@@ -45,12 +50,10 @@ entry:
; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Z
; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Y
-; FIXME: The shrinking does not happen on tonga
-
-; SI: v_cndmask_b32
-; SI: v_cndmask_b32
-; SI: v_cndmask_b32
-; SI: v_cndmask_b32
+; SI: v_cndmask_b32_e32
+; SI: v_cndmask_b32_e32
+; SI: v_cndmask_b32_e32
+; SI: v_cndmask_b32_e32
define amdgpu_kernel void @test_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1, <4 x i32> %val) {
entry:
@@ -68,6 +71,10 @@ entry:
;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; SI: v_cndmask_b32_e32
+; SI: v_cndmask_b32_e32
+; SI: v_cndmask_b32_e32
+; SI: v_cndmask_b32_e32
define amdgpu_kernel void @test_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in0, <4 x float> addrspace(1)* %in1) {
entry:
%0 = load <4 x float>, <4 x float> addrspace(1)* %in0
diff --git a/test/CodeGen/AMDGPU/waitcnt-permute.mir b/test/CodeGen/AMDGPU/waitcnt-permute.mir
index 44dbd38f2d30..5612c7cac00b 100644
--- a/test/CodeGen/AMDGPU/waitcnt-permute.mir
+++ b/test/CodeGen/AMDGPU/waitcnt-permute.mir
@@ -1,18 +1,6 @@
# RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs -run-pass si-insert-waits -o - %s | FileCheck %s
---- |
- define float @waitcnt-permute(i32 %x, i32 %y) {
- entry:
- %0 = call i32 @llvm.amdgcn.ds.bpermute(i32 %x, i32 %y)
- %1 = bitcast i32 %0 to float
- %2 = fadd float 1.000000e+00, %1
- ret float %2
- }
-
- declare i32 @llvm.amdgcn.ds.bpermute(i32, i32)
-
...
----
# CHECK-LABEL: name: waitcnt-permute{{$}}
# CHECK: DS_BPERMUTE_B32
# CHECK-NEXT: S_WAITCNT 127
diff --git a/test/CodeGen/AMDGPU/xor.ll b/test/CodeGen/AMDGPU/xor.ll
index 57a082a0170c..847a1d739321 100644
--- a/test/CodeGen/AMDGPU/xor.ll
+++ b/test/CodeGen/AMDGPU/xor.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}xor_v2i32:
@@ -60,7 +60,7 @@ define amdgpu_kernel void @xor_i1(float addrspace(1)* %out, float addrspace(1)*
; FUNC-LABEL: {{^}}v_xor_i1:
; SI: buffer_load_ubyte [[B:v[0-9]+]]
; SI: buffer_load_ubyte [[A:v[0-9]+]]
-; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[A]], [[B]]
+; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[B]], [[A]]
; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[XOR]]
; SI: buffer_store_byte [[RESULT]]
define amdgpu_kernel void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
diff --git a/test/CodeGen/AMDGPU/zext-i64-bit-operand.ll b/test/CodeGen/AMDGPU/zext-i64-bit-operand.ll
index a902234898cd..69c42afb9ad5 100644
--- a/test/CodeGen/AMDGPU/zext-i64-bit-operand.ll
+++ b/test/CodeGen/AMDGPU/zext-i64-bit-operand.ll
@@ -6,7 +6,7 @@
; GCN-NOT: _or_
; GCN-NOT: v[[HI]]
; GCN-NOT: v_mov_b32_e32 v{{[0-9]+}}, 0
-; GCN: v_or_b32_e32 v[[LO]], v[[LD32]], v[[LO]]
+; GCN: v_or_b32_e32 v[[LO]], v[[LO]], v[[LD32]]
; GCN-NOT: _or_
; GCN-NOT: v[[HI]]
; GCN-NOT: v_mov_b32_e32 v{{[0-9]+}}, 0
@@ -26,7 +26,7 @@ define amdgpu_kernel void @zext_or_operand_i64(i64 addrspace(1)* %out, i64 addrs
; GCN-NOT: _or_
; GCN-NOT: v[[HI]]
; GCN-NOT: v_mov_b32_e32 v{{[0-9]+}}, 0
-; GCN: v_or_b32_e32 v[[LO]], v[[LD32]], v[[LO]]
+; GCN: v_or_b32_e32 v[[LO]], v[[LO]], v[[LD32]]
; GCN-NOT: v[[HI]]
; GCN-NOT: _or_
; GCN-NOT: v_mov_b32_e32 v{{[0-9]+}}, 0
diff --git a/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll b/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
index 9dcfe5007c00..ed5255bfbebd 100644
--- a/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
+++ b/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
@@ -6,23 +6,23 @@
; CHECK: ** List Scheduling
; CHECK: SU(2){{.*}}STR{{.*}}Volatile
-; CHECK-NOT: ord SU
-; CHECK: ord SU(3): Latency=1
-; CHECK-NOT: ord SU
+; CHECK-NOT: SU({{.*}}): Ord
+; CHECK: SU(3): Ord Latency=1
+; CHECK-NOT: SU({{.*}}): Ord
; CHECK: SU(3){{.*}}LDR{{.*}}Volatile
-; CHECK-NOT: ord SU
-; CHECK: ord SU(2): Latency=1
-; CHECK-NOT: ord SU
+; CHECK-NOT: SU({{.*}}): Ord
+; CHECK: SU(2): Ord Latency=1
+; CHECK-NOT: SU({{.*}}): Ord
; CHECK: Successors:
; CHECK: ** List Scheduling
; CHECK: SU(2){{.*}}STR{{.*}}
-; CHECK-NOT: ord SU
-; CHECK: ord SU(3): Latency=1
-; CHECK-NOT: ord SU
+; CHECK-NOT: SU({{.*}}): Ord
+; CHECK: SU(3): Ord Latency=1
+; CHECK-NOT: SU({{.*}}): Ord
; CHECK: SU(3){{.*}}LDR{{.*}}
-; CHECK-NOT: ord SU
-; CHECK: ord SU(2): Latency=1
-; CHECK-NOT: ord SU
+; CHECK-NOT: SU({{.*}}): Ord
+; CHECK: SU(2): Ord Latency=1
+; CHECK-NOT: SU({{.*}}): Ord
; CHECK: Successors:
define i32 @f1(i32* nocapture %p1, i32* nocapture %p2) nounwind {
entry:
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir
index 111375ece51b..6c8bc7123a1a 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir
@@ -10,6 +10,46 @@
define void @test_icmp_sge_s32() { ret void }
define void @test_icmp_slt_s32() { ret void }
define void @test_icmp_sle_s32() { ret void }
+
+ define void @test_fcmp_true_s32() #0 { ret void }
+ define void @test_fcmp_false_s32() #0 { ret void }
+
+ define void @test_fcmp_oeq_s32() #0 { ret void }
+ define void @test_fcmp_ogt_s32() #0 { ret void }
+ define void @test_fcmp_oge_s32() #0 { ret void }
+ define void @test_fcmp_olt_s32() #0 { ret void }
+ define void @test_fcmp_ole_s32() #0 { ret void }
+ define void @test_fcmp_ord_s32() #0 { ret void }
+ define void @test_fcmp_ugt_s32() #0 { ret void }
+ define void @test_fcmp_uge_s32() #0 { ret void }
+ define void @test_fcmp_ult_s32() #0 { ret void }
+ define void @test_fcmp_ule_s32() #0 { ret void }
+ define void @test_fcmp_une_s32() #0 { ret void }
+ define void @test_fcmp_uno_s32() #0 { ret void }
+
+ define void @test_fcmp_one_s32() #0 { ret void }
+ define void @test_fcmp_ueq_s32() #0 { ret void }
+
+ define void @test_fcmp_true_s64() #0 { ret void }
+ define void @test_fcmp_false_s64() #0 { ret void }
+
+ define void @test_fcmp_oeq_s64() #0 { ret void }
+ define void @test_fcmp_ogt_s64() #0 { ret void }
+ define void @test_fcmp_oge_s64() #0 { ret void }
+ define void @test_fcmp_olt_s64() #0 { ret void }
+ define void @test_fcmp_ole_s64() #0 { ret void }
+ define void @test_fcmp_ord_s64() #0 { ret void }
+ define void @test_fcmp_ugt_s64() #0 { ret void }
+ define void @test_fcmp_uge_s64() #0 { ret void }
+ define void @test_fcmp_ult_s64() #0 { ret void }
+ define void @test_fcmp_ule_s64() #0 { ret void }
+ define void @test_fcmp_une_s64() #0 { ret void }
+ define void @test_fcmp_uno_s64() #0 { ret void }
+
+ define void @test_fcmp_one_s64() #0 { ret void }
+ define void @test_fcmp_ueq_s64() #0 { ret void }
+
+ attributes #0 = { "target-features"="+vfp2" }
...
---
name: test_icmp_eq_s32
@@ -35,8 +75,8 @@ body: |
%2(s1) = G_ICMP intpred(eq), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 0, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 0, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
@@ -71,8 +111,8 @@ body: |
%2(s1) = G_ICMP intpred(ne), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 1, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 1, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
@@ -107,8 +147,8 @@ body: |
%2(s1) = G_ICMP intpred(ugt), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 8, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 8, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
@@ -143,8 +183,8 @@ body: |
%2(s1) = G_ICMP intpred(uge), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 2, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 2, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
@@ -179,8 +219,8 @@ body: |
%2(s1) = G_ICMP intpred(ult), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 3, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 3, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
@@ -215,8 +255,8 @@ body: |
%2(s1) = G_ICMP intpred(ule), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 9, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 9, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
@@ -251,8 +291,8 @@ body: |
%2(s1) = G_ICMP intpred(sgt), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 12, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 12, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
@@ -287,8 +327,8 @@ body: |
%2(s1) = G_ICMP intpred(sge), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 10, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 10, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
@@ -323,8 +363,8 @@ body: |
%2(s1) = G_ICMP intpred(slt), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 11, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 11, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
@@ -359,8 +399,1180 @@ body: |
%2(s1) = G_ICMP intpred(sle), %0(s32), %1
; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
- ; CHECK: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 13, %cpsr
+ ; CHECK-NEXT: CMPrr [[VREGX]], [[VREGY]], 14, _, implicit-def %cpsr
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 13, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_true_s32
+# CHECK-LABEL: name: test_fcmp_true_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(true), %0(s32), %1
+ ; CHECK: [[RES:%[0-9]+]] = MOVi 1, 14, _, _
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_false_s32
+# CHECK-LABEL: name: test_fcmp_false_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(false), %0(s32), %1
+ ; CHECK: [[RES:%[0-9]+]] = MOVi 0, 14, _, _
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_oeq_s32
+# CHECK-LABEL: name: test_fcmp_oeq_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(oeq), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 0, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ogt_s32
+# CHECK-LABEL: name: test_fcmp_ogt_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(ogt), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 12, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_oge_s32
+# CHECK-LABEL: name: test_fcmp_oge_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(oge), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 10, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_olt_s32
+# CHECK-LABEL: name: test_fcmp_olt_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(olt), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 4, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ole_s32
+# CHECK-LABEL: name: test_fcmp_ole_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(ole), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 9, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ord_s32
+# CHECK-LABEL: name: test_fcmp_ord_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(ord), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 7, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ugt_s32
+# CHECK-LABEL: name: test_fcmp_ugt_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(ugt), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 8, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_uge_s32
+# CHECK-LABEL: name: test_fcmp_uge_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(uge), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 5, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ult_s32
+# CHECK-LABEL: name: test_fcmp_ult_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(ult), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 11, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ule_s32
+# CHECK-LABEL: name: test_fcmp_ule_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(ule), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 13, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_une_s32
+# CHECK-LABEL: name: test_fcmp_une_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(une), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 1, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_uno_s32
+# CHECK-LABEL: name: test_fcmp_uno_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(uno), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 6, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_one_s32
+# CHECK-LABEL: name: test_fcmp_one_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(one), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES1:%[0-9]+]] = MOVCCi [[ZERO]], 1, 12, %cpsr
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[RES1]], 1, 4, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ueq_s32
+# CHECK-LABEL: name: test_fcmp_ueq_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s1) = G_FCMP floatpred(ueq), %0(s32), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES1:%[0-9]+]] = MOVCCi [[ZERO]], 1, 0, %cpsr
+ ; CHECK-NEXT: VCMPS [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[RES1]], 1, 6, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_true_s64
+# CHECK-LABEL: name: test_fcmp_true_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(true), %0(s64), %1
+ ; CHECK: [[RES:%[0-9]+]] = MOVi 1, 14, _, _
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_false_s64
+# CHECK-LABEL: name: test_fcmp_false_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(false), %0(s64), %1
+ ; CHECK: [[RES:%[0-9]+]] = MOVi 0, 14, _, _
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_oeq_s64
+# CHECK-LABEL: name: test_fcmp_oeq_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(oeq), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 0, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ogt_s64
+# CHECK-LABEL: name: test_fcmp_ogt_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(ogt), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 12, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_oge_s64
+# CHECK-LABEL: name: test_fcmp_oge_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(oge), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 10, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_olt_s64
+# CHECK-LABEL: name: test_fcmp_olt_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(olt), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 4, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ole_s64
+# CHECK-LABEL: name: test_fcmp_ole_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(ole), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 9, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ord_s64
+# CHECK-LABEL: name: test_fcmp_ord_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(ord), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 7, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ugt_s64
+# CHECK-LABEL: name: test_fcmp_ugt_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(ugt), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 8, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_uge_s64
+# CHECK-LABEL: name: test_fcmp_uge_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(uge), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 5, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ult_s64
+# CHECK-LABEL: name: test_fcmp_ult_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(ult), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 11, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ule_s64
+# CHECK-LABEL: name: test_fcmp_ule_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(ule), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 13, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_une_s64
+# CHECK-LABEL: name: test_fcmp_une_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(une), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 1, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_uno_s64
+# CHECK-LABEL: name: test_fcmp_uno_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(uno), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[ZERO]], 1, 6, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_one_s64
+# CHECK-LABEL: name: test_fcmp_one_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(one), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES1:%[0-9]+]] = MOVCCi [[ZERO]], 1, 12, %cpsr
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[RES1]], 1, 4, %cpsr
+
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RET]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ueq_s64
+# CHECK-LABEL: name: test_fcmp_ueq_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s1) = G_FCMP floatpred(ueq), %0(s64), %1
+ ; CHECK: [[ZERO:%[0-9]+]] = MOVi 0, 14, _, _
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES1:%[0-9]+]] = MOVCCi [[ZERO]], 1, 0, %cpsr
+ ; CHECK-NEXT: VCMPD [[VREGX]], [[VREGY]], 14, _, implicit-def %fpscr_nzcv
+ ; CHECK-NEXT: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK-NEXT: [[RES:%[0-9]+]] = MOVCCi [[RES1]], 1, 6, %cpsr
%3(s32) = G_ZEXT %2(s1)
; CHECK: [[RET:%[0-9]+]] = ANDri [[RES]], 1, 14, _, _
diff --git a/test/CodeGen/ARM/GlobalISel/arm-isel-fp.ll b/test/CodeGen/ARM/GlobalISel/arm-isel-fp.ll
index 7d021fdb43dd..98b39e444ac7 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-isel-fp.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-isel-fp.ll
@@ -49,3 +49,33 @@ define arm_aapcscc double @test_add_double(double %x, double %y) {
%r = fadd double %x, %y
ret double %r
}
+
+define arm_aapcs_vfpcc i32 @test_cmp_float_ogt(float %x, float %y) {
+; CHECK-LABEL: test_cmp_float_ogt
+; HARD: vcmp.f32
+; HARD: vmrs APSR_nzcv, fpscr
+; HARD-NEXT: movgt
+; SOFT-AEABI: blx __aeabi_fcmpgt
+; SOFT-DEFAULT: blx __gtsf2
+entry:
+ %v = fcmp ogt float %x, %y
+ %r = zext i1 %v to i32
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @test_cmp_float_one(float %x, float %y) {
+; CHECK-LABEL: test_cmp_float_one
+; HARD: vcmp.f32
+; HARD: vmrs APSR_nzcv, fpscr
+; HARD: movgt
+; HARD-NOT: vcmp
+; HARD: movmi
+; SOFT-AEABI-DAG: blx __aeabi_fcmpgt
+; SOFT-AEABI-DAG: blx __aeabi_fcmplt
+; SOFT-DEFAULT-DAG: blx __gtsf2
+; SOFT-DEFAULT-DAG: blx __ltsf2
+entry:
+ %v = fcmp one float %x, %y
+ %r = zext i1 %v to i32
+ ret i32 %r
+}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
index c93e7fa0ec56..9a0877846fc3 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
@@ -36,6 +36,7 @@ body: |
%0(s32) = COPY %r0
%1(s32) = COPY %r1
; HWDIV: [[R:%[0-9]+]](s32) = G_SDIV [[X]], [[Y]]
+ ; SOFT-NOT: G_SDIV
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X]]
; SOFT-DAG: %r1 = COPY [[Y]]
@@ -44,6 +45,7 @@ body: |
; SOFT-DEFAULT: BLX $__divsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
; SOFT-DEFAULT: [[R:%[0-9]+]](s32) = COPY %r0
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_SDIV
%2(s32) = G_SDIV %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
@@ -70,6 +72,7 @@ body: |
%0(s32) = COPY %r0
%1(s32) = COPY %r1
; HWDIV: [[R:%[0-9]+]](s32) = G_UDIV [[X]], [[Y]]
+ ; SOFT-NOT: G_UDIV
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X]]
; SOFT-DAG: %r1 = COPY [[Y]]
@@ -78,6 +81,7 @@ body: |
; SOFT-DEFAULT: BLX $__udivsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
; SOFT-DEFAULT: [[R:%[0-9]+]](s32) = COPY %r0
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_UDIV
%2(s32) = G_UDIV %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
@@ -106,6 +110,7 @@ body: |
%0(s16) = COPY %r0
%1(s16) = COPY %r1
; HWDIV: [[R32:%[0-9]+]](s32) = G_SDIV [[X32]], [[Y32]]
+ ; SOFT-NOT: G_SDIV
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X32]]
; SOFT-DAG: %r1 = COPY [[Y32]]
@@ -114,7 +119,9 @@ body: |
; SOFT-DEFAULT: BLX $__divsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
; SOFT-DEFAULT: [[R32:%[0-9]+]](s32) = COPY %r0
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_SDIV
; CHECK: [[R:%[0-9]+]](s16) = G_TRUNC [[R32]]
+ ; SOFT-NOT: G_SDIV
%2(s16) = G_SDIV %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s16)
@@ -143,6 +150,7 @@ body: |
%0(s16) = COPY %r0
%1(s16) = COPY %r1
; HWDIV: [[R32:%[0-9]+]](s32) = G_UDIV [[X32]], [[Y32]]
+ ; SOFT-NOT: G_UDIV
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X32]]
; SOFT-DAG: %r1 = COPY [[Y32]]
@@ -151,7 +159,9 @@ body: |
; SOFT-DEFAULT: BLX $__udivsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
; SOFT-DEFAULT: [[R32:%[0-9]+]](s32) = COPY %r0
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_UDIV
; CHECK: [[R:%[0-9]+]](s16) = G_TRUNC [[R32]]
+ ; SOFT-NOT: G_UDIV
%2(s16) = G_UDIV %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s16)
@@ -180,6 +190,7 @@ body: |
%0(s8) = COPY %r0
%1(s8) = COPY %r1
; HWDIV: [[R32:%[0-9]+]](s32) = G_SDIV [[X32]], [[Y32]]
+ ; SOFT-NOT: G_SDIV
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X32]]
; SOFT-DAG: %r1 = COPY [[Y32]]
@@ -188,7 +199,9 @@ body: |
; SOFT-DEFAULT: BLX $__divsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
; SOFT-DEFAULT: [[R32:%[0-9]+]](s32) = COPY %r0
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_SDIV
; CHECK: [[R:%[0-9]+]](s8) = G_TRUNC [[R32]]
+ ; SOFT-NOT: G_SDIV
%2(s8) = G_SDIV %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s8)
@@ -217,6 +230,7 @@ body: |
%0(s8) = COPY %r0
%1(s8) = COPY %r1
; HWDIV: [[R32:%[0-9]+]](s32) = G_UDIV [[X32]], [[Y32]]
+ ; SOFT-NOT: G_UDIV
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X32]]
; SOFT-DAG: %r1 = COPY [[Y32]]
@@ -225,7 +239,9 @@ body: |
; SOFT-DEFAULT: BLX $__udivsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
; SOFT-DEFAULT: [[R32:%[0-9]+]](s32) = COPY %r0
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_UDIV
; CHECK: [[R:%[0-9]+]](s8) = G_TRUNC [[R32]]
+ ; SOFT-NOT: G_UDIV
%2(s8) = G_UDIV %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s8)
@@ -254,6 +270,7 @@ body: |
; HWDIV: [[Q:%[0-9]+]](s32) = G_SDIV [[X]], [[Y]]
; HWDIV: [[P:%[0-9]+]](s32) = G_MUL [[Q]], [[Y]]
; HWDIV: [[R:%[0-9]+]](s32) = G_SUB [[X]], [[P]]
+ ; SOFT-NOT: G_SREM
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X]]
; SOFT-DAG: %r1 = COPY [[Y]]
@@ -262,6 +279,7 @@ body: |
; SOFT-DEFAULT: BLX $__modsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
; SOFT-DEFAULT: [[R:%[0-9]+]](s32) = COPY %r0
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_SREM
%2(s32) = G_SREM %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
@@ -290,6 +308,7 @@ body: |
; HWDIV: [[Q:%[0-9]+]](s32) = G_UDIV [[X]], [[Y]]
; HWDIV: [[P:%[0-9]+]](s32) = G_MUL [[Q]], [[Y]]
; HWDIV: [[R:%[0-9]+]](s32) = G_SUB [[X]], [[P]]
+ ; SOFT-NOT: G_UREM
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X]]
; SOFT-DAG: %r1 = COPY [[Y]]
@@ -298,6 +317,7 @@ body: |
; SOFT-DEFAULT: BLX $__umodsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
; SOFT-DEFAULT: [[R:%[0-9]+]](s32) = COPY %r0
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_UREM
%2(s32) = G_UREM %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
index 803135ba595e..cb61f95b10ce 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
@@ -10,6 +10,44 @@
define void @test_fadd_float() { ret void }
define void @test_fadd_double() { ret void }
+
+ define void @test_fcmp_true_s32() { ret void }
+ define void @test_fcmp_false_s32() { ret void }
+
+ define void @test_fcmp_oeq_s32() { ret void }
+ define void @test_fcmp_ogt_s32() { ret void }
+ define void @test_fcmp_oge_s32() { ret void }
+ define void @test_fcmp_olt_s32() { ret void }
+ define void @test_fcmp_ole_s32() { ret void }
+ define void @test_fcmp_ord_s32() { ret void }
+ define void @test_fcmp_ugt_s32() { ret void }
+ define void @test_fcmp_uge_s32() { ret void }
+ define void @test_fcmp_ult_s32() { ret void }
+ define void @test_fcmp_ule_s32() { ret void }
+ define void @test_fcmp_une_s32() { ret void }
+ define void @test_fcmp_uno_s32() { ret void }
+
+ define void @test_fcmp_one_s32() { ret void }
+ define void @test_fcmp_ueq_s32() { ret void }
+
+ define void @test_fcmp_true_s64() { ret void }
+ define void @test_fcmp_false_s64() { ret void }
+
+ define void @test_fcmp_oeq_s64() { ret void }
+ define void @test_fcmp_ogt_s64() { ret void }
+ define void @test_fcmp_oge_s64() { ret void }
+ define void @test_fcmp_olt_s64() { ret void }
+ define void @test_fcmp_ole_s64() { ret void }
+ define void @test_fcmp_ord_s64() { ret void }
+ define void @test_fcmp_ugt_s64() { ret void }
+ define void @test_fcmp_uge_s64() { ret void }
+ define void @test_fcmp_ult_s64() { ret void }
+ define void @test_fcmp_ule_s64() { ret void }
+ define void @test_fcmp_une_s64() { ret void }
+ define void @test_fcmp_uno_s64() { ret void }
+
+ define void @test_fcmp_one_s64() { ret void }
+ define void @test_fcmp_ueq_s64() { ret void }
...
---
name: test_frem_float
@@ -31,6 +69,7 @@ body: |
; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
%0(s32) = COPY %r0
%1(s32) = COPY %r1
+ ; CHECK-NOT: G_FREM
; CHECK: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X]]
; SOFT-DAG: %r1 = COPY [[Y]]
@@ -41,6 +80,7 @@ body: |
; SOFT: [[R:%[0-9]+]](s32) = COPY %r0
; HARD: [[R:%[0-9]+]](s32) = COPY %s0
; CHECK: ADJCALLSTACKUP
+ ; CHECK-NOT: G_FREM
%2(s32) = G_FREM %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
@@ -86,6 +126,7 @@ body: |
; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]]
%4(s64) = G_MERGE_VALUES %0(s32), %1(s32)
%5(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ ; CHECK-NOT: G_FREM
; CHECK: ADJCALLSTACKDOWN
; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]]
; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]]
@@ -96,6 +137,7 @@ body: |
; SOFT: BLX $fmod, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
; HARD: BLX $fmod, {{.*}}, implicit %d0, implicit %d1, implicit-def %d0
; CHECK: ADJCALLSTACKUP
+ ; CHECK-NOT: G_FREM
%6(s64) = G_FREM %4, %5
%7(s32), %8(s32) = G_UNMERGE_VALUES %6(s64)
%r0 = COPY %7(s32)
@@ -122,6 +164,7 @@ body: |
; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
%0(s32) = COPY %r0
%1(s32) = COPY %r1
+ ; CHECK-NOT: G_FPOW
; CHECK: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X]]
; SOFT-DAG: %r1 = COPY [[Y]]
@@ -132,6 +175,7 @@ body: |
; SOFT: [[R:%[0-9]+]](s32) = COPY %r0
; HARD: [[R:%[0-9]+]](s32) = COPY %s0
; CHECK: ADJCALLSTACKUP
+ ; CHECK-NOT: G_FPOW
%2(s32) = G_FPOW %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
@@ -177,6 +221,7 @@ body: |
; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]]
%4(s64) = G_MERGE_VALUES %0(s32), %1(s32)
%5(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ ; CHECK-NOT: G_FPOW
; CHECK: ADJCALLSTACKDOWN
; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]]
; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]]
@@ -187,6 +232,7 @@ body: |
; SOFT: BLX $pow, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
; HARD: BLX $pow, {{.*}}, implicit %d0, implicit %d1, implicit-def %d0
; CHECK: ADJCALLSTACKUP
+ ; CHECK-NOT: G_FPOW
%6(s64) = G_FPOW %4, %5
%7(s32), %8(s32) = G_UNMERGE_VALUES %6(s64)
%r0 = COPY %7(s32)
@@ -214,6 +260,7 @@ body: |
%0(s32) = COPY %r0
%1(s32) = COPY %r1
; HARD: [[R:%[0-9]+]](s32) = G_FADD [[X]], [[Y]]
+ ; SOFT-NOT: G_FADD
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r0 = COPY [[X]]
; SOFT-DAG: %r1 = COPY [[Y]]
@@ -221,6 +268,7 @@ body: |
; SOFT-DEFAULT: BLX $__addsf3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
; SOFT: [[R:%[0-9]+]](s32) = COPY %r0
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_FADD
%2(s32) = G_FADD %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
@@ -261,6 +309,7 @@ body: |
%4(s64) = G_MERGE_VALUES %0(s32), %1(s32)
%5(s64) = G_MERGE_VALUES %2(s32), %3(s32)
; HARD: [[R:%[0-9]+]](s64) = G_FADD [[X]], [[Y]]
+ ; SOFT-NOT: G_FADD
; SOFT: ADJCALLSTACKDOWN
; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]]
; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]]
@@ -269,6 +318,7 @@ body: |
; SOFT-AEABI: BLX $__aeabi_dadd, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
; SOFT-DEFAULT: BLX $__adddf3, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_FADD
%6(s64) = G_FADD %4, %5
; HARD-DAG: G_UNMERGE_VALUES [[R]](s64)
%7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64)
@@ -276,3 +326,1565 @@ body: |
%r1 = COPY %8(s32)
BX_RET 14, _, implicit %r0, implicit %r1
...
+---
+name: test_fcmp_true_s32
+# CHECK-LABEL: name: test_fcmp_true_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(true), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(true), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: [[REXT:%[0-9]+]](s32) = G_CONSTANT i32 -1
+ ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]](s32)
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_false_s32
+# CHECK-LABEL: name: test_fcmp_false_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(false), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(false), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: [[REXT:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]](s32)
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_oeq_s32
+# CHECK-LABEL: name: test_fcmp_oeq_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(oeq), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(oeq), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__eqsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ogt_s32
+# CHECK-LABEL: name: test_fcmp_ogt_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(ogt), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ogt), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gtsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sgt), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_oge_s32
+# CHECK-LABEL: name: test_fcmp_oge_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(oge), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(oge), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpge, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sge), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_olt_s32
+# CHECK-LABEL: name: test_fcmp_olt_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(olt), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(olt), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmplt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__ltsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(slt), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ole_s32
+# CHECK-LABEL: name: test_fcmp_ole_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(ole), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ole), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmple, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__lesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sle), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ord_s32
+# CHECK-LABEL: name: test_fcmp_ord_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(ord), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ord), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpun, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__unordsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ugt_s32
+# CHECK-LABEL: name: test_fcmp_ugt_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(ugt), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ugt), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmple, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__lesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sgt), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_uge_s32
+# CHECK-LABEL: name: test_fcmp_uge_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(uge), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(uge), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmplt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__ltsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sge), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ult_s32
+# CHECK-LABEL: name: test_fcmp_ult_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(ult), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ult), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpge, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(slt), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ule_s32
+# CHECK-LABEL: name: test_fcmp_ule_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(ule), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ule), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gtsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sle), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_une_s32
+# CHECK-LABEL: name: test_fcmp_une_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(une), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(une), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__nesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(ne), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_uno_s32
+# CHECK-LABEL: name: test_fcmp_uno_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(uno), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(uno), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpun, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__unordsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(ne), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_one_s32
+# CHECK-LABEL: name: test_fcmp_one_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(one), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(one), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gtsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET1:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R1:%[0-9]+]](s1) = G_TRUNC [[RET1]]
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R1:%[0-9]+]](s1) = G_ICMP intpred(sgt), [[RET1]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmplt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__ltsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET2:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R2:%[0-9]+]](s1) = G_TRUNC [[RET2]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R2:%[0-9]+]](s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO]]
+ ; SOFT-DAG: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]]
+ ; SOFT-DAG: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]]
+ ; SOFT: [[REXT:%[0-9]+]](s32) = G_OR [[R1EXT]], [[R2EXT]]
+ ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ueq_s32
+# CHECK-LABEL: name: test_fcmp_ueq_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %2(s1) = G_FCMP floatpred(ueq), %0(s32), %1
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ueq), [[X]](s32), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__eqsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET1:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R1:%[0-9]+]](s1) = G_TRUNC [[RET1]]
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R1:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET1]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fcmpun, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__unordsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[RET2:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R2:%[0-9]+]](s1) = G_TRUNC [[RET2]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R2:%[0-9]+]](s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO]]
+ ; SOFT-DAG: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]]
+ ; SOFT-DAG: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]]
+ ; SOFT: [[REXT:%[0-9]+]](s32) = G_OR [[R1EXT]], [[R2EXT]]
+ ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]]
+ ; SOFT-NOT: G_FCMP
+ %3(s32) = G_ZEXT %2(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_true_s64
+# CHECK-LABEL: name: test_fcmp_true_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(true), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(true), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: [[REXT:%[0-9]+]](s32) = G_CONSTANT i32 -1
+ ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]](s32)
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_false_s64
+# CHECK-LABEL: name: test_fcmp_false_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(false), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(false), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: [[REXT:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]](s32)
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_oeq_s64
+# CHECK-LABEL: name: test_fcmp_oeq_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(oeq), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(oeq), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__eqdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ogt_s64
+# CHECK-LABEL: name: test_fcmp_ogt_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(ogt), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ogt), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gtdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sgt), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_oge_s64
+# CHECK-LABEL: name: test_fcmp_oge_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(oge), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(oge), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpge, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gedf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sge), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_olt_s64
+# CHECK-LABEL: name: test_fcmp_olt_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(olt), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(olt), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmplt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__ltdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(slt), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ole_s64
+# CHECK-LABEL: name: test_fcmp_ole_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(ole), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ole), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmple, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__ledf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sle), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ord_s64
+# CHECK-LABEL: name: test_fcmp_ord_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(ord), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ord), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpun, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__unorddf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ugt_s64
+# CHECK-LABEL: name: test_fcmp_ugt_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(ugt), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ugt), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmple, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__ledf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sgt), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_uge_s64
+# CHECK-LABEL: name: test_fcmp_uge_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(uge), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(uge), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmplt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__ltdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sge), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ult_s64
+# CHECK-LABEL: name: test_fcmp_ult_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(ult), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ult), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpge, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gedf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(slt), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ule_s64
+# CHECK-LABEL: name: test_fcmp_ule_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(ule), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ule), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gtdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(sle), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_une_s64
+# CHECK-LABEL: name: test_fcmp_une_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(une), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(une), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__nedf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(ne), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_uno_s64
+# CHECK-LABEL: name: test_fcmp_uno_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(uno), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(uno), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpun, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__unorddf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R:%[0-9]+]](s1) = G_TRUNC [[RET]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R:%[0-9]+]](s1) = G_ICMP intpred(ne), [[RET]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_one_s64
+# CHECK-LABEL: name: test_fcmp_one_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(one), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(one), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__gtdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET1:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R1:%[0-9]+]](s1) = G_TRUNC [[RET1]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R1:%[0-9]+]](s1) = G_ICMP intpred(sgt), [[RET1]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmplt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__ltdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET2:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R2:%[0-9]+]](s1) = G_TRUNC [[RET2]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R2:%[0-9]+]](s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO]]
+ ; SOFT-DAG: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]]
+ ; SOFT-DAG: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]]
+ ; SOFT: [[REXT:%[0-9]+]](s32) = G_OR [[R1EXT]], [[R2EXT]]
+ ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fcmp_ueq_s64
+# CHECK-LABEL: name: test_fcmp_ueq_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %4(s64) = G_MERGE_VALUES %0(s32), %1
+ %5(s64) = G_MERGE_VALUES %2(s32), %3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32)
+ %6(s1) = G_FCMP floatpred(ueq), %4(s64), %5
+ ; HARD: [[R:%[0-9]+]](s1) = G_FCMP floatpred(ueq), [[X]](s64), [[Y]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__eqdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET1:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R1:%[0-9]+]](s1) = G_TRUNC [[RET1]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R1:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET1]](s32), [[ZERO]]
+ ; SOFT-NOT: G_FCMP
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X0]]
+ ; SOFT-DAG: %r1 = COPY [[X1]]
+ ; SOFT-DAG: %r2 = COPY [[Y0]]
+ ; SOFT-DAG: %r3 = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dcmpun, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__unorddf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+ ; SOFT: [[RET2:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-AEABI: [[R2:%[0-9]+]](s1) = G_TRUNC [[RET2]](s32)
+ ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+ ; SOFT-DEFAULT: [[R2:%[0-9]+]](s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO]]
+ ; SOFT-DAG: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]]
+ ; SOFT-DAG: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]]
+ ; SOFT: [[REXT:%[0-9]+]](s32) = G_OR [[R1EXT]], [[R2EXT]]
+ ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]]
+ ; SOFT-NOT: G_FCMP
+ %7(s32) = G_ZEXT %6(s1)
+ ; CHECK: [[REXT:%[0-9]+]](s32) = G_ZEXT [[R]](s1)
+ %r0 = COPY %7(s32)
+ ; CHECK: %r0 = COPY [[REXT]]
+ BX_RET 14, _, implicit %r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
index bf759728c365..4575341dfc29 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -111,6 +111,7 @@ body: |
%1(s8) = COPY %r1
%2(s8) = G_ADD %0, %1
; G_ADD with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s8)
@@ -136,6 +137,7 @@ body: |
%1(s16) = COPY %r1
%2(s16) = G_ADD %0, %1
; G_ADD with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s16)
@@ -187,6 +189,7 @@ body: |
%1(s8) = COPY %r1
%2(s8) = G_SUB %0, %1
; G_SUB with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s8)
@@ -212,6 +215,7 @@ body: |
%1(s16) = COPY %r1
%2(s16) = G_SUB %0, %1
; G_SUB with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s16)
@@ -263,6 +267,7 @@ body: |
%1(s8) = COPY %r1
%2(s8) = G_MUL %0, %1
; G_MUL with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s8)
@@ -288,6 +293,7 @@ body: |
%1(s16) = COPY %r1
%2(s16) = G_MUL %0, %1
; G_MUL with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s16)
@@ -339,6 +345,7 @@ body: |
%1(s8) = COPY %r1
%2(s8) = G_AND %0, %1
; G_AND with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s8) = G_AND {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_AND {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s8) = G_AND {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s8)
@@ -364,6 +371,7 @@ body: |
%1(s16) = COPY %r1
%2(s16) = G_AND %0, %1
; G_AND with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s16) = G_AND {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_AND {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s16) = G_AND {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s16)
@@ -415,6 +423,7 @@ body: |
%1(s8) = COPY %r1
%2(s8) = G_OR %0, %1
; G_OR with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s8) = G_OR {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_OR {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s8) = G_OR {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s8)
@@ -440,6 +449,7 @@ body: |
%1(s16) = COPY %r1
%2(s16) = G_OR %0, %1
; G_OR with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s16) = G_OR {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_OR {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s16) = G_OR {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s16)
@@ -491,6 +501,7 @@ body: |
%1(s8) = COPY %r1
%2(s8) = G_XOR %0, %1
; G_XOR with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s8) = G_XOR {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s8) = G_XOR {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s8)
@@ -516,6 +527,7 @@ body: |
%1(s16) = COPY %r1
%2(s16) = G_XOR %0, %1
; G_XOR with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}(s16) = G_XOR {{%[0-9]+, %[0-9]+}}
; CHECK: {{%[0-9]+}}(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
; CHECK-NOT: {{%[0-9]+}}(s16) = G_XOR {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s16)
@@ -689,11 +701,32 @@ selected: false
tracksRegLiveness: true
registers:
- { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
body: |
bb.0:
%0(s32) = G_CONSTANT 42
; CHECK: {{%[0-9]+}}(s32) = G_CONSTANT 42
+ %1(s16) = G_CONSTANT i16 21
+ ; CHECK-NOT: G_CONSTANT i16
+ ; CHECK: [[EXT:%[0-9]+]](s32) = G_CONSTANT i32 21
+ ; CHECK: {{%[0-9]+}}(s16) = G_TRUNC [[EXT]](s32)
+ ; CHECK-NOT: G_CONSTANT i16
+
+ %2(s8) = G_CONSTANT i8 10
+ ; CHECK-NOT: G_CONSTANT i8
+ ; CHECK: [[EXT:%[0-9]+]](s32) = G_CONSTANT i32 10
+ ; CHECK: {{%[0-9]+}}(s8) = G_TRUNC [[EXT]](s32)
+ ; CHECK-NOT: G_CONSTANT i8
+
+ %3(s1) = G_CONSTANT i1 1
+ ; CHECK-NOT: G_CONSTANT i1
+ ; CHECK: [[EXT:%[0-9]+]](s32) = G_CONSTANT i32 -1
+ ; CHECK: {{%[0-9]+}}(s1) = G_TRUNC [[EXT]](s32)
+ ; CHECK-NOT: G_CONSTANT i1
+
%r0 = COPY %0(s32)
BX_RET 14, _, implicit %r0
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
index d3b93e488ef4..ffca431d96ea 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
@@ -35,6 +35,8 @@
define void @test_trunc_s32_16() { ret void }
define void @test_icmp_eq_s32() { ret void }
+ define void @test_fcmp_one_s32() #0 { ret void }
+ define void @test_fcmp_ugt_s64() #0 { ret void }
define void @test_select_s32() { ret void }
@@ -743,6 +745,62 @@ body: |
...
---
+name: test_fcmp_one_s32
+# CHECK-LABEL: name: test_fcmp_one_s32
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: fprb, preferred-register: '' }
+# CHECK: - { id: 1, class: fprb, preferred-register: '' }
+# CHECK: - { id: 2, class: gprb, preferred-register: '' }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s1) = G_FCMP floatpred(one), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %r0 = COPY %3(s32)
+ BX_RET 14, _, implicit %r0
+
+...
+---
+name: test_fcmp_ugt_s64
+# CHECK-LABEL: name: test_fcmp_ugt_s64
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: fprb, preferred-register: '' }
+# CHECK: - { id: 1, class: fprb, preferred-register: '' }
+# CHECK: - { id: 2, class: gprb, preferred-register: '' }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s1) = G_FCMP floatpred(ugt), %0(s64), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %r0 = COPY %3(s32)
+ BX_RET 14, _, implicit %r0
+
+...
+---
name: test_select_s32
# CHECK-LABEL: name: test_select_s32
legalized: true
diff --git a/test/CodeGen/ARM/arguments-nosplit-double.ll b/test/CodeGen/ARM/arguments-nosplit-double.ll
index 8e4dee45ddf2..bb3710842d34 100644
--- a/test/CodeGen/ARM/arguments-nosplit-double.ll
+++ b/test/CodeGen/ARM/arguments-nosplit-double.ll
@@ -8,5 +8,6 @@ define i32 @f(i64 %z, i32 %a, double %b) {
ret i32 %tmp
}
+; CHECK-LABEL: f:
; CHECK-NOT: r3
diff --git a/test/CodeGen/ARM/arguments-nosplit-i64.ll b/test/CodeGen/ARM/arguments-nosplit-i64.ll
index 4a08d0a0406a..02bdc6cc227a 100644
--- a/test/CodeGen/ARM/arguments-nosplit-i64.ll
+++ b/test/CodeGen/ARM/arguments-nosplit-i64.ll
@@ -8,5 +8,6 @@ define i32 @f(i64 %z, i32 %a, i64 %b) {
ret i32 %tmp
}
+; CHECK-LABEL: f:
; CHECK-NOT: r3
diff --git a/test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll b/test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll
index d54848a6bcf1..0ae2d5f6f2f2 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll
@@ -13,13 +13,13 @@
; CHECK: rdefs left
; CHECK-NEXT: Latency : 4
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; CHECK-SAME: Latency=1
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=3
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=3
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=4
define i32 @bar(i32 %a1, i32 %b1, i32 %c1) minsize optsize {
%1 = load i32, i32* @a, align 4
diff --git a/test/CodeGen/ARM/cortex-a57-misched-ldm.ll b/test/CodeGen/ARM/cortex-a57-misched-ldm.ll
index 9cb076651f5b..bc7a14b1028e 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-ldm.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-ldm.ll
@@ -8,9 +8,9 @@
; CHECK: rdefs left
; CHECK-NEXT: Latency : 3
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; CHECK-SAME: Latency=3
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=3
define i32 @foo(i32* %a) nounwind optsize {
diff --git a/test/CodeGen/ARM/cortex-a57-misched-stm-wrback.ll b/test/CodeGen/ARM/cortex-a57-misched-stm-wrback.ll
index 774b0a907e39..67cddc14d047 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-stm-wrback.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-stm-wrback.ll
@@ -10,7 +10,7 @@
; CHECK: rdefs left
; CHECK-NEXT: Latency : 2
; CHECK: Successors
-; CHECK: data
+; CHECK: Data
; CHECK-SAME: Latency=1
define i32 @bar(i32 %v0, i32 %v1, i32 %v2, i32* %addr) {
diff --git a/test/CodeGen/ARM/cortex-a57-misched-vfma.ll b/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
index e234e179ed07..372b2e2f5dc9 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
@@ -11,7 +11,7 @@ define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float
; > VMULS common latency = 5
; CHECK: Latency : 5
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMULS read-advanced latency to VMLAS = 0
; CHECK-SAME: Latency=0
@@ -20,7 +20,7 @@ define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float
; > VMLAS common latency = 9
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLAS read-advanced latency to the next VMLAS = 4
; CHECK-SAME: Latency=4
@@ -28,7 +28,7 @@ define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float
; CHECK-FAST: VFMAS
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLAS not-optimized latency to VMOVRS = 9
; CHECK-SAME: Latency=9
@@ -50,7 +50,7 @@ define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
; > VMULfd common latency = 5
; CHECK: Latency : 5
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; VMULfd read-advanced latency to VMLAfd = 0
; CHECK-SAME: Latency=0
@@ -59,7 +59,7 @@ define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
; > VMLAfd common latency = 9
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLAfd read-advanced latency to the next VMLAfd = 4
; CHECK-SAME: Latency=4
@@ -67,7 +67,7 @@ define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
; CHECK-FAST: VFMAfd
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLAfd not-optimized latency to VMOVRRD = 9
; CHECK-SAME: Latency=9
@@ -88,7 +88,7 @@ define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float
; > VMULS common latency = 5
; CHECK: Latency : 5
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMULS read-advanced latency to VMLSS = 0
; CHECK-SAME: Latency=0
@@ -97,7 +97,7 @@ define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float
; > VMLSS common latency = 9
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLSS read-advanced latency to the next VMLSS = 4
; CHECK-SAME: Latency=4
@@ -105,7 +105,7 @@ define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float
; CHECK-FAST: VFMSS
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLSS not-optimized latency to VMOVRS = 9
; CHECK-SAME: Latency=9
@@ -127,7 +127,7 @@ define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
; > VMULfd common latency = 5
; CHECK: Latency : 5
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; VMULfd read-advanced latency to VMLSfd = 0
; CHECK-SAME: Latency=0
@@ -136,7 +136,7 @@ define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
; > VMLSfd common latency = 9
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLSfd read-advanced latency to the next VMLSfd = 4
; CHECK-SAME: Latency=4
@@ -144,7 +144,7 @@ define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
; CHECK-FAST: VFMSfd
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLSfd not-optimized latency to VMOVRRD = 9
; CHECK-SAME: Latency=9
@@ -165,7 +165,7 @@ define float @Test5(float %f1, float %f2, float %f3) {
; CHECK-FAST: VFNMS
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLAS not-optimized latency to VMOVRS = 9
; CHECK-SAME: Latency=9
@@ -184,7 +184,7 @@ define float @Test6(float %f1, float %f2, float %f3) {
; CHECK-FAST: VFNMA
; CHECK: Latency : 9
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; > VMLAS not-optimized latency to VMOVRS = 9
; CHECK-SAME: Latency=9
diff --git a/test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll b/test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll
index 6cfa823fb969..b5edcc304229 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll
@@ -13,15 +13,15 @@
; CHECK: rdefs left
; CHECK-NEXT: Latency : 6
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; CHECK-SAME: Latency=1
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=1
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=5
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=5
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=6
define i32 @bar(i32* %iptr) minsize optsize {
%1 = load double, double* @a, align 8
diff --git a/test/CodeGen/ARM/cortex-a57-misched-vldm.ll b/test/CodeGen/ARM/cortex-a57-misched-vldm.ll
index 218b5b41a7e4..12c7b3270c3b 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-vldm.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-vldm.ll
@@ -8,11 +8,11 @@
; CHECK: rdefs left
; CHECK-NEXT: Latency : 6
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; CHECK-SAME: Latency=5
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=5
-; CHECK-NEXT: data
+; CHECK-NEXT: Data
; CHECK-SAME: Latency=6
define double @foo(double* %a) nounwind optsize {
diff --git a/test/CodeGen/ARM/cortex-a57-misched-vstm-wrback.ll b/test/CodeGen/ARM/cortex-a57-misched-vstm-wrback.ll
index af1c469d4443..05c498eee49f 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-vstm-wrback.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-vstm-wrback.ll
@@ -9,7 +9,7 @@
; CHECK: rdefs left
; CHECK-NEXT: Latency : 4
; CHECK: Successors:
-; CHECK: data
+; CHECK: Data
; CHECK-SAME: Latency=1
@a = global double 0.0, align 4
diff --git a/test/CodeGen/ARM/fence-singlethread.ll b/test/CodeGen/ARM/fence-singlethread.ll
index ec032ccac423..536b6cc7c9d0 100644
--- a/test/CodeGen/ARM/fence-singlethread.ll
+++ b/test/CodeGen/ARM/fence-singlethread.ll
@@ -11,6 +11,6 @@ define void @fence_singlethread() {
; CHECK: @ COMPILER BARRIER
; CHECK-NOT: dmb
- fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
ret void
}
diff --git a/test/CodeGen/ARM/ror.ll b/test/CodeGen/ARM/ror.ll
new file mode 100644
index 000000000000..0f699a8dd29d
--- /dev/null
+++ b/test/CodeGen/ARM/ror.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s
+
+; rotr (rotr x, 4), 6 -> rotr x, 10 -> ror r0, r0, #10
+define i32 @test1(i32 %x) nounwind readnone {
+; CHECK-LABEL: test1:
+; CHECK: ror r0, r0, #10
+; CHECK: bx lr
+entry:
+ %high_part.i = shl i32 %x, 28
+ %low_part.i = lshr i32 %x, 4
+ %result.i = or i32 %high_part.i, %low_part.i
+ %high_part.i.1 = shl i32 %result.i, 26
+ %low_part.i.2 = lshr i32 %result.i, 6
+ %result.i.3 = or i32 %low_part.i.2, %high_part.i.1
+ ret i32 %result.i.3
+}
+
+; the same vector test
+define <2 x i32> @test2(<2 x i32> %x) nounwind readnone {
+; CHECK-LABEL: test2:
+; CHECK: ror r0, r0, #10
+; CHECK: ror r1, r1, #10
+; CHECK: bx lr
+entry:
+ %high_part.i = shl <2 x i32> %x, <i32 28, i32 28>
+ %low_part.i = lshr <2 x i32> %x, <i32 4, i32 4>
+ %result.i = or <2 x i32> %high_part.i, %low_part.i
+ %high_part.i.1 = shl <2 x i32> %result.i, <i32 26, i32 26>
+ %low_part.i.2 = lshr <2 x i32> %result.i, <i32 6, i32 6>
+ %result.i.3 = or <2 x i32> %low_part.i.2, %high_part.i.1
+ ret <2 x i32> %result.i.3
+}
+
diff --git a/test/CodeGen/ARM/scavenging.mir b/test/CodeGen/ARM/scavenging.mir
new file mode 100644
index 000000000000..09040a3bd217
--- /dev/null
+++ b/test/CodeGen/ARM/scavenging.mir
@@ -0,0 +1,66 @@
+# RUN: llc -o - %s -mtriple=arm-arm-none-eabi -mcpu=cortex-m0 -run-pass scavenger-test | FileCheck %s
+---
+# CHECK-LABEL: name: scavengebug0
+# Make sure we are not spilling/using a physreg used in the very last
+# instruction of the scavenging range.
+# CHECK-NOT: tSTRi {{.*}}%r0,{{.*}}%r0
+# CHECK-NOT: tSTRi {{.*}}%r1,{{.*}}%r1
+# CHECK-NOT: tSTRi {{.*}}%r2,{{.*}}%r2
+# CHECK-NOT: tSTRi {{.*}}%r3,{{.*}}%r3
+# CHECK-NOT: tSTRi {{.*}}%r4,{{.*}}%r4
+# CHECK-NOT: tSTRi {{.*}}%r5,{{.*}}%r5
+# CHECK-NOT: tSTRi {{.*}}%r6,{{.*}}%r6
+# CHECK-NOT: tSTRi {{.*}}%r7,{{.*}}%r7
+name: scavengebug0
+body: |
+ bb.0:
+ ; Bring up register pressure to force emergency spilling
+ %r0 = IMPLICIT_DEF
+ %r1 = IMPLICIT_DEF
+ %r2 = IMPLICIT_DEF
+ %r3 = IMPLICIT_DEF
+ %r4 = IMPLICIT_DEF
+ %r5 = IMPLICIT_DEF
+ %r6 = IMPLICIT_DEF
+ %r7 = IMPLICIT_DEF
+
+ %0 : tgpr = IMPLICIT_DEF
+ %0 = tADDhirr %0, %sp, 14, _
+ tSTRi %r0, %0, 0, 14, _
+
+ %1 : tgpr = IMPLICIT_DEF
+ %1 = tADDhirr %1, %sp, 14, _
+ tSTRi %r1, %1, 0, 14, _
+
+ %2 : tgpr = IMPLICIT_DEF
+ %2 = tADDhirr %2, %sp, 14, _
+ tSTRi %r2, %2, 0, 14, _
+
+ %3 : tgpr = IMPLICIT_DEF
+ %3 = tADDhirr %3, %sp, 14, _
+ tSTRi %r3, %3, 0, 14, _
+
+ %4 : tgpr = IMPLICIT_DEF
+ %4 = tADDhirr %4, %sp, 14, _
+ tSTRi %r4, %4, 0, 14, _
+
+ %5 : tgpr = IMPLICIT_DEF
+ %5 = tADDhirr %5, %sp, 14, _
+ tSTRi %r5, %5, 0, 14, _
+
+ %6 : tgpr = IMPLICIT_DEF
+ %6 = tADDhirr %6, %sp, 14, _
+ tSTRi %r6, %6, 0, 14, _
+
+ %7 : tgpr = IMPLICIT_DEF
+ %7 = tADDhirr %7, %sp, 14, _
+ tSTRi %r7, %7, 0, 14, _
+
+ KILL %r0
+ KILL %r1
+ KILL %r2
+ KILL %r3
+ KILL %r4
+ KILL %r5
+ KILL %r6
+ KILL %r7
diff --git a/test/CodeGen/AVR/branch-relaxation.ll b/test/CodeGen/AVR/branch-relaxation.ll
new file mode 100644
index 000000000000..d6f07f653576
--- /dev/null
+++ b/test/CodeGen/AVR/branch-relaxation.ll
@@ -0,0 +1,96 @@
+; RUN: llc < %s -march=avr | FileCheck %s
+
+; CHECK-LABEL: relax_breq
+; CHECK: cpi r{{[0-9]+}}, 0
+; CHECK: brne LBB0_1
+; CHECK: rjmp LBB0_2
+; LBB0_1:
+
+define i8 @relax_breq(i1 %a) {
+entry-block:
+ br i1 %a, label %hello, label %finished
+
+hello:
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ br label %finished
+finished:
+ ret i8 3
+}
+
+; CHECK-LABEL: no_relax_breq
+; CHECK: cpi r{{[0-9]+}}, 0
+; CHECK: breq [[END_BB:LBB[0-9]+_[0-9]+]]
+; CHECK: nop
+; ...
+; LBB0_1:
+define i8 @no_relax_breq(i1 %a) {
+entry-block:
+ br i1 %a, label %hello, label %finished
+
+hello:
+ ; There are not enough NOPs to require relaxation.
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "nop", ""()
+ br label %finished
+finished:
+ ret i8 3
+}
+
diff --git a/test/CodeGen/AVR/ctlz.ll b/test/CodeGen/AVR/ctlz.ll
index 4f73e846b1f1..8659550baf90 100644
--- a/test/CodeGen/AVR/ctlz.ll
+++ b/test/CodeGen/AVR/ctlz.ll
@@ -10,7 +10,8 @@ declare i8 @llvm.ctlz.i8(i8)
; CHECK-LABEL: count_leading_zeros:
; CHECK: cpi [[RESULT:r[0-9]+]], 0
-; CHECK: breq LBB0_1
+; CHECK: brne LBB0_1
+; CHECK: rjmp LBB0_2
; CHECK: mov [[SCRATCH:r[0-9]+]], {{.*}}[[RESULT]]
; CHECK: lsr {{.*}}[[SCRATCH]]
; CHECK: or {{.*}}[[SCRATCH]], {{.*}}[[RESULT]]
@@ -43,6 +44,6 @@ declare i8 @llvm.ctlz.i8(i8)
; CHECK: add {{.*}}[[RESULT]], {{.*}}[[SCRATCH]]
; CHECK: andi {{.*}}[[RESULT]], 15
; CHECK: ret
-; CHECK: LBB0_1:
+; CHECK: LBB0_2:
; CHECK: ldi {{.*}}[[RESULT]], 8
; CHECK: ret
diff --git a/test/CodeGen/AVR/cttz.ll b/test/CodeGen/AVR/cttz.ll
index 2501566275ea..02d36954f526 100644
--- a/test/CodeGen/AVR/cttz.ll
+++ b/test/CodeGen/AVR/cttz.ll
@@ -10,7 +10,7 @@ declare i8 @llvm.cttz.i8(i8)
; CHECK-LABEL: count_trailing_zeros:
; CHECK: cpi [[RESULT:r[0-9]+]], 0
-; CHECK: breq LBB0_1
+; CHECK: breq [[END_BB:LBB[0-9]+_[0-9]+]]
; CHECK: mov [[SCRATCH:r[0-9]+]], {{.*}}[[RESULT]]
; CHECK: dec {{.*}}[[SCRATCH]]
; CHECK: com {{.*}}[[RESULT]]
@@ -34,7 +34,7 @@ declare i8 @llvm.cttz.i8(i8)
; CHECK: andi {{.*}}[[SCRATCH]], 15
; CHECK: mov {{.*}}[[RESULT]], {{.*}}[[SCRATCH]]
; CHECK: ret
-; CHECK: LBB0_1:
+; CHECK: [[END_BB]]:
; CHECK: ldi {{.*}}[[SCRATCH]], 8
; CHECK: mov {{.*}}[[RESULT]], {{.*}}[[SCRATCH]]
; CHECK: ret
diff --git a/test/CodeGen/AVR/frmidx-iterator-bug.ll b/test/CodeGen/AVR/frmidx-iterator-bug.ll
new file mode 100644
index 000000000000..f9e2f0688faf
--- /dev/null
+++ b/test/CodeGen/AVR/frmidx-iterator-bug.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -march=avr -mattr=avr6 | FileCheck %s
+
+%str_slice = type { i8*, i16 }
+%Machine = type { i16, [0 x i8], i16, [0 x i8], [16 x i8], [0 x i8] }
+
+; CHECK-LABEL: step
+define void @step(%Machine*) {
+ ret void
+}
+
+; CHECK-LABEL: main
+define void @main() {
+start:
+ %machine = alloca %Machine, align 8
+ %v0 = bitcast %Machine* %machine to i8*
+ %v1 = getelementptr inbounds %Machine, %Machine* %machine, i16 0, i32 2
+ %v2 = load i16, i16* %v1, align 2
+ br label %bb2.i5
+
+bb2.i5:
+ %v18 = load volatile i8, i8* inttoptr (i16 77 to i8*), align 1
+ %v19 = icmp sgt i8 %v18, -1
+ br i1 %v19, label %bb2.i5, label %bb.exit6
+
+bb.exit6:
+ %v20 = load volatile i8, i8* inttoptr (i16 78 to i8*), align 2
+ br label %bb7
+
+bb7:
+ call void @step(%Machine* %machine)
+ br label %bb7
+}
+
diff --git a/test/CodeGen/AVR/icall-func-pointer-correct-addr-space.ll b/test/CodeGen/AVR/icall-func-pointer-correct-addr-space.ll
new file mode 100644
index 000000000000..17ac29e2cdb8
--- /dev/null
+++ b/test/CodeGen/AVR/icall-func-pointer-correct-addr-space.ll
@@ -0,0 +1,15 @@
+; RUN: llc -mattr=lpm,lpmw < %s -march=avr | FileCheck %s
+
+declare void @callback(i16 zeroext)
+
+; CHECK-LABEL: foo
+define void @foo() {
+entry:
+ ; CHECK: ldi r{{[0-9]+}}, pm_lo8(callback)
+ ; CHECK-NEXT: ldi r{{[0-9]+}}, pm_hi8(callback)
+ call void @bar(i8 zeroext undef, void (i16)* @callback)
+ ret void
+}
+
+declare void @bar(i8 zeroext, void (i16)*)
+
diff --git a/test/CodeGen/AVR/pseudo/ANDIWRdK.mir b/test/CodeGen/AVR/pseudo/ANDIWRdK.mir
index bcea4e6dfe27..4d58c85f4f23 100644
--- a/test/CodeGen/AVR/pseudo/ANDIWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/ANDIWRdK.mir
@@ -17,8 +17,8 @@ body: |
; CHECK-LABEL: test_andiwrdrr
- ; CHECK: %r20 = ANDIRdK %r20, 175, implicit-def dead %sreg
- ; CHECK-NEXT: %r21 = ANDIRdK %r21, 250, implicit-def %sreg
+ ; CHECK: %r16 = ANDIRdK %r16, 175, implicit-def dead %sreg
+ ; CHECK-NEXT: %r17 = ANDIRdK %r17, 250, implicit-def %sreg
- %r21r20 = ANDIWRdK %r17r16, 64175, implicit-def %sreg
+ %r17r16 = ANDIWRdK %r17r16, 64175, implicit-def %sreg
...
diff --git a/test/CodeGen/AVR/pseudo/COMWRd.mir b/test/CodeGen/AVR/pseudo/COMWRd.mir
index 58ff7af7cb3c..db68a4082b73 100644
--- a/test/CodeGen/AVR/pseudo/COMWRd.mir
+++ b/test/CodeGen/AVR/pseudo/COMWRd.mir
@@ -20,5 +20,5 @@ body: |
; CHECK: %r14 = COMRd %r14, implicit-def dead %sreg
; CHECK-NEXT: %r15 = COMRd %r15, implicit-def %sreg
- %r15r14 = COMWRd %r9r8, implicit-def %sreg
+ %r15r14 = COMWRd %r15r14, implicit-def %sreg
...
diff --git a/test/CodeGen/AVR/pseudo/ORIWRdK.mir b/test/CodeGen/AVR/pseudo/ORIWRdK.mir
index d77a6ba88488..eaa12842df42 100644
--- a/test/CodeGen/AVR/pseudo/ORIWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/ORIWRdK.mir
@@ -20,5 +20,5 @@ body: |
; CHECK: %r20 = ORIRdK %r20, 175, implicit-def dead %sreg
; CHECK-NEXT: %r21 = ORIRdK %r21, 250, implicit-def %sreg
- %r21r20 = ORIWRdK %r17r16, 64175, implicit-def %sreg
+ %r21r20 = ORIWRdK %r21r20, 64175, implicit-def %sreg
...
diff --git a/test/CodeGen/AVR/pseudo/SBCIWRdK.mir b/test/CodeGen/AVR/pseudo/SBCIWRdK.mir
index 644e6106ee79..a92f6951798b 100644
--- a/test/CodeGen/AVR/pseudo/SBCIWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/SBCIWRdK.mir
@@ -20,5 +20,5 @@ body: |
; CHECK: %r20 = SBCIRdK %r20, 175, implicit-def %sreg, implicit killed %sreg
; CHECK-NEXT: %r21 = SBCIRdK %r21, 250, implicit-def %sreg, implicit killed %sreg
- %r21r20 = SBCIWRdK %r17r16, 64175, implicit-def %sreg, implicit %sreg
+ %r21r20 = SBCIWRdK %r21r20, 64175, implicit-def %sreg, implicit %sreg
...
diff --git a/test/CodeGen/AVR/pseudo/SUBIWRdK.mir b/test/CodeGen/AVR/pseudo/SUBIWRdK.mir
index c7d88d7ab3f6..38ff880a5172 100644
--- a/test/CodeGen/AVR/pseudo/SUBIWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/SUBIWRdK.mir
@@ -20,5 +20,5 @@ body: |
; CHECK: %r20 = SUBIRdK %r20, 175, implicit-def %sreg
; CHECK-NEXT: %r21 = SBCIRdK %r21, 250, implicit-def %sreg, implicit killed %sreg
- %r21r20 = SUBIWRdK %r17r16, 64175, implicit-def %sreg
+ %r21r20 = SUBIWRdK %r21r20, 64175, implicit-def %sreg
...
diff --git a/test/CodeGen/AVR/select-mbb-placement-bug.ll b/test/CodeGen/AVR/select-mbb-placement-bug.ll
index ca7ec1ab831c..aca9502b5dfb 100644
--- a/test/CodeGen/AVR/select-mbb-placement-bug.ll
+++ b/test/CodeGen/AVR/select-mbb-placement-bug.ll
@@ -8,9 +8,9 @@ define internal fastcc void @loopy() {
;
; https://github.com/avr-rust/rust/issues/49
-; CHECK: LBB0_1:
-; CHECK: LBB0_2:
-; CHECK-NOT: LBB0_3:
+; CHECK: LBB0_{{[0-9]+}}:
+; CHECK: LBB0_{{[0-9]+}}:
+; CHECK-NOT: LBB0_{{[0-9]+}}:
start:
br label %bb7.preheader
diff --git a/test/CodeGen/BPF/undef.ll b/test/CodeGen/BPF/undef.ll
index de14bfde1ab9..8d8a5f429514 100644
--- a/test/CodeGen/BPF/undef.ll
+++ b/test/CodeGen/BPF/undef.ll
@@ -1,4 +1,5 @@
-; RUN: not llc < %s -march=bpf | FileCheck %s
+; RUN: not llc < %s -march=bpfel | FileCheck -check-prefixes=CHECK,EL %s
+; RUN: not llc < %s -march=bpfeb | FileCheck -check-prefixes=CHECK,EB %s
%struct.bpf_map_def = type { i32, i32, i32, i32 }
%struct.__sk_buff = type opaque
@@ -13,36 +14,31 @@
; Function Attrs: nounwind uwtable
define i32 @ebpf_filter(%struct.__sk_buff* nocapture readnone %ebpf_packet) #0 section "socket1" {
-; CHECK: r2 = r10
-; CHECK: r2 += -2
-; CHECK: r1 = 0
-; CHECK: *(u16 *)(r2 + 6) = r1
-; CHECK: *(u16 *)(r2 + 4) = r1
-; CHECK: *(u16 *)(r2 + 2) = r1
-; CHECK: r2 = 6
-; CHECK: *(u8 *)(r10 - 7) = r2
-; CHECK: r2 = 5
-; CHECK: *(u8 *)(r10 - 8) = r2
-; CHECK: r2 = 7
-; CHECK: *(u8 *)(r10 - 6) = r2
-; CHECK: r2 = 8
-; CHECK: *(u8 *)(r10 - 5) = r2
-; CHECK: r2 = 9
-; CHECK: *(u8 *)(r10 - 4) = r2
-; CHECK: r2 = 10
-; CHECK: *(u8 *)(r10 - 3) = r2
-; CHECK: *(u16 *)(r10 + 24) = r1
-; CHECK: *(u16 *)(r10 + 22) = r1
-; CHECK: *(u16 *)(r10 + 20) = r1
-; CHECK: *(u16 *)(r10 + 18) = r1
-; CHECK: *(u16 *)(r10 + 16) = r1
-; CHECK: *(u16 *)(r10 + 14) = r1
-; CHECK: *(u16 *)(r10 + 12) = r1
-; CHECK: *(u16 *)(r10 + 10) = r1
-; CHECK: *(u16 *)(r10 + 8) = r1
-; CHECK: *(u16 *)(r10 + 6) = r1
-; CHECK: *(u16 *)(r10 - 2) = r1
-; CHECK: *(u16 *)(r10 + 26) = r1
+; CHECK: r1 = r10
+; CHECK: r1 += -2
+; CHECK: r2 = 0
+; CHECK: *(u16 *)(r1 + 6) = r2
+; CHECK: *(u16 *)(r1 + 4) = r2
+; CHECK: *(u16 *)(r1 + 2) = r2
+; EL: r1 = 134678021
+; EB: r1 = 84281096
+; CHECK: *(u32 *)(r10 - 8) = r1
+; CHECK: r1 = 9
+; CHECK: *(u8 *)(r10 - 4) = r1
+; CHECK: r1 = 10
+; CHECK: *(u8 *)(r10 - 3) = r1
+; CHECK: *(u16 *)(r10 + 24) = r2
+; CHECK: *(u16 *)(r10 + 22) = r2
+; CHECK: *(u16 *)(r10 + 20) = r2
+; CHECK: *(u16 *)(r10 + 18) = r2
+; CHECK: *(u16 *)(r10 + 16) = r2
+; CHECK: *(u16 *)(r10 + 14) = r2
+; CHECK: *(u16 *)(r10 + 12) = r2
+; CHECK: *(u16 *)(r10 + 10) = r2
+; CHECK: *(u16 *)(r10 + 8) = r2
+; CHECK: *(u16 *)(r10 + 6) = r2
+; CHECK: *(u16 *)(r10 - 2) = r2
+; CHECK: *(u16 *)(r10 + 26) = r2
; CHECK: r2 = r10
; CHECK: r2 += -8
; CHECK: r1 = <MCOperand Expr:(routing)>ll
diff --git a/test/CodeGen/Generic/pr33094.ll b/test/CodeGen/Generic/pr33094.ll
new file mode 100644
index 000000000000..afa464f63f66
--- /dev/null
+++ b/test/CodeGen/Generic/pr33094.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s
+
+; PR33094
+; Make sure that a constant extractvalue doesn't cause a crash in
+; SelectionDAGBuilder::visitExtractValue.
+
+%A = type {}
+%B = type {}
+%Tuple = type { i64 }
+
+@A_Inst = global %A zeroinitializer
+@B_Inst = global %B zeroinitializer
+
+define i64 @foo() {
+ ret i64 extractvalue (%Tuple select (i1 icmp eq
+ (%B* bitcast (%A* @A_Inst to %B*), %B* @B_Inst),
+ %Tuple { i64 33 }, %Tuple { i64 42 }), 0)
+}
diff --git a/test/CodeGen/Hexagon/convertdptoint.ll b/test/CodeGen/Hexagon/convertdptoint.ll
index a09c2fd14b12..adf76e5dc82e 100644
--- a/test/CodeGen/Hexagon/convertdptoint.ll
+++ b/test/CodeGen/Hexagon/convertdptoint.ll
@@ -12,10 +12,10 @@ entry:
%b = alloca double, align 8
%c = alloca double, align 8
store i32 0, i32* %retval
- store double 1.540000e+01, double* %a, align 8
- store double 9.100000e+00, double* %b, align 8
- %0 = load double, double* %a, align 8
- %1 = load double, double* %b, align 8
+ store volatile double 1.540000e+01, double* %a, align 8
+ store volatile double 9.100000e+00, double* %b, align 8
+ %0 = load volatile double, double* %a, align 8
+ %1 = load volatile double, double* %b, align 8
%add = fadd double %0, %1
store double %add, double* %c, align 8
%2 = load double, double* %c, align 8
diff --git a/test/CodeGen/Hexagon/convertdptoll.ll b/test/CodeGen/Hexagon/convertdptoll.ll
index f46d46cf76b1..6b5bf56a248b 100644
--- a/test/CodeGen/Hexagon/convertdptoll.ll
+++ b/test/CodeGen/Hexagon/convertdptoll.ll
@@ -17,8 +17,8 @@ entry:
%0 = load double, double* %a, align 8
%1 = load double, double* %b, align 8
%add = fadd double %0, %1
- store double %add, double* %c, align 8
- %2 = load double, double* %c, align 8
+ store volatile double %add, double* %c, align 8
+ %2 = load volatile double, double* %c, align 8
%conv = fptosi double %2 to i64
store i64 %conv, i64* %i, align 8
%3 = load i64, i64* %i, align 8
diff --git a/test/CodeGen/Hexagon/convertsptoint.ll b/test/CodeGen/Hexagon/convertsptoint.ll
index 7593e57d852f..939b3b06a8c7 100644
--- a/test/CodeGen/Hexagon/convertsptoint.ll
+++ b/test/CodeGen/Hexagon/convertsptoint.ll
@@ -17,8 +17,8 @@ entry:
%0 = load float, float* %a, align 4
%1 = load float, float* %b, align 4
%add = fadd float %0, %1
- store float %add, float* %c, align 4
- %2 = load float, float* %c, align 4
+ store volatile float %add, float* %c, align 4
+ %2 = load volatile float, float* %c, align 4
%conv = fptosi float %2 to i32
store i32 %conv, i32* %i, align 4
%3 = load i32, i32* %i, align 4
diff --git a/test/CodeGen/Hexagon/convertsptoll.ll b/test/CodeGen/Hexagon/convertsptoll.ll
index d8432cbc812b..f540397ccf5e 100644
--- a/test/CodeGen/Hexagon/convertsptoll.ll
+++ b/test/CodeGen/Hexagon/convertsptoll.ll
@@ -17,8 +17,8 @@ entry:
%0 = load float, float* %a, align 4
%1 = load float, float* %b, align 4
%add = fadd float %0, %1
- store float %add, float* %c, align 4
- %2 = load float, float* %c, align 4
+ store volatile float %add, float* %c, align 4
+ %2 = load volatile float, float* %c, align 4
%conv = fptosi float %2 to i64
store i64 %conv, i64* %i, align 8
%3 = load i64, i64* %i, align 8
diff --git a/test/CodeGen/Hexagon/dadd.ll b/test/CodeGen/Hexagon/dadd.ll
index 5fcd705bab23..3068f499d12d 100644
--- a/test/CodeGen/Hexagon/dadd.ll
+++ b/test/CodeGen/Hexagon/dadd.ll
@@ -9,10 +9,10 @@ entry:
%a = alloca double, align 8
%b = alloca double, align 8
%c = alloca double, align 8
- store double 1.540000e+01, double* %a, align 8
- store double 9.100000e+00, double* %b, align 8
- %0 = load double, double* %a, align 8
- %1 = load double, double* %b, align 8
+ store volatile double 1.540000e+01, double* %a, align 8
+ store volatile double 9.100000e+00, double* %b, align 8
+ %0 = load volatile double, double* %a, align 8
+ %1 = load volatile double, double* %b, align 8
%add = fadd double %0, %1
store double %add, double* %c, align 8
ret i32 0
diff --git a/test/CodeGen/Hexagon/dmul.ll b/test/CodeGen/Hexagon/dmul.ll
index 1b79e0aa7d70..a6cf62b0c0aa 100644
--- a/test/CodeGen/Hexagon/dmul.ll
+++ b/test/CodeGen/Hexagon/dmul.ll
@@ -8,10 +8,10 @@ entry:
%a = alloca double, align 8
%b = alloca double, align 8
%c = alloca double, align 8
- store double 1.540000e+01, double* %a, align 8
- store double 9.100000e+00, double* %b, align 8
- %0 = load double, double* %b, align 8
- %1 = load double, double* %a, align 8
+ store volatile double 1.540000e+01, double* %a, align 8
+ store volatile double 9.100000e+00, double* %b, align 8
+ %0 = load volatile double, double* %b, align 8
+ %1 = load volatile double, double* %a, align 8
%mul = fmul double %0, %1
store double %mul, double* %c, align 8
ret i32 0
diff --git a/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll b/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
index 6bf8224904ec..ccc287c5f2bc 100644
--- a/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
+++ b/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
@@ -12,10 +12,10 @@ entry:
%b = alloca double, align 8
%c = alloca double, align 8
store i32 0, i32* %retval
- store double 1.540000e+01, double* %a, align 8
- store double 9.100000e+00, double* %b, align 8
- %0 = load double, double* %a, align 8
- %1 = load double, double* %b, align 8
+ store volatile double 1.540000e+01, double* %a, align 8
+ store volatile double 9.100000e+00, double* %b, align 8
+ %0 = load volatile double, double* %a, align 8
+ %1 = load volatile double, double* %b, align 8
%add = fadd double %0, %1
store double %add, double* %c, align 8
%2 = load double, double* %c, align 8
diff --git a/test/CodeGen/Hexagon/dsub.ll b/test/CodeGen/Hexagon/dsub.ll
index 8b37301d84fb..d7e44b307cf8 100644
--- a/test/CodeGen/Hexagon/dsub.ll
+++ b/test/CodeGen/Hexagon/dsub.ll
@@ -8,10 +8,10 @@ entry:
%a = alloca double, align 8
%b = alloca double, align 8
%c = alloca double, align 8
- store double 1.540000e+01, double* %a, align 8
- store double 9.100000e+00, double* %b, align 8
- %0 = load double, double* %b, align 8
- %1 = load double, double* %a, align 8
+ store volatile double 1.540000e+01, double* %a, align 8
+ store volatile double 9.100000e+00, double* %b, align 8
+ %0 = load volatile double, double* %b, align 8
+ %1 = load volatile double, double* %a, align 8
%sub = fsub double %0, %1
store double %sub, double* %c, align 8
ret i32 0
diff --git a/test/CodeGen/Hexagon/fadd.ll b/test/CodeGen/Hexagon/fadd.ll
index 0418c1724f5b..65c6182dcc77 100644
--- a/test/CodeGen/Hexagon/fadd.ll
+++ b/test/CodeGen/Hexagon/fadd.ll
@@ -8,10 +8,10 @@ entry:
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
- store float 0x402ECCCCC0000000, float* %a, align 4
- store float 0x4022333340000000, float* %b, align 4
- %0 = load float, float* %a, align 4
- %1 = load float, float* %b, align 4
+ store volatile float 0x402ECCCCC0000000, float* %a, align 4
+ store volatile float 0x4022333340000000, float* %b, align 4
+ %0 = load volatile float, float* %a, align 4
+ %1 = load volatile float, float* %b, align 4
%add = fadd float %0, %1
store float %add, float* %c, align 4
ret i32 0
diff --git a/test/CodeGen/Hexagon/fmul.ll b/test/CodeGen/Hexagon/fmul.ll
index 552f98ec7a53..e20e293c0a13 100644
--- a/test/CodeGen/Hexagon/fmul.ll
+++ b/test/CodeGen/Hexagon/fmul.ll
@@ -9,10 +9,10 @@ entry:
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
- store float 0x402ECCCCC0000000, float* %a, align 4
- store float 0x4022333340000000, float* %b, align 4
- %0 = load float, float* %b, align 4
- %1 = load float, float* %a, align 4
+ store volatile float 0x402ECCCCC0000000, float* %a, align 4
+ store volatile float 0x4022333340000000, float* %b, align 4
+ %0 = load volatile float, float* %b, align 4
+ %1 = load volatile float, float* %a, align 4
%mul = fmul float %0, %1
store float %mul, float* %c, align 4
ret i32 0
diff --git a/test/CodeGen/Hexagon/fsub.ll b/test/CodeGen/Hexagon/fsub.ll
index d7b0e2f65b33..e9a1fa3d192b 100644
--- a/test/CodeGen/Hexagon/fsub.ll
+++ b/test/CodeGen/Hexagon/fsub.ll
@@ -8,10 +8,10 @@ entry:
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
- store float 0x402ECCCCC0000000, float* %a, align 4
- store float 0x4022333340000000, float* %b, align 4
- %0 = load float, float* %b, align 4
- %1 = load float, float* %a, align 4
+ store volatile float 0x402ECCCCC0000000, float* %a, align 4
+ store volatile float 0x4022333340000000, float* %b, align 4
+ %0 = load volatile float, float* %b, align 4
+ %1 = load volatile float, float* %a, align 4
%sub = fsub float %0, %1
store float %sub, float* %c, align 4
ret i32 0
diff --git a/test/CodeGen/Hexagon/hasfp-crash1.ll b/test/CodeGen/Hexagon/hasfp-crash1.ll
new file mode 100644
index 000000000000..1154a7117a70
--- /dev/null
+++ b/test/CodeGen/Hexagon/hasfp-crash1.ll
@@ -0,0 +1,82 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that this testcase does not crash.
+; CHECK: jumpr r31
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+declare i32 @foo0(i32*, i32, i64, i32, i8 zeroext, i8 zeroext, i32) local_unnamed_addr #0
+
+; Function Attrs: nounwind
+define i32 @foo1(i32* %a0, i32 %a1, i32 %a2, i32 %a3, i8 zeroext %a4, i8 zeroext %a5, i32 %a6) local_unnamed_addr #0 !dbg !33 {
+entry:
+ tail call void @llvm.dbg.value(metadata i32 %a6, i64 0, metadata !51, metadata !52), !dbg !53
+ ret i32 undef, !dbg !54
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { nounwind "disable-tail-calls"="true" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv5" "target-features"="-hvx-double,-long-calls" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!26, !27}
+!llvm.linker.options = !{!29, !30, !31, !32, !29, !30, !31, !32}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Clang", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !22)
+!1 = !DIFile(filename: "foo.i", directory: "/path")
+!2 = !{!3, !16}
+!3 = !DICompositeType(tag: DW_TAG_enumeration_type, file: !4, line: 122, size: 8, elements: !5)
+!4 = !DIFile(filename: "foo.h", directory: "/path")
+!5 = !{!6, !7, !8, !9, !10, !11, !12, !13, !14, !15}
+!6 = !DIEnumerator(name: "E0", value: 7)
+!7 = !DIEnumerator(name: "E1", value: 6)
+!8 = !DIEnumerator(name: "E2", value: 5)
+!9 = !DIEnumerator(name: "E3", value: 0)
+!10 = !DIEnumerator(name: "E4", value: 1)
+!11 = !DIEnumerator(name: "E5", value: 7)
+!12 = !DIEnumerator(name: "E6", value: 5)
+!13 = !DIEnumerator(name: "E7", value: 4)
+!14 = !DIEnumerator(name: "E8", value: 4)
+!15 = !DIEnumerator(name: "E9", value: 10)
+!16 = !DICompositeType(tag: DW_TAG_enumeration_type, file: !4, line: 136, size: 8, elements: !17)
+!17 = !{!18, !19, !20, !21}
+!18 = !DIEnumerator(name: "F0", value: 1)
+!19 = !DIEnumerator(name: "F1", value: 2)
+!20 = !DIEnumerator(name: "F2", value: 4)
+!21 = !DIEnumerator(name: "F3", value: 7)
+!22 = !{!23, !24, !25}
+!23 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!24 = !DIBasicType(name: "long long unsigned int", size: 64, encoding: DW_ATE_unsigned)
+!25 = !DIDerivedType(tag: DW_TAG_typedef, name: "t0_t", file: !4, line: 38, baseType: !24)
+!26 = !{i32 2, !"Debug Info Version", i32 3}
+!27 = !{i32 6, !"Linker Options", !28}
+!28 = !{!29, !30, !31, !32}
+!29 = !{!"foo0", !".text"}
+!30 = !{!"foo1", !".text"}
+!31 = !{!"foo2", !".text"}
+!32 = !{!"foo3", !".text"}
+!33 = distinct !DISubprogram(name: "foo1", scope: !34, file: !34, line: 84, type: !35, isLocal: false, isDefinition: true, scopeLine: 85, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !44)
+!34 = !DIFile(filename: "foo.c", directory: "/path")
+!35 = !DISubroutineType(types: !36)
+!36 = !{!37, !38, !39, !40, !41, !42, !43, !37}
+!37 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!38 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !23, size: 32)
+!39 = !DIDerivedType(tag: DW_TAG_typedef, name: "t1_t", file: !4, line: 35, baseType: !23)
+!40 = !DIDerivedType(tag: DW_TAG_typedef, name: "t2_t", file: !4, line: 36, baseType: !23)
+!41 = !DIDerivedType(tag: DW_TAG_typedef, name: "t3_t", file: !4, line: 43, baseType: !23)
+!42 = !DIDerivedType(tag: DW_TAG_typedef, name: "t4_t", file: !4, line: 133, baseType: !3)
+!43 = !DIDerivedType(tag: DW_TAG_typedef, name: "t5_t", file: !4, line: 141, baseType: !16)
+!44 = !{!45, !46, !47, !48, !49, !50, !51}
+!45 = !DILocalVariable(name: "a0", arg: 1, scope: !33, file: !34, line: 84, type: !38)
+!46 = !DILocalVariable(name: "a1", arg: 2, scope: !33, file: !34, line: 84, type: !39)
+!47 = !DILocalVariable(name: "a2", arg: 3, scope: !33, file: !34, line: 84, type: !40)
+!48 = !DILocalVariable(name: "a3", arg: 4, scope: !33, file: !34, line: 84, type: !41)
+!49 = !DILocalVariable(name: "a4", arg: 5, scope: !33, file: !34, line: 84, type: !42)
+!50 = !DILocalVariable(name: "a5", arg: 6, scope: !33, file: !34, line: 84, type: !43)
+!51 = !DILocalVariable(name: "a6", arg: 7, scope: !33, file: !34, line: 84, type: !37)
+!52 = !DIExpression()
+!53 = !DILocation(line: 84, column: 169, scope: !33)
+!54 = !DILocation(line: 86, column: 5, scope: !33)
diff --git a/test/CodeGen/Hexagon/hasfp-crash2.ll b/test/CodeGen/Hexagon/hasfp-crash2.ll
new file mode 100644
index 000000000000..c8b49948ce74
--- /dev/null
+++ b/test/CodeGen/Hexagon/hasfp-crash2.ll
@@ -0,0 +1,83 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that this testcase does not crash.
+; CHECK: call foo0
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+declare void @foo0() local_unnamed_addr #0
+
+; Function Attrs: nounwind
+define void @foo1() local_unnamed_addr #0 !dbg !33 {
+entry:
+ tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !51, metadata !52), !dbg !53
+ tail call void @foo0(), !dbg !54
+ ret void
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { nounwind "disable-tail-calls"="true" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv5" "target-features"="-hvx-double,-long-calls" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!26, !27}
+!llvm.linker.options = !{!29, !30, !31, !32, !29, !30, !31, !32, !29, !30, !31, !32, !29, !30, !31, !32, !29, !30, !31, !32, !29, !30, !31, !32}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Clang", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !22)
+!1 = !DIFile(filename: "foo.i", directory: "/path")
+!2 = !{!3, !16}
+!3 = !DICompositeType(tag: DW_TAG_enumeration_type, file: !4, line: 122, size: 8, elements: !5)
+!4 = !DIFile(filename: "foo.h", directory: "/path")
+!5 = !{!6, !7, !8, !9, !10, !11, !12, !13, !14, !15}
+!6 = !DIEnumerator(name: "E0", value: 7)
+!7 = !DIEnumerator(name: "E1", value: 6)
+!8 = !DIEnumerator(name: "E2", value: 5)
+!9 = !DIEnumerator(name: "E3", value: 0)
+!10 = !DIEnumerator(name: "E4", value: 1)
+!11 = !DIEnumerator(name: "E5", value: 7)
+!12 = !DIEnumerator(name: "E6", value: 5)
+!13 = !DIEnumerator(name: "E7", value: 4)
+!14 = !DIEnumerator(name: "E8", value: 4)
+!15 = !DIEnumerator(name: "E9", value: 10)
+!16 = !DICompositeType(tag: DW_TAG_enumeration_type, file: !4, line: 136, size: 8, elements: !17)
+!17 = !{!18, !19, !20, !21}
+!18 = !DIEnumerator(name: "F0", value: 1)
+!19 = !DIEnumerator(name: "F1", value: 2)
+!20 = !DIEnumerator(name: "F2", value: 4)
+!21 = !DIEnumerator(name: "F3", value: 7)
+!22 = !{!23, !24, !25}
+!23 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!24 = !DIBasicType(name: "long long unsigned int", size: 64, encoding: DW_ATE_unsigned)
+!25 = !DIDerivedType(tag: DW_TAG_typedef, name: "t0_t", file: !4, line: 38, baseType: !24)
+!26 = !{i32 2, !"Debug Info Version", i32 3}
+!27 = !{i32 6, !"Linker Options", !28}
+!28 = !{!29, !30, !31, !32}
+!29 = !{!"foo0", !".text"}
+!30 = !{!"foo1", !".text"}
+!31 = !{!"foo2", !".text"}
+!32 = !{!"foo3", !".text"}
+!33 = distinct !DISubprogram(name: "foo1", scope: !34, file: !34, line: 84, type: !35, isLocal: false, isDefinition: true, scopeLine: 85, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !44)
+!34 = !DIFile(filename: "foo.c", directory: "/path")
+!35 = !DISubroutineType(types: !36)
+!36 = !{!37, !38, !39, !40, !41, !42, !43, !37}
+!37 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!38 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !23, size: 32)
+!39 = !DIDerivedType(tag: DW_TAG_typedef, name: "t1_t", file: !4, line: 35, baseType: !23)
+!40 = !DIDerivedType(tag: DW_TAG_typedef, name: "t2_t", file: !4, line: 36, baseType: !23)
+!41 = !DIDerivedType(tag: DW_TAG_typedef, name: "t3_t", file: !4, line: 43, baseType: !23)
+!42 = !DIDerivedType(tag: DW_TAG_typedef, name: "t4_t", file: !4, line: 133, baseType: !3)
+!43 = !DIDerivedType(tag: DW_TAG_typedef, name: "t5_t", file: !4, line: 141, baseType: !16)
+!44 = !{!45, !46, !47, !48, !49, !50, !51}
+!45 = !DILocalVariable(name: "a0", arg: 1, scope: !33, file: !34, line: 84, type: !38)
+!46 = !DILocalVariable(name: "a1", arg: 2, scope: !33, file: !34, line: 84, type: !39)
+!47 = !DILocalVariable(name: "a2", arg: 3, scope: !33, file: !34, line: 84, type: !40)
+!48 = !DILocalVariable(name: "a3", arg: 4, scope: !33, file: !34, line: 84, type: !41)
+!49 = !DILocalVariable(name: "a4", arg: 5, scope: !33, file: !34, line: 84, type: !42)
+!50 = !DILocalVariable(name: "a5", arg: 6, scope: !33, file: !34, line: 84, type: !43)
+!51 = !DILocalVariable(name: "a6", arg: 7, scope: !33, file: !34, line: 84, type: !37)
+!52 = !DIExpression()
+!53 = !DILocation(line: 84, column: 169, scope: !33)
+!54 = !DILocation(line: 86, column: 12, scope: !33)
diff --git a/test/CodeGen/Hexagon/hvx-nontemporal.ll b/test/CodeGen/Hexagon/hvx-nontemporal.ll
new file mode 100644
index 000000000000..98c5ef4809b0
--- /dev/null
+++ b/test/CodeGen/Hexagon/hvx-nontemporal.ll
@@ -0,0 +1,28 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+target triple = "hexagon"
+
+; Function Attrs: norecurse nounwind
+define void @test(<32 x i32>* nocapture readonly %x, <32 x i32>* nocapture readnone %y, <32 x i32>* nocapture %a, <32 x i32>* nocapture %b) #0 {
+entry:
+; CHECK: v0 = vmem(r0+#7):nt
+ %add.ptr = getelementptr inbounds <32 x i32>, <32 x i32>* %x, i32 7
+ %0 = load <32 x i32>, <32 x i32>* %add.ptr, align 128, !tbaa !1, !nontemporal !4
+
+; CHECK: v1.cur = vmem(r2+#0):nt
+ %1 = load <32 x i32>, <32 x i32>* %a, align 128, !tbaa !1, !nontemporal !4
+
+; CHECK: vmem(r3+#3):nt = v1
+ %add.ptr2 = getelementptr inbounds <32 x i32>, <32 x i32>* %b, i32 3
+ store <32 x i32> %1, <32 x i32>* %add.ptr2, align 128, !tbaa !1, !nontemporal !4
+
+; CHECK: vmem(r2+#0):nt = v0
+ store <32 x i32> %0, <32 x i32>* %a, align 128, !tbaa !1, !nontemporal !4
+ ret void
+}
+
+attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-double" }
+
+!1 = !{!2, !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{i32 1}
diff --git a/test/CodeGen/Hexagon/target-flag-ext.mir b/test/CodeGen/Hexagon/target-flag-ext.mir
new file mode 100644
index 000000000000..49e0d2870e00
--- /dev/null
+++ b/test/CodeGen/Hexagon/target-flag-ext.mir
@@ -0,0 +1,24 @@
+# RUN: llc -march=hexagon -run-pass hexagon-packetizer -o - %s | FileCheck %s
+---
+name: fred
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ ; Check that all these instructions go in the same packet. This is to
+ ; make sure that a target flag (other than HMOTF_ConstExtend) on an
+ ; operand will not be interpreted as a constant-extender flag.
+ ; The combination used below (pcrel + 0) does not technically make sense,
+ ; but combinations that do make sense require constant extending, so
+ ; testing this is not possible otherwise.
+
+ ; CHECK: BUNDLE
+ ; CHECK-DAG: %r0 = A2_tfrsi
+ ; CHECK-DAG: %r1 = A2_tfrsi
+ ; CHECK-DAG: %r2 = A2_tfrsi
+ ; CHECK: }
+ %r0 = A2_tfrsi target-flags (hexagon-pcrel) 0
+ %r1 = A2_tfrsi target-flags (hexagon-pcrel) 0
+ %r2 = A2_tfrsi target-flags (hexagon-pcrel) 0
+...
+
diff --git a/test/CodeGen/MIR/AArch64/atomic-memoperands.mir b/test/CodeGen/MIR/AArch64/atomic-memoperands.mir
index 1fe42a731488..1c81f580bee5 100644
--- a/test/CodeGen/MIR/AArch64/atomic-memoperands.mir
+++ b/test/CodeGen/MIR/AArch64/atomic-memoperands.mir
@@ -14,7 +14,7 @@
# CHECK: %3(s16) = G_LOAD %0(p0) :: (load acquire 2)
# CHECK: G_STORE %3(s16), %0(p0) :: (store release 2)
# CHECK: G_STORE %2(s32), %0(p0) :: (store acq_rel 4)
-# CHECK: G_STORE %1(s64), %0(p0) :: (store singlethread seq_cst 8)
+# CHECK: G_STORE %1(s64), %0(p0) :: (store syncscope("singlethread") seq_cst 8)
name: atomic_memoperands
body: |
bb.0:
@@ -25,6 +25,6 @@ body: |
%3:_(s16) = G_LOAD %0(p0) :: (load acquire 2)
G_STORE %3(s16), %0(p0) :: (store release 2)
G_STORE %2(s32), %0(p0) :: (store acq_rel 4)
- G_STORE %1(s64), %0(p0) :: (store singlethread seq_cst 8)
+ G_STORE %1(s64), %0(p0) :: (store syncscope("singlethread") seq_cst 8)
RET_ReallyLR
...
diff --git a/test/CodeGen/MIR/AArch64/invalid-target-memoperands.mir b/test/CodeGen/MIR/AArch64/invalid-target-memoperands.mir
new file mode 100644
index 000000000000..731d7165b9df
--- /dev/null
+++ b/test/CodeGen/MIR/AArch64/invalid-target-memoperands.mir
@@ -0,0 +1,19 @@
+# RUN: not llc -mtriple=aarch64-none-linux-gnu -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+
+--- |
+
+ define void @target_memoperands_error() {
+ ret void
+ }
+
+...
+---
+name: target_memoperands_error
+body: |
+ bb.0:
+
+ %0:_(p0) = COPY %x0
+ ; CHECK: [[@LINE+1]]:35: use of undefined target MMO flag 'aarch64-invalid'
+ %1:_(s64) = G_LOAD %0(p0) :: ("aarch64-invalid" load 8)
+ RET_ReallyLR
+...
diff --git a/test/CodeGen/MIR/AArch64/target-memoperands.mir b/test/CodeGen/MIR/AArch64/target-memoperands.mir
new file mode 100644
index 000000000000..f853b551e098
--- /dev/null
+++ b/test/CodeGen/MIR/AArch64/target-memoperands.mir
@@ -0,0 +1,22 @@
+# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s | FileCheck %s
+
+--- |
+
+ define void @target_memoperands() {
+ ret void
+ }
+
+...
+---
+# CHECK-LABEL: name: target_memoperands
+# CHECK: %1(s64) = G_LOAD %0(p0) :: ("aarch64-suppress-pair" load 8)
+# CHECK: G_STORE %1(s64), %0(p0) :: ("aarch64-suppress-pair" store 8)
+name: target_memoperands
+body: |
+ bb.0:
+
+ %0:_(p0) = COPY %x0
+ %1:_(s64) = G_LOAD %0(p0) :: ("aarch64-suppress-pair" load 8)
+ G_STORE %1(s64), %0(p0) :: ("aarch64-suppress-pair" store 8)
+ RET_ReallyLR
+...
diff --git a/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir b/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir
index 7cef01c9d12d..c0251232fd5c 100644
--- a/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir
+++ b/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir
@@ -171,8 +171,8 @@ body: |
# CHECK-LABEL: name: add_f32_1.0_multi_f16_use
# CHECK: %13 = V_MOV_B32_e32 1065353216, implicit %exec
-# CHECK: %14 = V_ADD_F16_e32 %13, killed %11, implicit %exec
-# CHECK: %15 = V_ADD_F16_e32 killed %13, killed %12, implicit %exec
+# CHECK: %14 = V_ADD_F16_e32 killed %11, %13, implicit %exec
+# CHECK: %15 = V_ADD_F16_e32 killed %12, killed %13, implicit %exec
name: add_f32_1.0_multi_f16_use
@@ -307,8 +307,8 @@ body: |
# CHECK-LABEL: name: add_f32_1.0_one_f32_use_multi_f16_use
# CHECK: %14 = V_MOV_B32_e32 1065353216, implicit %exec
-# CHECK: %15 = V_ADD_F16_e32 %14, %11, implicit %exec
-# CHECK: %16 = V_ADD_F16_e32 %14, %12, implicit %exec
+# CHECK: %15 = V_ADD_F16_e32 %11, %14, implicit %exec
+# CHECK: %16 = V_ADD_F16_e32 %12, %14, implicit %exec
# CHECK: %17 = V_ADD_F32_e32 1065353216, killed %13, implicit %exec
name: add_f32_1.0_one_f32_use_multi_f16_use
@@ -514,8 +514,8 @@ body: |
# CHECK-LABEL: name: add_f16_1.0_multi_f32_use
# CHECK: %13 = V_MOV_B32_e32 15360, implicit %exec
-# CHECK: %14 = V_ADD_F32_e32 %13, %11, implicit %exec
-# CHECK: %15 = V_ADD_F32_e32 %13, %12, implicit %exec
+# CHECK: %14 = V_ADD_F32_e32 %11, %13, implicit %exec
+# CHECK: %15 = V_ADD_F32_e32 %12, %13, implicit %exec
name: add_f16_1.0_multi_f32_use
alignment: 0
@@ -581,8 +581,8 @@ body: |
# CHECK-LABEL: name: add_f16_1.0_other_high_bits_multi_f16_use
# CHECK: %13 = V_MOV_B32_e32 80886784, implicit %exec
-# CHECK: %14 = V_ADD_F16_e32 %13, %11, implicit %exec
-# CHECK: %15 = V_ADD_F16_e32 %13, %12, implicit %exec
+# CHECK: %14 = V_ADD_F16_e32 %11, %13, implicit %exec
+# CHECK: %15 = V_ADD_F16_e32 %12, %13, implicit %exec
name: add_f16_1.0_other_high_bits_multi_f16_use
alignment: 0
@@ -648,8 +648,8 @@ body: |
# CHECK-LABEL: name: add_f16_1.0_other_high_bits_use_f16_f32
# CHECK: %13 = V_MOV_B32_e32 305413120, implicit %exec
-# CHECK: %14 = V_ADD_F32_e32 %13, %11, implicit %exec
-# CHECK: %15 = V_ADD_F16_e32 %13, %12, implicit %exec
+# CHECK: %14 = V_ADD_F32_e32 %11, %13, implicit %exec
+# CHECK: %15 = V_ADD_F16_e32 %12, %13, implicit %exec
name: add_f16_1.0_other_high_bits_use_f16_f32
alignment: 0
exposesReturnsTwice: false
diff --git a/test/CodeGen/MIR/AMDGPU/syncscopes.mir b/test/CodeGen/MIR/AMDGPU/syncscopes.mir
new file mode 100644
index 000000000000..83506257d8bf
--- /dev/null
+++ b/test/CodeGen/MIR/AMDGPU/syncscopes.mir
@@ -0,0 +1,98 @@
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -run-pass=none %s -o - | FileCheck --check-prefix=GCN %s
+
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "<stdin>"
+ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+ target triple = "amdgcn-amd-amdhsa"
+
+ define void @syncscopes(i32 %agent, i32 addrspace(4)* %agent_out, i32 %workgroup, i32 addrspace(4)* %workgroup_out, i32 %wavefront, i32 addrspace(4)* %wavefront_out) #0 {
+ entry:
+ store atomic i32 %agent, i32 addrspace(4)* %agent_out syncscope("agent") seq_cst, align 4
+ store atomic i32 %workgroup, i32 addrspace(4)* %workgroup_out syncscope("workgroup") seq_cst, align 4
+ store atomic i32 %wavefront, i32 addrspace(4)* %wavefront_out syncscope("wavefront") seq_cst, align 4
+ ret void
+ }
+
+ ; Function Attrs: convergent nounwind
+ declare { i1, i64 } @llvm.amdgcn.if(i1) #1
+
+ ; Function Attrs: convergent nounwind
+ declare { i1, i64 } @llvm.amdgcn.else(i64) #1
+
+ ; Function Attrs: convergent nounwind readnone
+ declare i64 @llvm.amdgcn.break(i64) #2
+
+ ; Function Attrs: convergent nounwind readnone
+ declare i64 @llvm.amdgcn.if.break(i1, i64) #2
+
+ ; Function Attrs: convergent nounwind readnone
+ declare i64 @llvm.amdgcn.else.break(i64, i64) #2
+
+ ; Function Attrs: convergent nounwind
+ declare i1 @llvm.amdgcn.loop(i64) #1
+
+ ; Function Attrs: convergent nounwind
+ declare void @llvm.amdgcn.end.cf(i64) #1
+
+ attributes #0 = { "target-cpu"="gfx803" }
+ attributes #1 = { convergent nounwind }
+ attributes #2 = { convergent nounwind readnone }
+
+# GCN-LABEL: name: syncscopes
+# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out)
+# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out)
+# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out)
+...
+---
+name: syncscopes
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%sgpr4_sgpr5' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %sgpr4_sgpr5
+
+ S_WAITCNT 0
+ %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr4_sgpr5, 8, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %sgpr6 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+ %sgpr2_sgpr3 = S_LOAD_DWORDX2_IMM %sgpr4_sgpr5, 24, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %sgpr7 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 16, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+ %sgpr8 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 32, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+ S_WAITCNT 127
+ %vgpr0 = V_MOV_B32_e32 %sgpr0, implicit %exec, implicit-def %vgpr0_vgpr1, implicit %sgpr0_sgpr1
+ %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr4_sgpr5, 40, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %vgpr1 = V_MOV_B32_e32 killed %sgpr1, implicit %exec, implicit killed %sgpr0_sgpr1, implicit %sgpr0_sgpr1, implicit %exec
+ %vgpr2 = V_MOV_B32_e32 killed %sgpr6, implicit %exec, implicit %exec
+ FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out)
+ S_WAITCNT 112
+ %vgpr0 = V_MOV_B32_e32 %sgpr2, implicit %exec, implicit-def %vgpr0_vgpr1, implicit %sgpr2_sgpr3
+ %vgpr1 = V_MOV_B32_e32 killed %sgpr3, implicit %exec, implicit killed %sgpr2_sgpr3, implicit %sgpr2_sgpr3, implicit %exec
+ %vgpr2 = V_MOV_B32_e32 killed %sgpr7, implicit %exec, implicit %exec
+ FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out)
+ S_WAITCNT 112
+ %vgpr0 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr0_vgpr1, implicit %sgpr4_sgpr5
+ %vgpr1 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit killed %sgpr4_sgpr5, implicit %sgpr4_sgpr5, implicit %exec
+ %vgpr2 = V_MOV_B32_e32 killed %sgpr8, implicit %exec, implicit %exec
+ FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out)
+ S_ENDPGM
+
+...
diff --git a/test/CodeGen/MIR/AMDGPU/target-flags.mir b/test/CodeGen/MIR/AMDGPU/target-flags.mir
new file mode 100644
index 000000000000..7d288dd1b045
--- /dev/null
+++ b/test/CodeGen/MIR/AMDGPU/target-flags.mir
@@ -0,0 +1,29 @@
+# RUN: llc -march=amdgcn -run-pass none -o - %s | FileCheck %s
+--- |
+ define amdgpu_kernel void @flags() {
+ ret void
+ }
+
+ declare void @foo()
+...
+---
+
+# CHECK: SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @foo + 4, target-flags(amdgpu-rel32-hi) @foo + 4, implicit-def dead %scc
+# CHECK: %1 = S_MOV_B64 target-flags(amdgpu-gotprel) @foo
+
+name: flags
+liveins:
+ - { reg: '%sgpr0_sgpr1' }
+frameInfo:
+ maxAlignment: 8
+registers:
+ - { id: 0, class: sreg_64, preferred-register: '' }
+ - { id: 1, class: sreg_64, preferred-register: '' }
+body: |
+ bb.0:
+ liveins: %sgpr0_sgpr1
+ %0 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @foo + 4, target-flags(amdgpu-rel32-hi) @foo + 4, implicit-def dead %scc
+ %1 = S_MOV_B64 target-flags(amdgpu-gotprel) @foo
+
+ S_ENDPGM
+...
diff --git a/test/CodeGen/MIR/Generic/runPass.mir b/test/CodeGen/MIR/Generic/runPass.mir
index 33380d4c6bb4..54c1dd221bdb 100644
--- a/test/CodeGen/MIR/Generic/runPass.mir
+++ b/test/CodeGen/MIR/Generic/runPass.mir
@@ -1,5 +1,6 @@
# RUN: llc -run-pass=greedy -debug-pass=Arguments -o - %s | FileCheck %s
# RUN: llc -run-pass=regallocbasic -debug-pass=Arguments -o - %s | FileCheck %s
+# RUN: llc -run-pass=regallocfast -debug-pass=Arguments -o - %s | FileCheck %s
# Check that passes are initialized correctly, so that it's possible to
# use -run-pass.
@@ -7,6 +8,7 @@
---
# CHECK: name: foo
name: foo
+tracksRegLiveness: true
body: |
bb.0:
...
diff --git a/test/CodeGen/MIR/Hexagon/target-flags.mir b/test/CodeGen/MIR/Hexagon/target-flags.mir
new file mode 100644
index 000000000000..656e0a6ea859
--- /dev/null
+++ b/test/CodeGen/MIR/Hexagon/target-flags.mir
@@ -0,0 +1,36 @@
+# RUN: llc -march=hexagon -run-pass none -o - %s | FileCheck %s
+---
+name: fred
+
+body: |
+ bb.0:
+
+ ; CHECK: target-flags(hexagon-pcrel)
+ %r0 = A2_tfrsi target-flags (hexagon-pcrel) 0
+ ; CHECK: target-flags(hexagon-got)
+ %r0 = A2_tfrsi target-flags (hexagon-got) 0
+ ; CHECK: target-flags(hexagon-lo16)
+ %r0 = A2_tfrsi target-flags (hexagon-lo16) 0
+ ; CHECK: target-flags(hexagon-hi16)
+ %r0 = A2_tfrsi target-flags (hexagon-hi16) 0
+ ; CHECK: target-flags(hexagon-gprel)
+ %r0 = A2_tfrsi target-flags (hexagon-gprel) 0
+ ; CHECK: target-flags(hexagon-gdgot)
+ %r0 = A2_tfrsi target-flags (hexagon-gdgot) 0
+ ; CHECK: target-flags(hexagon-gdplt)
+ %r0 = A2_tfrsi target-flags (hexagon-gdplt) 0
+ ; CHECK: target-flags(hexagon-ie)
+ %r0 = A2_tfrsi target-flags (hexagon-ie) 0
+ ; CHECK: target-flags(hexagon-iegot)
+ %r0 = A2_tfrsi target-flags (hexagon-iegot) 0
+ ; CHECK: target-flags(hexagon-tprel)
+ %r0 = A2_tfrsi target-flags (hexagon-tprel) 0
+
+ ; CHECK: target-flags(hexagon-ext)
+ %r0 = A2_tfrsi target-flags (hexagon-ext) 0
+ ; CHECK: target-flags(hexagon-pcrel, hexagon-ext)
+ %r0 = A2_tfrsi target-flags (hexagon-pcrel,hexagon-ext) 0
+ ; CHECK: target-flags(hexagon-ie, hexagon-ext)
+ %r0 = A2_tfrsi target-flags (hexagon-ie,hexagon-ext) 0
+...
+
diff --git a/test/CodeGen/MIR/X86/tied-physical-regs-match.mir b/test/CodeGen/MIR/X86/tied-physical-regs-match.mir
new file mode 100644
index 000000000000..1ddf649f76a7
--- /dev/null
+++ b/test/CodeGen/MIR/X86/tied-physical-regs-match.mir
@@ -0,0 +1,22 @@
+# RUN: not llc -march=x86-64 -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+# This test ensures that the Machine Verifier detects tied physical registers
+# that don't match.
+
+--- |
+
+ define i32 @foo() {
+ entry:
+ ret i32 0
+ }
+
+...
+---
+name: foo
+body: |
+ bb.0.entry:
+ liveins: %rdi
+
+ ; CHECK: Tied physical registers must match.
+ %rbx = AND64rm killed %rdx, killed %rdi, 1, _, 0, _, implicit-def dead %eflags
+ RETQ %rbx
+...
diff --git a/test/CodeGen/MSP430/Inst16mm.ll b/test/CodeGen/MSP430/Inst16mm.ll
index 951002d60a03..14a799b91717 100644
--- a/test/CodeGen/MSP430/Inst16mm.ll
+++ b/test/CodeGen/MSP430/Inst16mm.ll
@@ -64,6 +64,6 @@ entry:
%0 = load i16, i16* %retval ; <i16> [#uses=1]
ret i16 %0
; CHECK-LABEL: mov2:
-; CHECK: mov.w 0(r1), 4(r1)
-; CHECK: mov.w 2(r1), 6(r1)
+; CHECK-DAG: mov.w 2(r1), 6(r1)
+; CHECK-DAG: mov.w 0(r1), 4(r1)
}
diff --git a/test/CodeGen/NVPTX/lower-aggr-copies.ll b/test/CodeGen/NVPTX/lower-aggr-copies.ll
index f522c6722ee6..4298442157e2 100644
--- a/test/CodeGen/NVPTX/lower-aggr-copies.ll
+++ b/test/CodeGen/NVPTX/lower-aggr-copies.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 -O0 | FileCheck %s --check-prefix PTX
; RUN: opt < %s -S -nvptx-lower-aggr-copies | FileCheck %s --check-prefix IR
+; RUN: opt < %s -S -nvptx-lower-aggr-copies -use-wide-memcpy-loop-lowering=true | FileCheck %s --check-prefix WIR
; Verify that the NVPTXLowerAggrCopies pass works as expected - calls to
; llvm.mem* intrinsics get lowered to loops.
@@ -32,6 +33,23 @@ entry:
; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
; PTX: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; PTX: @%p[[PRED]] bra LBB[[LABEL]]
+
+; WIR-LABEL: @memcpy_caller
+; WIR: entry:
+; WIR: [[LoopCount:%[0-9]+]] = udiv i64 %n, 1
+; WIR: [[ResidualSize:%[0-9]+]] = urem i64 %n, 1
+; WIR: [[Cond:%[0-9]+]] = icmp ne i64 [[LoopCount]], 0
+; WIR: br i1 [[Cond]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion
+
+; WIR: loop-memcpy-expansion:
+; WIR: %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %loop-memcpy-expansion ]
+; WIR: [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
+; WIR: [[Load:%[0-9]+]] = load i8, i8* [[SrcGep]]
+; WIR: [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
+; WIR: store i8 [[Load]], i8* [[DstGep]]
+; WIR: [[IndexInc]] = add i64 %loop-index, 1
+; WIR: [[Cond2:%[0-9]+]] = icmp ult i64 [[IndexInc]], [[LoopCount]]
+; WIR: br i1 [[Cond2]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion
}
define i8* @memcpy_volatile_caller(i8* %dst, i8* %src, i64 %n) #0 {
@@ -50,6 +68,23 @@ entry:
; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
; PTX: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; PTX: @%p[[PRED]] bra LBB[[LABEL]]
+
+; WIR-LABEL: @memcpy_volatile_caller
+; WIR: entry:
+; WIR: [[LoopCount:%[0-9]+]] = udiv i64 %n, 1
+; WIR: [[ResidualSize:%[0-9]+]] = urem i64 %n, 1
+; WIR: [[Cond:%[0-9]+]] = icmp ne i64 [[LoopCount]], 0
+; WIR: br i1 [[Cond]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion
+
+; WIR: loop-memcpy-expansion:
+; WIR: %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %loop-memcpy-expansion ]
+; WIR: [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
+; WIR: [[Load:%[0-9]+]] = load volatile i8, i8* [[SrcGep]]
+; WIR: [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
+; WIR: store volatile i8 [[Load]], i8* [[DstGep]]
+; WIR: [[IndexInc]] = add i64 %loop-index, 1
+; WIR: [[Cond2:%[0-9]+]] = icmp ult i64 [[IndexInc]], [[LoopCount]]
+; WIR: br i1 [[Cond2]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion
}
define i8* @memcpy_casting_caller(i32* %dst, i32* %src, i64 %n) #0 {
@@ -65,6 +100,32 @@ entry:
; IR: [[SRCCAST:%[0-9]+]] = bitcast i32* %src to i8*
; IR: getelementptr inbounds i8, i8* [[SRCCAST]]
; IR: getelementptr inbounds i8, i8* [[DSTCAST]]
+
+; WIR-LABEL: @memcpy_casting_caller
+; WIR: [[DSTCAST:%[0-9]+]] = bitcast i32* %dst to i8*
+; WIR: [[SRCCAST:%[0-9]+]] = bitcast i32* %src to i8*
+; WIR: getelementptr inbounds i8, i8* [[SRCCAST]]
+; WIR: getelementptr inbounds i8, i8* [[DSTCAST]]
+}
+
+define i8* @memcpy_known_size(i8* %dst, i8* %src) {
+entry:
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 144, i32 1, i1 false)
+ ret i8* %dst
+
+; Check that calls with compile-time constant size are handled correctly
+; WIR-LABEL: @memcpy_known_size
+; WIR: entry:
+; WIR: br label %load-store-loop
+; WIR: load-store-loop:
+; WIR: %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %load-store-loop ]
+; WIR: [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
+; WIR: [[Load:%[0-9]+]] = load i8, i8* [[SrcGep]]
+; WIR: [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
+; WIR: store i8 [[Load]], i8* [[DstGep]]
+; WIR: [[IndexInc]] = add i64 %loop-index, 1
+; WIR: [[Cond:%[0-9]+]] = icmp ult i64 %3, 144
+; WIR: br i1 [[Cond]], label %load-store-loop, label %memcpy-split
}
define i8* @memset_caller(i8* %dst, i32 %c, i64 %n) #0 {
diff --git a/test/CodeGen/PowerPC/PR33636.ll b/test/CodeGen/PowerPC/PR33636.ll
new file mode 100644
index 000000000000..4a1216dd4c11
--- /dev/null
+++ b/test/CodeGen/PowerPC/PR33636.ll
@@ -0,0 +1,702 @@
+; Just a test case for a crash reported in
+; https://bugs.llvm.org/show_bug.cgi?id=33636
+; RUN: llc -mtriple=powerpc64le-unknown-unknown -mcpu=pwr8 < %s | FileCheck %s
+@g_225 = external unnamed_addr global i16, align 2
+@g_756 = external global [6 x i32], align 4
+@g_3456 = external global i32, align 4
+@g_3708 = external global [9 x i32], align 4
+@g_1252 = external global i8*, align 8
+@g_3043 = external global float*, align 8
+
+; Function Attrs: nounwind
+define void @main() {
+ br i1 undef, label %1, label %4
+
+; <label>:1: ; preds = %0
+ br i1 undef, label %2, label %3
+
+; <label>:2: ; preds = %1
+ br label %3
+
+; <label>:3: ; preds = %2, %1
+ br label %4
+
+; <label>:4: ; preds = %3, %0
+ br label %5
+
+; <label>:5: ; preds = %5, %4
+ br i1 undef, label %6, label %5
+
+; <label>:6: ; preds = %5
+ br i1 undef, label %7, label %8
+
+; <label>:7: ; preds = %6
+ br i1 undef, label %70, label %69
+
+; <label>:8: ; preds = %6
+ br i1 undef, label %9, label %50
+
+; <label>:9: ; preds = %8
+ br label %11
+
+; <label>:10: ; preds = %28
+ br i1 undef, label %11, label %12
+
+; <label>:11: ; preds = %10, %9
+ br label %13
+
+; <label>:12: ; preds = %10
+ br label %30
+
+; <label>:13: ; preds = %23, %11
+ br i1 undef, label %17, label %14
+
+; <label>:14: ; preds = %13
+ br i1 undef, label %16, label %15
+
+; <label>:15: ; preds = %14
+ br label %22
+
+; <label>:16: ; preds = %14
+ br label %17
+
+; <label>:17: ; preds = %16, %13
+ br i1 undef, label %18, label %19
+
+; <label>:18: ; preds = %17
+ br label %19
+
+; <label>:19: ; preds = %18, %17
+ br i1 undef, label %48, label %20
+
+; <label>:20: ; preds = %19
+ br i1 undef, label %48, label %21
+
+; <label>:21: ; preds = %20
+ br label %22
+
+; <label>:22: ; preds = %21, %15
+ br i1 undef, label %23, label %24
+
+; <label>:23: ; preds = %22
+ br label %13
+
+; <label>:24: ; preds = %22
+ br i1 undef, label %28, label %25
+
+; <label>:25: ; preds = %24
+ br label %26
+
+; <label>:26: ; preds = %26, %25
+ br i1 undef, label %26, label %27
+
+; <label>:27: ; preds = %26
+ br label %48
+
+; <label>:28: ; preds = %24
+ br i1 undef, label %29, label %10
+
+; <label>:29: ; preds = %28
+ br label %48
+
+; <label>:30: ; preds = %33, %12
+ br i1 undef, label %32, label %33
+
+; <label>:31: ; preds = %33
+ br label %34
+
+; <label>:32: ; preds = %30
+ br label %33
+
+; <label>:33: ; preds = %32, %30
+ br i1 undef, label %30, label %31
+
+; <label>:34: ; preds = %47, %31
+ br i1 undef, label %35, label %36
+
+; <label>:35: ; preds = %34
+ br label %36
+
+; <label>:36: ; preds = %35, %34
+ br label %37
+
+; <label>:37: ; preds = %45, %36
+ br i1 undef, label %40, label %38
+
+; <label>:38: ; preds = %37
+ br i1 undef, label %39, label %46
+
+; <label>:39: ; preds = %38
+ br label %41
+
+; <label>:40: ; preds = %37
+ br label %41
+
+; <label>:41: ; preds = %40, %39
+ br label %42
+
+; <label>:42: ; preds = %44, %41
+ br i1 undef, label %43, label %44
+
+; <label>:43: ; preds = %42
+ br label %44
+
+; <label>:44: ; preds = %43, %42
+ br i1 undef, label %42, label %45
+
+; <label>:45: ; preds = %44
+ br i1 undef, label %37, label %47
+
+; <label>:46: ; preds = %38
+ br label %48
+
+; <label>:47: ; preds = %45
+ br i1 undef, label %34, label %49
+
+; <label>:48: ; preds = %46, %29, %27, %20, %19
+ br label %65
+
+; <label>:49: ; preds = %47
+ br label %58
+
+; <label>:50: ; preds = %8
+ br i1 undef, label %52, label %51
+
+; <label>:51: ; preds = %50
+ br label %57
+
+; <label>:52: ; preds = %50
+ br label %53
+
+; <label>:53: ; preds = %56, %52
+ br i1 undef, label %54, label %59
+
+; <label>:54: ; preds = %53
+ br i1 undef, label %60, label %59
+
+; <label>:55: ; preds = %64
+ br label %56
+
+; <label>:56: ; preds = %64, %55
+ br i1 undef, label %57, label %53
+
+; <label>:57: ; preds = %56, %51
+ br label %58
+
+; <label>:58: ; preds = %57, %49
+ br label %65
+
+; <label>:59: ; preds = %63, %62, %61, %60, %54, %53
+ br label %65
+
+; <label>:60: ; preds = %54
+ br i1 undef, label %61, label %59
+
+; <label>:61: ; preds = %60
+ br i1 undef, label %62, label %59
+
+; <label>:62: ; preds = %61
+ br i1 undef, label %63, label %59
+
+; <label>:63: ; preds = %62
+ br i1 undef, label %64, label %59
+
+; <label>:64: ; preds = %63
+ br i1 undef, label %55, label %56
+
+; <label>:65: ; preds = %59, %58, %48
+ br i1 undef, label %66, label %67
+
+; <label>:66: ; preds = %65
+ br label %67
+
+; <label>:67: ; preds = %66, %65
+ br i1 undef, label %68, label %92
+
+; <label>:68: ; preds = %67
+ br label %92
+
+; <label>:69: ; preds = %7
+ br label %70
+
+; <label>:70: ; preds = %69, %7
+ br i1 undef, label %72, label %71
+
+; <label>:71: ; preds = %70
+ br label %72
+
+; <label>:72: ; preds = %71, %70
+ br i1 undef, label %73, label %74
+
+; <label>:73: ; preds = %72
+ br label %74
+
+; <label>:74: ; preds = %73, %72
+ br i1 undef, label %85, label %75
+
+; <label>:75: ; preds = %74
+ br i1 undef, label %84, label %76
+
+; <label>:76: ; preds = %75
+ br i1 undef, label %78, label %77
+
+; <label>:77: ; preds = %77, %76
+ br i1 undef, label %84, label %77
+
+; <label>:78: ; preds = %76
+ br label %79
+
+; <label>:79: ; preds = %83, %78
+ br i1 undef, label %83, label %80
+
+; <label>:80: ; preds = %79
+ br i1 undef, label %81, label %82
+
+; <label>:81: ; preds = %80
+ br label %83
+
+; <label>:82: ; preds = %80
+ br label %83
+
+; <label>:83: ; preds = %82, %81, %79
+ br i1 undef, label %90, label %79
+
+; <label>:84: ; preds = %77, %75
+ br label %92
+
+; <label>:85: ; preds = %74
+ br i1 undef, label %86, label %88
+
+; <label>:86: ; preds = %85
+ br i1 undef, label %89, label %87
+
+; <label>:87: ; preds = %86
+ br i1 undef, label %89, label %88
+
+; <label>:88: ; preds = %87, %85
+ br label %89
+
+; <label>:89: ; preds = %88, %87, %86
+ br label %92
+
+; <label>:90: ; preds = %83
+ br i1 undef, label %92, label %91
+
+; <label>:91: ; preds = %90
+ br label %92
+
+; <label>:92: ; preds = %91, %90, %89, %84, %68, %67
+ br label %93
+
+; <label>:93: ; preds = %100, %92
+ br label %94
+
+; <label>:94: ; preds = %98, %93
+ br label %95
+
+; <label>:95: ; preds = %97, %94
+ br i1 undef, label %96, label %97
+
+; <label>:96: ; preds = %95
+ br label %97
+
+; <label>:97: ; preds = %96, %95
+ br i1 undef, label %95, label %98
+
+; <label>:98: ; preds = %97
+ store i32 7, i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 7), align 4
+ %99 = load volatile i32, i32* @g_3456, align 4
+ br i1 undef, label %94, label %100
+
+; <label>:100: ; preds = %98
+ br i1 undef, label %93, label %101
+
+; <label>:101: ; preds = %100
+ br label %102
+
+; <label>:102: ; preds = %117, %101
+ br label %103
+
+; <label>:103: ; preds = %109, %102
+ store i8** @g_1252, i8*** undef, align 8
+ br i1 undef, label %105, label %104
+
+; <label>:104: ; preds = %103
+ br label %105
+
+; <label>:105: ; preds = %104, %103
+ %106 = icmp eq i32 0, 0
+ br i1 %106, label %107, label %116
+
+; <label>:107: ; preds = %105
+ br i1 icmp ne (i32* getelementptr inbounds ([6 x i32], [6 x i32]* @g_756, i64 0, i64 0), i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 4)), label %109, label %108
+
+; <label>:108: ; preds = %107
+ br label %109
+
+; <label>:109: ; preds = %108, %107
+ %110 = phi i32 [ sdiv (i32 32, i32 zext (i1 icmp eq (i32* getelementptr inbounds ([6 x i32], [6 x i32]* @g_756, i64 0, i64 0), i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 4)) to i32)), %108 ], [ 32, %107 ]
+ %111 = trunc i32 %110 to i8
+ %112 = icmp ne i8 %111, 0
+ %113 = and i1 %112, icmp eq (i32* getelementptr inbounds ([6 x i32], [6 x i32]* @g_756, i64 0, i64 0), i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 4))
+ %114 = zext i1 %113 to i16
+ store i16 %114, i16* @g_225, align 2
+ %115 = load volatile float*, float** @g_3043, align 8
+ br i1 undef, label %103, label %117
+
+; <label>:116: ; preds = %105
+ br label %119
+
+; <label>:117: ; preds = %109
+ br i1 undef, label %102, label %118
+
+; <label>:118: ; preds = %117
+ br label %119
+
+; <label>:119: ; preds = %118, %116
+ br i1 undef, label %120, label %231
+
+; <label>:120: ; preds = %119
+ br label %232
+
+; <label>:121: ; preds = %230
+ br label %122
+
+; <label>:122: ; preds = %230, %121
+ br i1 undef, label %124, label %123
+
+; <label>:123: ; preds = %122
+ br label %124
+
+; <label>:124: ; preds = %123, %122
+ br i1 undef, label %228, label %225
+
+; <label>:125: ; preds = %218
+ br label %127
+
+; <label>:126: ; preds = %218
+ br label %127
+
+; <label>:127: ; preds = %216, %126, %125
+ br i1 undef, label %204, label %128
+
+; <label>:128: ; preds = %127
+ br label %205
+
+; <label>:129: ; preds = %216
+ br i1 undef, label %131, label %130
+
+; <label>:130: ; preds = %129
+ br label %131
+
+; <label>:131: ; preds = %130, %129
+ br i1 undef, label %133, label %132
+
+; <label>:132: ; preds = %131
+ br label %133
+
+; <label>:133: ; preds = %132, %131
+ br label %134
+
+; <label>:134: ; preds = %203, %133
+ br i1 undef, label %193, label %135
+
+; <label>:135: ; preds = %134
+ br label %194
+
+; <label>:136: ; preds = %203
+ br i1 undef, label %138, label %137
+
+; <label>:137: ; preds = %136
+ br label %138
+
+; <label>:138: ; preds = %137, %136
+ br i1 undef, label %192, label %139
+
+; <label>:139: ; preds = %138
+ br label %191
+
+; <label>:140: ; preds = %191, %190
+ br i1 undef, label %180, label %141
+
+; <label>:141: ; preds = %140
+ br label %181
+
+; <label>:142: ; preds = %190
+ br i1 undef, label %143, label %178
+
+; <label>:143: ; preds = %142
+ br label %179
+
+; <label>:144: ; preds = %179
+ br label %176
+
+; <label>:145: ; preds = %179
+ br label %176
+
+; <label>:146: ; preds = %177, %175, %174
+ br i1 undef, label %165, label %147
+
+; <label>:147: ; preds = %146
+ br label %166
+
+; <label>:148: ; preds = %174
+ br label %149
+
+; <label>:149: ; preds = %164, %148
+ br i1 undef, label %154, label %150
+
+; <label>:150: ; preds = %149
+ br label %155
+
+; <label>:151: ; preds = %164
+ br i1 undef, label %153, label %152
+
+; <label>:152: ; preds = %151
+ br label %153
+
+; <label>:153: ; preds = %152, %151
+ ret void
+
+; <label>:154: ; preds = %149
+ br label %155
+
+; <label>:155: ; preds = %154, %150
+ br i1 undef, label %157, label %156
+
+; <label>:156: ; preds = %155
+ br label %158
+
+; <label>:157: ; preds = %155
+ br label %158
+
+; <label>:158: ; preds = %157, %156
+ br i1 undef, label %160, label %159
+
+; <label>:159: ; preds = %158
+ br label %161
+
+; <label>:160: ; preds = %158
+ br label %161
+
+; <label>:161: ; preds = %160, %159
+ br i1 undef, label %163, label %162
+
+; <label>:162: ; preds = %161
+ br label %164
+
+; <label>:163: ; preds = %161
+ br label %164
+
+; <label>:164: ; preds = %163, %162
+ br i1 undef, label %151, label %149
+
+; <label>:165: ; preds = %146
+ br label %166
+
+; <label>:166: ; preds = %165, %147
+ br i1 undef, label %168, label %167
+
+; <label>:167: ; preds = %166
+ br label %169
+
+; <label>:168: ; preds = %166
+ br label %169
+
+; <label>:169: ; preds = %168, %167
+ br i1 undef, label %171, label %170
+
+; <label>:170: ; preds = %169
+ br label %172
+
+; <label>:171: ; preds = %169
+ br label %172
+
+; <label>:172: ; preds = %171, %170
+ br i1 undef, label %174, label %173
+
+; <label>:173: ; preds = %172
+ br label %174
+
+; <label>:174: ; preds = %173, %172
+ br i1 undef, label %148, label %146
+
+; <label>:175: ; preds = %176
+ br label %146
+
+; <label>:176: ; preds = %145, %144
+ br i1 undef, label %177, label %175
+
+; <label>:177: ; preds = %176
+ br label %146
+
+; <label>:178: ; preds = %142
+ br label %179
+
+; <label>:179: ; preds = %178, %143
+ br i1 undef, label %145, label %144
+
+; <label>:180: ; preds = %140
+ br label %181
+
+; <label>:181: ; preds = %180, %141
+ br i1 undef, label %183, label %182
+
+; <label>:182: ; preds = %181
+ br label %184
+
+; <label>:183: ; preds = %181
+ br label %184
+
+; <label>:184: ; preds = %183, %182
+ br i1 undef, label %186, label %185
+
+; <label>:185: ; preds = %184
+ br label %187
+
+; <label>:186: ; preds = %184
+ br label %187
+
+; <label>:187: ; preds = %186, %185
+ br i1 undef, label %189, label %188
+
+; <label>:188: ; preds = %187
+ br label %190
+
+; <label>:189: ; preds = %187
+ br label %190
+
+; <label>:190: ; preds = %189, %188
+ br i1 undef, label %142, label %140
+
+; <label>:191: ; preds = %192, %139
+ br label %140
+
+; <label>:192: ; preds = %138
+ br label %191
+
+; <label>:193: ; preds = %134
+ br label %194
+
+; <label>:194: ; preds = %193, %135
+ br i1 undef, label %196, label %195
+
+; <label>:195: ; preds = %194
+ br label %197
+
+; <label>:196: ; preds = %194
+ br label %197
+
+; <label>:197: ; preds = %196, %195
+ br i1 undef, label %199, label %198
+
+; <label>:198: ; preds = %197
+ br label %200
+
+; <label>:199: ; preds = %197
+ br label %200
+
+; <label>:200: ; preds = %199, %198
+ br i1 undef, label %202, label %201
+
+; <label>:201: ; preds = %200
+ br label %203
+
+; <label>:202: ; preds = %200
+ br label %203
+
+; <label>:203: ; preds = %202, %201
+ br i1 undef, label %136, label %134
+
+; <label>:204: ; preds = %127
+ br label %205
+
+; <label>:205: ; preds = %204, %128
+ br i1 undef, label %207, label %206
+
+; <label>:206: ; preds = %205
+ br label %208
+
+; <label>:207: ; preds = %205
+ br label %208
+
+; <label>:208: ; preds = %207, %206
+ br i1 undef, label %210, label %209
+
+; <label>:209: ; preds = %208
+ br label %211
+
+; <label>:210: ; preds = %208
+ br label %211
+
+; <label>:211: ; preds = %210, %209
+ br i1 undef, label %213, label %212
+
+; <label>:212: ; preds = %211
+ br label %214
+
+; <label>:213: ; preds = %211
+ br label %214
+
+; <label>:214: ; preds = %213, %212
+ br i1 undef, label %216, label %215
+
+; <label>:215: ; preds = %214
+ br label %216
+
+; <label>:216: ; preds = %215, %214
+ br i1 undef, label %129, label %127
+
+; <label>:217: ; preds = %220
+ br label %218
+
+; <label>:218: ; preds = %221, %217
+ br i1 undef, label %126, label %125
+
+; <label>:219: ; preds = %223
+ br label %220
+
+; <label>:220: ; preds = %224, %219
+ br i1 undef, label %221, label %217
+
+; <label>:221: ; preds = %220
+ br label %218
+
+; <label>:222: ; preds = %226
+ br label %223
+
+; <label>:223: ; preds = %227, %222
+ br i1 undef, label %224, label %219
+
+; <label>:224: ; preds = %223
+ br label %220
+
+; <label>:225: ; preds = %124
+ br label %226
+
+; <label>:226: ; preds = %228, %225
+ br i1 undef, label %227, label %222
+
+; <label>:227: ; preds = %226
+ br label %223
+
+; <label>:228: ; preds = %124
+ br label %226
+
+; <label>:229: ; preds = %232
+ br label %230
+
+; <label>:230: ; preds = %233, %229
+ br i1 undef, label %122, label %121
+
+; <label>:231: ; preds = %119
+ br label %232
+
+; <label>:232: ; preds = %231, %120
+ br i1 undef, label %233, label %229
+
+; <label>:233: ; preds = %232
+ br label %230
+
+; CHECK: blr
+}
diff --git a/test/CodeGen/PowerPC/atomics-regression.ll b/test/CodeGen/PowerPC/atomics-regression.ll
index d57b3a203791..0c7a31d16b19 100644
--- a/test/CodeGen/PowerPC/atomics-regression.ll
+++ b/test/CodeGen/PowerPC/atomics-regression.ll
@@ -370,7 +370,7 @@ define void @test36() {
; PPC64LE: # BB#0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- fence singlethread acquire
+ fence syncscope("singlethread") acquire
ret void
}
@@ -379,7 +379,7 @@ define void @test37() {
; PPC64LE: # BB#0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- fence singlethread release
+ fence syncscope("singlethread") release
ret void
}
@@ -388,7 +388,7 @@ define void @test38() {
; PPC64LE: # BB#0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- fence singlethread acq_rel
+ fence syncscope("singlethread") acq_rel
ret void
}
@@ -397,7 +397,7 @@ define void @test39() {
; PPC64LE: # BB#0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: blr
- fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
ret void
}
@@ -1273,7 +1273,7 @@ define void @test80(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread monotonic monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") monotonic monotonic
ret void
}
@@ -1294,7 +1294,7 @@ define void @test81(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acquire monotonic
ret void
}
@@ -1315,7 +1315,7 @@ define void @test82(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire acquire
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acquire acquire
ret void
}
@@ -1336,7 +1336,7 @@ define void @test83(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release monotonic
ret void
}
@@ -1357,7 +1357,7 @@ define void @test84(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release acquire
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release acquire
ret void
}
@@ -1379,7 +1379,7 @@ define void @test85(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acq_rel monotonic
ret void
}
@@ -1401,7 +1401,7 @@ define void @test86(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel acquire
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acq_rel acquire
ret void
}
@@ -1423,7 +1423,7 @@ define void @test87(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst monotonic
ret void
}
@@ -1445,7 +1445,7 @@ define void @test88(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst acquire
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst acquire
ret void
}
@@ -1467,7 +1467,7 @@ define void @test89(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst seq_cst
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
@@ -1487,7 +1487,7 @@ define void @test90(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread monotonic monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") monotonic monotonic
ret void
}
@@ -1508,7 +1508,7 @@ define void @test91(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acquire monotonic
ret void
}
@@ -1529,7 +1529,7 @@ define void @test92(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire acquire
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acquire acquire
ret void
}
@@ -1550,7 +1550,7 @@ define void @test93(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release monotonic
ret void
}
@@ -1571,7 +1571,7 @@ define void @test94(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release acquire
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release acquire
ret void
}
@@ -1593,7 +1593,7 @@ define void @test95(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acq_rel monotonic
ret void
}
@@ -1615,7 +1615,7 @@ define void @test96(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel acquire
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acq_rel acquire
ret void
}
@@ -1637,7 +1637,7 @@ define void @test97(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst monotonic
ret void
}
@@ -1659,7 +1659,7 @@ define void @test98(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst acquire
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst acquire
ret void
}
@@ -1681,7 +1681,7 @@ define void @test99(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst seq_cst
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
@@ -1701,7 +1701,7 @@ define void @test100(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread monotonic monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") monotonic monotonic
ret void
}
@@ -1722,7 +1722,7 @@ define void @test101(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acquire monotonic
ret void
}
@@ -1743,7 +1743,7 @@ define void @test102(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire acquire
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acquire acquire
ret void
}
@@ -1764,7 +1764,7 @@ define void @test103(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release monotonic
ret void
}
@@ -1785,7 +1785,7 @@ define void @test104(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release acquire
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release acquire
ret void
}
@@ -1807,7 +1807,7 @@ define void @test105(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acq_rel monotonic
ret void
}
@@ -1829,7 +1829,7 @@ define void @test106(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel acquire
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acq_rel acquire
ret void
}
@@ -1851,7 +1851,7 @@ define void @test107(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst monotonic
ret void
}
@@ -1873,7 +1873,7 @@ define void @test108(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst acquire
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst acquire
ret void
}
@@ -1895,7 +1895,7 @@ define void @test109(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst seq_cst
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
@@ -1915,7 +1915,7 @@ define void @test110(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread monotonic monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") monotonic monotonic
ret void
}
@@ -1936,7 +1936,7 @@ define void @test111(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acquire monotonic
ret void
}
@@ -1957,7 +1957,7 @@ define void @test112(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire acquire
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acquire acquire
ret void
}
@@ -1978,7 +1978,7 @@ define void @test113(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release monotonic
ret void
}
@@ -1999,7 +1999,7 @@ define void @test114(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release acquire
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release acquire
ret void
}
@@ -2021,7 +2021,7 @@ define void @test115(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acq_rel monotonic
ret void
}
@@ -2043,7 +2043,7 @@ define void @test116(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel acquire
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acq_rel acquire
ret void
}
@@ -2065,7 +2065,7 @@ define void @test117(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst monotonic
ret void
}
@@ -2087,7 +2087,7 @@ define void @test118(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst acquire
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst acquire
ret void
}
@@ -2109,7 +2109,7 @@ define void @test119(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst seq_cst
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
@@ -5847,7 +5847,7 @@ define i8 @test340(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -5862,7 +5862,7 @@ define i8 @test341(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -5877,7 +5877,7 @@ define i8 @test342(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -5893,7 +5893,7 @@ define i8 @test343(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -5909,7 +5909,7 @@ define i8 @test344(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -5923,7 +5923,7 @@ define i16 @test345(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -5938,7 +5938,7 @@ define i16 @test346(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -5953,7 +5953,7 @@ define i16 @test347(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -5969,7 +5969,7 @@ define i16 @test348(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -5985,7 +5985,7 @@ define i16 @test349(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -5999,7 +5999,7 @@ define i32 @test350(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -6014,7 +6014,7 @@ define i32 @test351(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -6029,7 +6029,7 @@ define i32 @test352(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -6045,7 +6045,7 @@ define i32 @test353(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -6061,7 +6061,7 @@ define i32 @test354(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -6075,7 +6075,7 @@ define i64 @test355(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -6090,7 +6090,7 @@ define i64 @test356(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -6105,7 +6105,7 @@ define i64 @test357(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -6121,7 +6121,7 @@ define i64 @test358(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -6137,7 +6137,7 @@ define i64 @test359(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -6152,7 +6152,7 @@ define i8 @test360(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -6168,7 +6168,7 @@ define i8 @test361(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -6184,7 +6184,7 @@ define i8 @test362(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -6201,7 +6201,7 @@ define i8 @test363(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -6218,7 +6218,7 @@ define i8 @test364(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -6233,7 +6233,7 @@ define i16 @test365(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -6249,7 +6249,7 @@ define i16 @test366(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -6265,7 +6265,7 @@ define i16 @test367(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -6282,7 +6282,7 @@ define i16 @test368(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -6299,7 +6299,7 @@ define i16 @test369(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -6314,7 +6314,7 @@ define i32 @test370(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -6330,7 +6330,7 @@ define i32 @test371(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -6346,7 +6346,7 @@ define i32 @test372(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -6363,7 +6363,7 @@ define i32 @test373(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -6380,7 +6380,7 @@ define i32 @test374(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -6395,7 +6395,7 @@ define i64 @test375(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -6411,7 +6411,7 @@ define i64 @test376(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -6427,7 +6427,7 @@ define i64 @test377(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -6444,7 +6444,7 @@ define i64 @test378(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -6461,7 +6461,7 @@ define i64 @test379(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -6476,7 +6476,7 @@ define i8 @test380(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -6492,7 +6492,7 @@ define i8 @test381(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -6508,7 +6508,7 @@ define i8 @test382(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -6525,7 +6525,7 @@ define i8 @test383(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -6542,7 +6542,7 @@ define i8 @test384(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -6557,7 +6557,7 @@ define i16 @test385(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -6573,7 +6573,7 @@ define i16 @test386(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -6589,7 +6589,7 @@ define i16 @test387(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -6606,7 +6606,7 @@ define i16 @test388(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -6623,7 +6623,7 @@ define i16 @test389(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -6638,7 +6638,7 @@ define i32 @test390(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -6654,7 +6654,7 @@ define i32 @test391(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -6670,7 +6670,7 @@ define i32 @test392(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -6687,7 +6687,7 @@ define i32 @test393(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -6704,7 +6704,7 @@ define i32 @test394(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -6719,7 +6719,7 @@ define i64 @test395(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -6735,7 +6735,7 @@ define i64 @test396(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -6751,7 +6751,7 @@ define i64 @test397(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -6768,7 +6768,7 @@ define i64 @test398(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -6785,7 +6785,7 @@ define i64 @test399(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -6800,7 +6800,7 @@ define i8 @test400(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -6816,7 +6816,7 @@ define i8 @test401(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -6832,7 +6832,7 @@ define i8 @test402(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -6849,7 +6849,7 @@ define i8 @test403(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -6866,7 +6866,7 @@ define i8 @test404(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -6881,7 +6881,7 @@ define i16 @test405(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -6897,7 +6897,7 @@ define i16 @test406(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -6913,7 +6913,7 @@ define i16 @test407(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -6930,7 +6930,7 @@ define i16 @test408(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -6947,7 +6947,7 @@ define i16 @test409(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -6962,7 +6962,7 @@ define i32 @test410(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -6978,7 +6978,7 @@ define i32 @test411(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -6994,7 +6994,7 @@ define i32 @test412(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -7011,7 +7011,7 @@ define i32 @test413(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -7028,7 +7028,7 @@ define i32 @test414(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -7043,7 +7043,7 @@ define i64 @test415(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -7059,7 +7059,7 @@ define i64 @test416(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -7075,7 +7075,7 @@ define i64 @test417(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -7092,7 +7092,7 @@ define i64 @test418(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -7109,7 +7109,7 @@ define i64 @test419(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -7124,7 +7124,7 @@ define i8 @test420(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -7140,7 +7140,7 @@ define i8 @test421(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -7156,7 +7156,7 @@ define i8 @test422(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -7173,7 +7173,7 @@ define i8 @test423(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -7190,7 +7190,7 @@ define i8 @test424(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -7205,7 +7205,7 @@ define i16 @test425(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -7221,7 +7221,7 @@ define i16 @test426(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -7237,7 +7237,7 @@ define i16 @test427(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -7254,7 +7254,7 @@ define i16 @test428(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -7271,7 +7271,7 @@ define i16 @test429(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -7286,7 +7286,7 @@ define i32 @test430(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -7302,7 +7302,7 @@ define i32 @test431(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -7318,7 +7318,7 @@ define i32 @test432(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -7335,7 +7335,7 @@ define i32 @test433(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -7352,7 +7352,7 @@ define i32 @test434(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -7367,7 +7367,7 @@ define i64 @test435(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -7383,7 +7383,7 @@ define i64 @test436(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -7399,7 +7399,7 @@ define i64 @test437(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -7416,7 +7416,7 @@ define i64 @test438(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -7433,7 +7433,7 @@ define i64 @test439(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -7448,7 +7448,7 @@ define i8 @test440(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -7464,7 +7464,7 @@ define i8 @test441(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -7480,7 +7480,7 @@ define i8 @test442(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -7497,7 +7497,7 @@ define i8 @test443(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -7514,7 +7514,7 @@ define i8 @test444(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -7529,7 +7529,7 @@ define i16 @test445(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -7545,7 +7545,7 @@ define i16 @test446(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -7561,7 +7561,7 @@ define i16 @test447(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -7578,7 +7578,7 @@ define i16 @test448(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -7595,7 +7595,7 @@ define i16 @test449(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -7610,7 +7610,7 @@ define i32 @test450(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -7626,7 +7626,7 @@ define i32 @test451(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -7642,7 +7642,7 @@ define i32 @test452(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -7659,7 +7659,7 @@ define i32 @test453(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -7676,7 +7676,7 @@ define i32 @test454(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -7691,7 +7691,7 @@ define i64 @test455(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -7707,7 +7707,7 @@ define i64 @test456(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -7723,7 +7723,7 @@ define i64 @test457(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -7740,7 +7740,7 @@ define i64 @test458(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -7757,7 +7757,7 @@ define i64 @test459(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -7772,7 +7772,7 @@ define i8 @test460(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -7788,7 +7788,7 @@ define i8 @test461(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -7804,7 +7804,7 @@ define i8 @test462(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -7821,7 +7821,7 @@ define i8 @test463(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -7838,7 +7838,7 @@ define i8 @test464(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -7853,7 +7853,7 @@ define i16 @test465(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -7869,7 +7869,7 @@ define i16 @test466(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -7885,7 +7885,7 @@ define i16 @test467(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -7902,7 +7902,7 @@ define i16 @test468(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -7919,7 +7919,7 @@ define i16 @test469(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -7934,7 +7934,7 @@ define i32 @test470(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -7950,7 +7950,7 @@ define i32 @test471(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -7966,7 +7966,7 @@ define i32 @test472(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -7983,7 +7983,7 @@ define i32 @test473(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -8000,7 +8000,7 @@ define i32 @test474(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -8015,7 +8015,7 @@ define i64 @test475(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -8031,7 +8031,7 @@ define i64 @test476(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -8047,7 +8047,7 @@ define i64 @test477(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -8064,7 +8064,7 @@ define i64 @test478(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -8081,7 +8081,7 @@ define i64 @test479(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -8099,7 +8099,7 @@ define i8 @test480(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB480_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -8118,7 +8118,7 @@ define i8 @test481(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB481_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -8137,7 +8137,7 @@ define i8 @test482(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB482_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -8157,7 +8157,7 @@ define i8 @test483(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -8177,7 +8177,7 @@ define i8 @test484(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -8195,7 +8195,7 @@ define i16 @test485(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB485_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -8214,7 +8214,7 @@ define i16 @test486(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB486_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -8233,7 +8233,7 @@ define i16 @test487(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB487_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -8253,7 +8253,7 @@ define i16 @test488(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -8273,7 +8273,7 @@ define i16 @test489(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -8290,7 +8290,7 @@ define i32 @test490(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB490_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -8308,7 +8308,7 @@ define i32 @test491(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB491_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -8326,7 +8326,7 @@ define i32 @test492(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB492_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -8345,7 +8345,7 @@ define i32 @test493(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -8364,7 +8364,7 @@ define i32 @test494(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -8381,7 +8381,7 @@ define i64 @test495(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB495_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -8399,7 +8399,7 @@ define i64 @test496(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB496_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -8417,7 +8417,7 @@ define i64 @test497(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB497_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -8436,7 +8436,7 @@ define i64 @test498(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -8455,7 +8455,7 @@ define i64 @test499(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -8473,7 +8473,7 @@ define i8 @test500(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB500_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -8492,7 +8492,7 @@ define i8 @test501(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB501_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -8511,7 +8511,7 @@ define i8 @test502(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB502_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -8531,7 +8531,7 @@ define i8 @test503(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -8551,7 +8551,7 @@ define i8 @test504(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -8569,7 +8569,7 @@ define i16 @test505(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB505_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -8588,7 +8588,7 @@ define i16 @test506(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB506_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -8607,7 +8607,7 @@ define i16 @test507(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB507_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -8627,7 +8627,7 @@ define i16 @test508(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -8647,7 +8647,7 @@ define i16 @test509(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -8664,7 +8664,7 @@ define i32 @test510(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB510_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -8682,7 +8682,7 @@ define i32 @test511(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB511_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -8700,7 +8700,7 @@ define i32 @test512(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB512_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -8719,7 +8719,7 @@ define i32 @test513(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -8738,7 +8738,7 @@ define i32 @test514(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -8755,7 +8755,7 @@ define i64 @test515(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB515_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -8773,7 +8773,7 @@ define i64 @test516(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB516_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -8791,7 +8791,7 @@ define i64 @test517(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB517_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -8810,7 +8810,7 @@ define i64 @test518(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -8829,7 +8829,7 @@ define i64 @test519(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -8846,7 +8846,7 @@ define i8 @test520(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB520_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -8864,7 +8864,7 @@ define i8 @test521(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB521_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -8882,7 +8882,7 @@ define i8 @test522(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB522_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -8901,7 +8901,7 @@ define i8 @test523(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -8920,7 +8920,7 @@ define i8 @test524(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -8937,7 +8937,7 @@ define i16 @test525(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB525_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -8955,7 +8955,7 @@ define i16 @test526(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB526_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -8973,7 +8973,7 @@ define i16 @test527(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB527_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -8992,7 +8992,7 @@ define i16 @test528(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -9011,7 +9011,7 @@ define i16 @test529(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -9028,7 +9028,7 @@ define i32 @test530(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB530_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -9046,7 +9046,7 @@ define i32 @test531(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB531_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -9064,7 +9064,7 @@ define i32 @test532(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB532_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -9083,7 +9083,7 @@ define i32 @test533(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -9102,7 +9102,7 @@ define i32 @test534(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -9119,7 +9119,7 @@ define i64 @test535(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB535_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -9137,7 +9137,7 @@ define i64 @test536(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB536_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -9155,7 +9155,7 @@ define i64 @test537(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB537_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -9174,7 +9174,7 @@ define i64 @test538(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -9193,7 +9193,7 @@ define i64 @test539(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -9210,7 +9210,7 @@ define i8 @test540(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB540_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -9228,7 +9228,7 @@ define i8 @test541(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB541_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -9246,7 +9246,7 @@ define i8 @test542(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB542_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -9265,7 +9265,7 @@ define i8 @test543(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -9284,7 +9284,7 @@ define i8 @test544(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -9301,7 +9301,7 @@ define i16 @test545(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB545_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -9319,7 +9319,7 @@ define i16 @test546(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB546_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -9337,7 +9337,7 @@ define i16 @test547(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB547_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -9356,7 +9356,7 @@ define i16 @test548(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -9375,7 +9375,7 @@ define i16 @test549(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -9392,7 +9392,7 @@ define i32 @test550(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB550_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -9410,7 +9410,7 @@ define i32 @test551(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB551_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -9428,7 +9428,7 @@ define i32 @test552(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB552_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -9447,7 +9447,7 @@ define i32 @test553(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -9466,7 +9466,7 @@ define i32 @test554(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -9483,7 +9483,7 @@ define i64 @test555(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB555_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -9501,7 +9501,7 @@ define i64 @test556(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB556_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -9519,7 +9519,7 @@ define i64 @test557(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB557_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -9538,7 +9538,7 @@ define i64 @test558(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -9557,7 +9557,7 @@ define i64 @test559(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
diff --git a/test/CodeGen/PowerPC/bitreverse.ll b/test/CodeGen/PowerPC/bitreverse.ll
deleted file mode 100644
index dca7340d035d..000000000000
--- a/test/CodeGen/PowerPC/bitreverse.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc -verify-machineinstrs -march=ppc64 %s -o - | FileCheck %s
-
-; These tests just check that the plumbing is in place for @llvm.bitreverse. The
-; actual output is massive at the moment as llvm.bitreverse is not yet legal.
-
-declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>) readnone
-
-define <2 x i16> @f(<2 x i16> %a) {
-; CHECK-LABEL: f:
-; CHECK: rlwinm
- %b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
- ret <2 x i16> %b
-}
-
-declare i8 @llvm.bitreverse.i8(i8) readnone
-
-define i8 @g(i8 %a) {
-; CHECK-LABEL: g:
-; CHECK: rlwinm
-; CHECK: rlwimi
- %b = call i8 @llvm.bitreverse.i8(i8 %a)
- ret i8 %b
-}
diff --git a/test/CodeGen/PowerPC/build-vector-tests.ll b/test/CodeGen/PowerPC/build-vector-tests.ll
index c42f677d17ab..60bec4d18f12 100644
--- a/test/CodeGen/PowerPC/build-vector-tests.ll
+++ b/test/CodeGen/PowerPC/build-vector-tests.ll
@@ -1028,7 +1028,7 @@ entry:
; P9LE: vperm
; P9LE: blr
; P8BE: sldi {{r[0-9]+}}, r4, 2
-; P8BE-DAG: lxvw4x {{v[0-9]+}}, r3,
+; P8BE-DAG: lxvw4x {{v[0-9]+}}, 0, r3
; P8BE-DAG: lxvw4x
; P8BE: vperm
; P8BE: blr
@@ -2187,7 +2187,7 @@ entry:
; P9LE: vperm
; P9LE: blr
; P8BE-DAG: sldi {{r[0-9]+}}, r4, 2
-; P8BE-DAG: lxvw4x {{v[0-9]+}}, r3
+; P8BE-DAG: lxvw4x {{v[0-9]+}}, 0, r3
; P8BE-DAG: lxvw4x
; P8BE: vperm
; P8BE: blr
diff --git a/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll b/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll
new file mode 100644
index 000000000000..71755f722cb2
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr9 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs | FileCheck %s
+
+; Function Attrs: norecurse nounwind readonly
+define signext i32 @limit_loop(i32 signext %iters, i32* nocapture readonly %vec, i32 signext %limit) local_unnamed_addr {
+entry:
+ %cmp5 = icmp sgt i32 %iters, 0
+ br i1 %cmp5, label %for.body.preheader, label %cleanup
+
+for.body.preheader: ; preds = %entry
+ %0 = sext i32 %iters to i64
+ br label %for.body
+
+for.cond: ; preds = %for.body
+ %cmp = icmp slt i64 %indvars.iv.next, %0
+ br i1 %cmp, label %for.body, label %cleanup
+
+for.body: ; preds = %for.body.preheader, %for.cond
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.cond ]
+ %arrayidx = getelementptr inbounds i32, i32* %vec, i64 %indvars.iv
+ %1 = load i32, i32* %arrayidx, align 4
+ %cmp1 = icmp slt i32 %1, %limit
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ br i1 %cmp1, label %for.cond, label %cleanup
+
+cleanup: ; preds = %for.body, %for.cond, %entry
+ %2 = phi i32 [ 0, %entry ], [ 0, %for.cond ], [ 1, %for.body ]
+ ret i32 %2
+; CHECK-LABEL: limit_loop
+; CHECK: mtctr
+; CHECK-NOT: addi {{[0-9]+}}, {{[0-9]+}}, 1
+; CHECK: bdnz
+; CHECK: blr
+}
+
+
diff --git a/test/CodeGen/PowerPC/ppc-redzone-alignment-bug.ll b/test/CodeGen/PowerPC/ppc-redzone-alignment-bug.ll
new file mode 100644
index 000000000000..87b45beeab7e
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc-redzone-alignment-bug.ll
@@ -0,0 +1,32 @@
+; Note that the formula for negative-number alignment should be y = x & ~(n-1) rather than y = (x + (n-1)) & ~(n-1).
+; After patch https://reviews.llvm.org/D34337, we could save 16 bytes in the best case.
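+; Illustrative arithmetic only (hypothetical value, not taken from this test): with n = 16 and
+; x = -530, x & ~(n-1) = -544 while (x + (n-1)) & ~(n-1) = -528, so the two formulas differ by 16 bytes.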
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s -check-prefix=CHECK-BE
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s -check-prefix=CHECK-LE
+
+define signext i32 @bar(i32 signext %ii) {
+entry:
+ %0 = tail call i32 asm sideeffect "add $0, $1, $2\0A", "=r,r,r,~{f14},~{r15},~{v20}"(i32 %ii, i32 10)
+ ret i32 %0
+; Before the fix by patch D34337:
+; stdu 1, -544(1)
+; std 15, 264(1)
+; stfd 14, 400(1)
+; stdu 1, -560(1)
+; std 15, 280(1)
+; stfd 14, 416(1)
+
+; After the fix by patch D34337:
+; CHECK-LE: stdu 1, -528(1)
+; CHECK-LE: std 15, 248(1)
+; CHECK-LE: stfd 14, 384(1)
+; CHECK-BE: stdu 1, -544(1)
+; CHECK-BE: std 15, 264(1)
+; CHECK-BE: stfd 14, 400(1)
+}
+
+define signext i32 @foo() {
+entry:
+ %call = tail call signext i32 @bar(i32 signext 5)
+ ret i32 %call
+}
+
diff --git a/test/CodeGen/PowerPC/ppc64le-smallarg.ll b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
index 0e871c358869..3a425406d043 100644
--- a/test/CodeGen/PowerPC/ppc64le-smallarg.ll
+++ b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
@@ -53,8 +53,8 @@ entry:
ret void
}
; CHECK: @caller2
-; CHECK: li [[TOCOFF:[0-9]+]], 136
-; CHECK: stxsspx {{[0-9]+}}, 1, [[TOCOFF]]
+; CHECK: addi [[TOCOFF:[0-9]+]], {{[0-9]+}}, 136
+; CHECK: stxsspx {{[0-9]+}}, 0, [[TOCOFF]]
; CHECK: bl test2
declare float @test2(float, float, float, float, float, float, float, float, float, float, float, float, float, float)
diff --git a/test/CodeGen/PowerPC/pr33093.ll b/test/CodeGen/PowerPC/pr33093.ll
new file mode 100644
index 000000000000..5212973f8317
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr33093.ll
@@ -0,0 +1,165 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+
+define zeroext i32 @ReverseBits(i32 zeroext %n) {
+; CHECK-LABEL: ReverseBits:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lis 4, -21846
+; CHECK-NEXT: lis 5, 21845
+; CHECK-NEXT: slwi 6, 3, 1
+; CHECK-NEXT: srwi 3, 3, 1
+; CHECK-NEXT: lis 7, -13108
+; CHECK-NEXT: lis 8, 13107
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: lis 10, -3856
+; CHECK-NEXT: lis 11, 3855
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: and 4, 6, 4
+; CHECK-NEXT: ori 5, 8, 13107
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: ori 4, 7, 52428
+; CHECK-NEXT: slwi 9, 3, 2
+; CHECK-NEXT: srwi 3, 3, 2
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: and 4, 9, 4
+; CHECK-NEXT: ori 5, 11, 3855
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: ori 4, 10, 61680
+; CHECK-NEXT: slwi 12, 3, 4
+; CHECK-NEXT: srwi 3, 3, 4
+; CHECK-NEXT: and 4, 12, 4
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: rotlwi 4, 3, 24
+; CHECK-NEXT: rlwimi 4, 3, 8, 8, 15
+; CHECK-NEXT: rlwimi 4, 3, 8, 24, 31
+; CHECK-NEXT: rldicl 3, 4, 0, 32
+; CHECK-NEXT: clrldi 3, 3, 32
+; CHECK-NEXT: blr
+entry:
+ %shr = lshr i32 %n, 1
+ %and = and i32 %shr, 1431655765
+ %and1 = shl i32 %n, 1
+ %shl = and i32 %and1, -1431655766
+ %or = or i32 %and, %shl
+ %shr2 = lshr i32 %or, 2
+ %and3 = and i32 %shr2, 858993459
+ %and4 = shl i32 %or, 2
+ %shl5 = and i32 %and4, -858993460
+ %or6 = or i32 %and3, %shl5
+ %shr7 = lshr i32 %or6, 4
+ %and8 = and i32 %shr7, 252645135
+ %and9 = shl i32 %or6, 4
+ %shl10 = and i32 %and9, -252645136
+ %or11 = or i32 %and8, %shl10
+ %shr13 = lshr i32 %or11, 24
+ %and14 = lshr i32 %or11, 8
+ %shr15 = and i32 %and14, 65280
+ %and17 = shl i32 %or11, 8
+ %shl18 = and i32 %and17, 16711680
+ %shl21 = shl i32 %or11, 24
+ %or16 = or i32 %shl21, %shr13
+ %or19 = or i32 %or16, %shr15
+ %or22 = or i32 %or19, %shl18
+ ret i32 %or22
+}
+
+define i64 @ReverseBits64(i64 %n) {
+; CHECK-LABEL: ReverseBits64:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lis 4, -21846
+; CHECK-NEXT: lis 5, 21845
+; CHECK-NEXT: lis 6, -13108
+; CHECK-NEXT: lis 7, 13107
+; CHECK-NEXT: sldi 8, 3, 1
+; CHECK-NEXT: rldicl 3, 3, 63, 1
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: ori 6, 6, 52428
+; CHECK-NEXT: ori 7, 7, 13107
+; CHECK-NEXT: sldi 4, 4, 32
+; CHECK-NEXT: sldi 5, 5, 32
+; CHECK-NEXT: oris 4, 4, 43690
+; CHECK-NEXT: oris 5, 5, 21845
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: sldi 5, 6, 32
+; CHECK-NEXT: sldi 6, 7, 32
+; CHECK-NEXT: and 4, 8, 4
+; CHECK-NEXT: lis 7, 3855
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: oris 12, 5, 52428
+; CHECK-NEXT: oris 9, 6, 13107
+; CHECK-NEXT: lis 6, -3856
+; CHECK-NEXT: ori 7, 7, 3855
+; CHECK-NEXT: sldi 8, 3, 2
+; CHECK-NEXT: ori 4, 12, 52428
+; CHECK-NEXT: rldicl 3, 3, 62, 2
+; CHECK-NEXT: ori 5, 9, 13107
+; CHECK-NEXT: ori 6, 6, 61680
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: sldi 5, 6, 32
+; CHECK-NEXT: and 4, 8, 4
+; CHECK-NEXT: sldi 6, 7, 32
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: oris 10, 5, 61680
+; CHECK-NEXT: oris 11, 6, 3855
+; CHECK-NEXT: sldi 6, 3, 4
+; CHECK-NEXT: ori 4, 10, 61680
+; CHECK-NEXT: rldicl 3, 3, 60, 4
+; CHECK-NEXT: ori 5, 11, 3855
+; CHECK-NEXT: and 4, 6, 4
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: rldicl 4, 3, 32, 32
+; CHECK-NEXT: rlwinm 6, 3, 24, 0, 31
+; CHECK-NEXT: rlwinm 5, 4, 24, 0, 31
+; CHECK-NEXT: rlwimi 6, 3, 8, 8, 15
+; CHECK-NEXT: rlwimi 5, 4, 8, 8, 15
+; CHECK-NEXT: rlwimi 6, 3, 8, 24, 31
+; CHECK-NEXT: rlwimi 5, 4, 8, 24, 31
+; CHECK-NEXT: sldi 12, 5, 32
+; CHECK-NEXT: or 3, 12, 6
+; CHECK-NEXT: blr
+entry:
+ %shr = lshr i64 %n, 1
+ %and = and i64 %shr, 6148914691236517205
+ %and1 = shl i64 %n, 1
+ %shl = and i64 %and1, -6148914691236517206
+ %or = or i64 %and, %shl
+ %shr2 = lshr i64 %or, 2
+ %and3 = and i64 %shr2, 3689348814741910323
+ %and4 = shl i64 %or, 2
+ %shl5 = and i64 %and4, -3689348814741910324
+ %or6 = or i64 %and3, %shl5
+ %shr7 = lshr i64 %or6, 4
+ %and8 = and i64 %shr7, 1085102592571150095
+ %and9 = shl i64 %or6, 4
+ %shl10 = and i64 %and9, -1085102592571150096
+ %or11 = or i64 %and8, %shl10
+ %shr13 = lshr i64 %or11, 56
+ %and14 = lshr i64 %or11, 40
+ %shr15 = and i64 %and14, 65280
+ %and17 = lshr i64 %or11, 24
+ %shr18 = and i64 %and17, 16711680
+ %and20 = lshr i64 %or11, 8
+ %shr21 = and i64 %and20, 4278190080
+ %and23 = shl i64 %or11, 8
+ %shl24 = and i64 %and23, 1095216660480
+ %and26 = shl i64 %or11, 24
+ %shl27 = and i64 %and26, 280375465082880
+ %and29 = shl i64 %or11, 40
+ %shl30 = and i64 %and29, 71776119061217280
+ %shl33 = shl i64 %or11, 56
+ %or16 = or i64 %shl33, %shr13
+ %or19 = or i64 %or16, %shr15
+ %or22 = or i64 %or19, %shr18
+ %or25 = or i64 %or22, %shr21
+ %or28 = or i64 %or25, %shl24
+ %or31 = or i64 %or28, %shl27
+ %or34 = or i64 %or31, %shl30
+ ret i64 %or34
+}
diff --git a/test/CodeGen/PowerPC/select-addrRegRegOnly.ll b/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
new file mode 100644
index 000000000000..f880d1faf9d9
--- /dev/null
+++ b/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64-unknown-unknown -verify-machineinstrs < %s | FileCheck %s
+
+; Function Attrs: norecurse nounwind readonly
+define float @testSingleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
+; CHECK-LABEL: testSingleAccess:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addi 3, 3, 8
+; CHECK-NEXT: lxsiwax 0, 0, 3
+; CHECK-NEXT: xscvsxdsp 1, 0
+; CHECK-NEXT: blr
+entry:
+ %arrayidx = getelementptr inbounds i32, i32* %arr, i64 2
+ %0 = load i32, i32* %arrayidx, align 4
+ %conv = sitofp i32 %0 to float
+ ret float %conv
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @testMultipleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
+; CHECK-LABEL: testMultipleAccess:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 4, 8(3)
+; CHECK-NEXT: lwz 12, 12(3)
+; CHECK-NEXT: add 3, 12, 4
+; CHECK-NEXT: mtvsrwa 0, 3
+; CHECK-NEXT: xscvsxdsp 1, 0
+; CHECK-NEXT: blr
+entry:
+ %arrayidx = getelementptr inbounds i32, i32* %arr, i64 2
+ %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 3
+ %1 = load i32, i32* %arrayidx1, align 4
+ %add = add nsw i32 %1, %0
+ %conv = sitofp i32 %add to float
+ ret float %conv
+}
diff --git a/test/CodeGen/PowerPC/svr4-redzone.ll b/test/CodeGen/PowerPC/svr4-redzone.ll
index 7bb6cc180c96..26c4410ded6d 100644
--- a/test/CodeGen/PowerPC/svr4-redzone.ll
+++ b/test/CodeGen/PowerPC/svr4-redzone.ll
@@ -29,11 +29,11 @@ entry:
define i8* @bigstack() nounwind {
entry:
- %0 = alloca i8, i32 230
+ %0 = alloca i8, i32 290
ret i8* %0
}
; PPC32-LABEL: bigstack:
-; PPC32: stwu 1, -240(1)
+; PPC32: stwu 1, -304(1)
; PPC64-LABEL: bigstack:
-; PPC64: stdu 1, -288(1)
+; PPC64: stdu 1, -352(1)
diff --git a/test/CodeGen/PowerPC/tailcall1-64.ll b/test/CodeGen/PowerPC/tailcall1-64.ll
index 3dc2672556ea..58ab0bce309c 100644
--- a/test/CodeGen/PowerPC/tailcall1-64.ll
+++ b/test/CodeGen/PowerPC/tailcall1-64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -relocation-model=static -verify-machineinstrs < %s -march=ppc64 -tailcallopt | grep TC_RETURNd8
+; RUN: llc -relocation-model=static -verify-machineinstrs < %s -mtriple=ppc64-- -tailcallopt | grep TC_RETURNd8
+; RUN: llc -relocation-model=static -verify-machineinstrs -mtriple=ppc64-- < %s | FileCheck %s
define fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
entry:
ret i32 %a3
@@ -6,6 +7,8 @@ entry:
define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
entry:
- %tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 ) ; <i32> [#uses=1]
+ %tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 )
ret i32 %tmp11
+; CHECK-LABEL: tailcaller
+; CHECK-NOT: stdu
}
diff --git a/test/CodeGen/PowerPC/testBitReverse.ll b/test/CodeGen/PowerPC/testBitReverse.ll
new file mode 100644
index 000000000000..6993d17ad8f3
--- /dev/null
+++ b/test/CodeGen/PowerPC/testBitReverse.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+declare i32 @llvm.bitreverse.i32(i32)
+define i32 @testBitReverseIntrinsicI32(i32 %arg) {
+; CHECK-LABEL: testBitReverseIntrinsicI32:
+; CHECK: # BB#0:
+; CHECK-NEXT: lis 4, -21846
+; CHECK-NEXT: lis 5, 21845
+; CHECK-NEXT: slwi 6, 3, 1
+; CHECK-NEXT: srwi 3, 3, 1
+; CHECK-NEXT: lis 7, -13108
+; CHECK-NEXT: lis 8, 13107
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: lis 10, -3856
+; CHECK-NEXT: lis 11, 3855
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: and 4, 6, 4
+; CHECK-NEXT: ori 5, 8, 13107
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: ori 4, 7, 52428
+; CHECK-NEXT: slwi 9, 3, 2
+; CHECK-NEXT: srwi 3, 3, 2
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: and 4, 9, 4
+; CHECK-NEXT: ori 5, 11, 3855
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: ori 4, 10, 61680
+; CHECK-NEXT: slwi 12, 3, 4
+; CHECK-NEXT: srwi 3, 3, 4
+; CHECK-NEXT: and 4, 12, 4
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: rotlwi 4, 3, 24
+; CHECK-NEXT: rlwimi 4, 3, 8, 8, 15
+; CHECK-NEXT: rlwimi 4, 3, 8, 24, 31
+; CHECK-NEXT: rldicl 3, 4, 0, 32
+; CHECK-NEXT: blr
+ %res = call i32 @llvm.bitreverse.i32(i32 %arg)
+ ret i32 %res
+}
+
+declare i64 @llvm.bitreverse.i64(i64)
+define i64 @testBitReverseIntrinsicI64(i64 %arg) {
+; CHECK-LABEL: testBitReverseIntrinsicI64:
+; CHECK: # BB#0:
+; CHECK-NEXT: lis 4, -21846
+; CHECK-NEXT: lis 5, 21845
+; CHECK-NEXT: lis 6, -13108
+; CHECK-NEXT: lis 7, 13107
+; CHECK-NEXT: sldi 8, 3, 1
+; CHECK-NEXT: rldicl 3, 3, 63, 1
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: ori 6, 6, 52428
+; CHECK-NEXT: ori 7, 7, 13107
+; CHECK-NEXT: sldi 4, 4, 32
+; CHECK-NEXT: sldi 5, 5, 32
+; CHECK-NEXT: oris 4, 4, 43690
+; CHECK-NEXT: oris 5, 5, 21845
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: sldi 5, 6, 32
+; CHECK-NEXT: sldi 6, 7, 32
+; CHECK-NEXT: and 4, 8, 4
+; CHECK-NEXT: lis 7, 3855
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: oris 12, 5, 52428
+; CHECK-NEXT: oris 9, 6, 13107
+; CHECK-NEXT: lis 6, -3856
+; CHECK-NEXT: ori 7, 7, 3855
+; CHECK-NEXT: sldi 8, 3, 2
+; CHECK-NEXT: ori 4, 12, 52428
+; CHECK-NEXT: rldicl 3, 3, 62, 2
+; CHECK-NEXT: ori 5, 9, 13107
+; CHECK-NEXT: ori 6, 6, 61680
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: sldi 5, 6, 32
+; CHECK-NEXT: and 4, 8, 4
+; CHECK-NEXT: sldi 6, 7, 32
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: oris 10, 5, 61680
+; CHECK-NEXT: oris 11, 6, 3855
+; CHECK-NEXT: sldi 6, 3, 4
+; CHECK-NEXT: ori 4, 10, 61680
+; CHECK-NEXT: rldicl 3, 3, 60, 4
+; CHECK-NEXT: ori 5, 11, 3855
+; CHECK-NEXT: and 4, 6, 4
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: rldicl 4, 3, 32, 32
+; CHECK-NEXT: rlwinm 6, 3, 24, 0, 31
+; CHECK-NEXT: rlwinm 5, 4, 24, 0, 31
+; CHECK-NEXT: rlwimi 6, 3, 8, 8, 15
+; CHECK-NEXT: rlwimi 5, 4, 8, 8, 15
+; CHECK-NEXT: rlwimi 6, 3, 8, 24, 31
+; CHECK-NEXT: rlwimi 5, 4, 8, 24, 31
+; CHECK-NEXT: sldi 12, 5, 32
+; CHECK-NEXT: or 3, 12, 6
+; CHECK-NEXT: blr
+ %res = call i64 @llvm.bitreverse.i64(i64 %arg)
+ ret i64 %res
+}
diff --git a/test/CodeGen/PowerPC/vec_extract_p9.ll b/test/CodeGen/PowerPC/vec_extract_p9.ll
new file mode 100644
index 000000000000..241209a0e6b7
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_extract_p9.ll
@@ -0,0 +1,167 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-gnu-linux -mcpu=pwr9 < %s | FileCheck %s -check-prefix=CHECK-LE
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-gnu-linux -mcpu=pwr9 < %s | FileCheck %s -check-prefix=CHECK-BE
+
+define zeroext i8 @test1(<16 x i8> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test1:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextubrx 3, 5, 2
+; CHECK-LE-NEXT: clrldi 3, 3, 56
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test1:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextublx 3, 5, 2
+; CHECK-BE-NEXT: clrldi 3, 3, 56
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 %index
+ ret i8 %vecext
+}
+
+define signext i8 @test2(<16 x i8> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test2:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextubrx 3, 5, 2
+; CHECK-LE-NEXT: extsb 3, 3
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test2:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextublx 3, 5, 2
+; CHECK-BE-NEXT: extsb 3, 3
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 %index
+ ret i8 %vecext
+}
+
+define zeroext i16 @test3(<8 x i16> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test3:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30
+; CHECK-LE-NEXT: vextuhrx 3, 3, 2
+; CHECK-LE-NEXT: clrldi 3, 3, 48
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test3:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30
+; CHECK-BE-NEXT: vextuhlx 3, 3, 2
+; CHECK-BE-NEXT: clrldi 3, 3, 48
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 %index
+ ret i16 %vecext
+}
+
+define signext i16 @test4(<8 x i16> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test4:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30
+; CHECK-LE-NEXT: vextuhrx 3, 3, 2
+; CHECK-LE-NEXT: extsh 3, 3
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test4:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30
+; CHECK-BE-NEXT: vextuhlx 3, 3, 2
+; CHECK-BE-NEXT: extsh 3, 3
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 %index
+ ret i16 %vecext
+}
+
+define zeroext i32 @test5(<4 x i32> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test5:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29
+; CHECK-LE-NEXT: vextuwrx 3, 3, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test5:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29
+; CHECK-BE-NEXT: vextuwlx 3, 3, 2
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 %index
+ ret i32 %vecext
+}
+
+define signext i32 @test6(<4 x i32> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test6:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29
+; CHECK-LE-NEXT: vextuwrx 3, 3, 2
+; CHECK-LE-NEXT: extsw 3, 3
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test6:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29
+; CHECK-BE-NEXT: vextuwlx 3, 3, 2
+; CHECK-BE-NEXT: extsw 3, 3
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 %index
+ ret i32 %vecext
+}
+
+; Test with immediate index
+define zeroext i8 @test7(<16 x i8> %a) {
+; CHECK-LE-LABEL: test7:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: li 3, 1
+; CHECK-LE-NEXT: vextubrx 3, 3, 2
+; CHECK-LE-NEXT: clrldi 3, 3, 56
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test7:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: li 3, 1
+; CHECK-BE-NEXT: vextublx 3, 3, 2
+; CHECK-BE-NEXT: clrldi 3, 3, 56
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 1
+ ret i8 %vecext
+}
+
+define zeroext i16 @test8(<8 x i16> %a) {
+; CHECK-LE-LABEL: test8:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: li 3, 2
+; CHECK-LE-NEXT: vextuhrx 3, 3, 2
+; CHECK-LE-NEXT: clrldi 3, 3, 48
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test8:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: li 3, 2
+; CHECK-BE-NEXT: vextuhlx 3, 3, 2
+; CHECK-BE-NEXT: clrldi 3, 3, 48
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 1
+ ret i16 %vecext
+}
+
+define zeroext i32 @test9(<4 x i32> %a) {
+; CHECK-LE-LABEL: test9:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: li 3, 4
+; CHECK-LE-NEXT: vextuwrx 3, 3, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test9:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: li 3, 4
+; CHECK-BE-NEXT: vextuwlx 3, 3, 2
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 1
+ ret i32 %vecext
+}
diff --git a/test/CodeGen/PowerPC/vec_int_ext.ll b/test/CodeGen/PowerPC/vec_int_ext.ll
index 9e1218c423b7..d7bed503318e 100644
--- a/test/CodeGen/PowerPC/vec_int_ext.ll
+++ b/test/CodeGen/PowerPC/vec_int_ext.ll
@@ -1,12 +1,18 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs -mcpu=pwr9 < %s | FileCheck %s -check-prefix=PWR9
-target triple = "powerpc64le-unknown-linux-gnu"
-
-define <4 x i32> @vextsb2w(<16 x i8> %a) {
-; PWR9-LABEL: vextsb2w:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsb2w 2, 2
-; PWR9-NEXT: blr
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-gnu-linux -mcpu=pwr9 < %s | FileCheck %s -check-prefix=CHECK-LE
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-gnu-linux -mcpu=pwr9 < %s | FileCheck %s -check-prefix=CHECK-BE
+
+define <4 x i32> @vextsb2wLE(<16 x i8> %a) {
+; CHECK-LE-LABEL: vextsb2wLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsb2w 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsb2wLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vperm 2, 2, 2, 3
+; CHECK-BE-NEXT: vextsb2w 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <16 x i8> %a, i32 0
%conv = sext i8 %vecext to i32
@@ -23,11 +29,17 @@ entry:
ret <4 x i32> %vecinit9
}
-define <2 x i64> @vextsb2d(<16 x i8> %a) {
-; PWR9-LABEL: vextsb2d:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsb2d 2, 2
-; PWR9-NEXT: blr
+define <2 x i64> @vextsb2dLE(<16 x i8> %a) {
+; CHECK-LE-LABEL: vextsb2dLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsb2d 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsb2dLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vperm 2, 2, 2, 3
+; CHECK-BE-NEXT: vextsb2d 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <16 x i8> %a, i32 0
%conv = sext i8 %vecext to i64
@@ -38,11 +50,17 @@ entry:
ret <2 x i64> %vecinit3
}
-define <4 x i32> @vextsh2w(<8 x i16> %a) {
-; PWR9-LABEL: vextsh2w:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsh2w 2, 2
-; PWR9-NEXT: blr
+define <4 x i32> @vextsh2wLE(<8 x i16> %a) {
+; CHECK-LE-LABEL: vextsh2wLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsh2w 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsh2wLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vperm 2, 2, 2, 3
+; CHECK-BE-NEXT: vextsh2w 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <8 x i16> %a, i32 0
%conv = sext i16 %vecext to i32
@@ -59,11 +77,17 @@ entry:
ret <4 x i32> %vecinit9
}
-define <2 x i64> @vextsh2d(<8 x i16> %a) {
-; PWR9-LABEL: vextsh2d:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsh2d 2, 2
-; PWR9-NEXT: blr
+define <2 x i64> @vextsh2dLE(<8 x i16> %a) {
+; CHECK-LE-LABEL: vextsh2dLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsh2d 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsh2dLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vperm 2, 2, 2, 3
+; CHECK-BE-NEXT: vextsh2d 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <8 x i16> %a, i32 0
%conv = sext i16 %vecext to i64
@@ -74,11 +98,17 @@ entry:
ret <2 x i64> %vecinit3
}
-define <2 x i64> @vextsw2d(<4 x i32> %a) {
-; PWR9-LABEL: vextsw2d:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsw2d 2, 2
-; PWR9-NEXT: blr
+define <2 x i64> @vextsw2dLE(<4 x i32> %a) {
+; CHECK-LE-LABEL: vextsw2dLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsw2d 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsw2dLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vmrgew
+; CHECK-BE-NEXT: vextsw2d 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <4 x i32> %a, i32 0
%conv = sext i32 %vecext to i64
@@ -88,3 +118,170 @@ entry:
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
ret <2 x i64> %vecinit3
}
+
+define <4 x i32> @vextsb2wBE(<16 x i8> %a) {
+; CHECK-BE-LABEL: vextsb2wBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsb2w 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsb2wBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 13
+; CHECK-LE-NEXT: vextsb2w 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 3
+ %conv = sext i8 %vecext to i32
+ %vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
+ %vecext1 = extractelement <16 x i8> %a, i32 7
+ %conv2 = sext i8 %vecext1 to i32
+ %vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
+ %vecext4 = extractelement <16 x i8> %a, i32 11
+ %conv5 = sext i8 %vecext4 to i32
+ %vecinit6 = insertelement <4 x i32> %vecinit3, i32 %conv5, i32 2
+ %vecext7 = extractelement <16 x i8> %a, i32 15
+ %conv8 = sext i8 %vecext7 to i32
+ %vecinit9 = insertelement <4 x i32> %vecinit6, i32 %conv8, i32 3
+ ret <4 x i32> %vecinit9
+}
+
+define <2 x i64> @vextsb2dBE(<16 x i8> %a) {
+; CHECK-BE-LABEL: vextsb2dBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsb2d 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsb2dBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 9
+; CHECK-LE-NEXT: vextsb2d 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 7
+ %conv = sext i8 %vecext to i64
+ %vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
+ %vecext1 = extractelement <16 x i8> %a, i32 15
+ %conv2 = sext i8 %vecext1 to i64
+ %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
+ ret <2 x i64> %vecinit3
+}
+
+define <4 x i32> @vextsh2wBE(<8 x i16> %a) {
+; CHECK-BE-LABEL: vextsh2wBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsh2w 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsh2wBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 14
+; CHECK-LE-NEXT: vextsh2w 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 1
+ %conv = sext i16 %vecext to i32
+ %vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
+ %vecext1 = extractelement <8 x i16> %a, i32 3
+ %conv2 = sext i16 %vecext1 to i32
+ %vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
+ %vecext4 = extractelement <8 x i16> %a, i32 5
+ %conv5 = sext i16 %vecext4 to i32
+ %vecinit6 = insertelement <4 x i32> %vecinit3, i32 %conv5, i32 2
+ %vecext7 = extractelement <8 x i16> %a, i32 7
+ %conv8 = sext i16 %vecext7 to i32
+ %vecinit9 = insertelement <4 x i32> %vecinit6, i32 %conv8, i32 3
+ ret <4 x i32> %vecinit9
+}
+
+define <2 x i64> @vextsh2dBE(<8 x i16> %a) {
+; CHECK-BE-LABEL: vextsh2dBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsh2d 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsh2dBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 10
+; CHECK-LE-NEXT: vextsh2d 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 3
+ %conv = sext i16 %vecext to i64
+ %vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
+ %vecext1 = extractelement <8 x i16> %a, i32 7
+ %conv2 = sext i16 %vecext1 to i64
+ %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
+ ret <2 x i64> %vecinit3
+}
+
+define <2 x i64> @vextsw2dBE(<4 x i32> %a) {
+; CHECK-BE-LABEL: vextsw2dBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsw2d 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsw2dBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 12
+; CHECK-LE-NEXT: vextsw2d 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 1
+ %conv = sext i32 %vecext to i64
+ %vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
+ %vecext1 = extractelement <4 x i32> %a, i32 3
+ %conv2 = sext i32 %vecext1 to i64
+ %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
+ ret <2 x i64> %vecinit3
+}
+
+define <2 x i64> @vextDiffVectors(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LE-LABEL: vextDiffVectors:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NOT: vextsw2d
+
+; CHECK-BE-LABEL: vextDiffVectors:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NOT: vextsw2d
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 0
+ %conv = sext i32 %vecext to i64
+ %vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
+ %vecext1 = extractelement <4 x i32> %b, i32 2
+ %conv2 = sext i32 %vecext1 to i64
+ %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
+ ret <2 x i64> %vecinit3
+}
+
+define <8 x i16> @testInvalidExtend(<16 x i8> %a) {
+entry:
+; CHECK-LE-LABEL: testInvalidExtend:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NOT: vexts
+
+; CHECK-BE-LABEL: testInvalidExtend:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NOT: vexts
+
+ %vecext = extractelement <16 x i8> %a, i32 0
+ %conv = sext i8 %vecext to i16
+ %vecinit = insertelement <8 x i16> undef, i16 %conv, i32 0
+ %vecext1 = extractelement <16 x i8> %a, i32 2
+ %conv2 = sext i8 %vecext1 to i16
+ %vecinit3 = insertelement <8 x i16> %vecinit, i16 %conv2, i32 1
+ %vecext4 = extractelement <16 x i8> %a, i32 4
+ %conv5 = sext i8 %vecext4 to i16
+ %vecinit6 = insertelement <8 x i16> %vecinit3, i16 %conv5, i32 2
+ %vecext7 = extractelement <16 x i8> %a, i32 6
+ %conv8 = sext i8 %vecext7 to i16
+ %vecinit9 = insertelement <8 x i16> %vecinit6, i16 %conv8, i32 3
+ %vecext10 = extractelement <16 x i8> %a, i32 8
+ %conv11 = sext i8 %vecext10 to i16
+ %vecinit12 = insertelement <8 x i16> %vecinit9, i16 %conv11, i32 4
+ %vecext13 = extractelement <16 x i8> %a, i32 10
+ %conv14 = sext i8 %vecext13 to i16
+ %vecinit15 = insertelement <8 x i16> %vecinit12, i16 %conv14, i32 5
+ %vecext16 = extractelement <16 x i8> %a, i32 12
+ %conv17 = sext i8 %vecext16 to i16
+ %vecinit18 = insertelement <8 x i16> %vecinit15, i16 %conv17, i32 6
+ %vecext19 = extractelement <16 x i8> %a, i32 14
+ %conv20 = sext i8 %vecext19 to i16
+ %vecinit21 = insertelement <8 x i16> %vecinit18, i16 %conv20, i32 7
+ ret <8 x i16> %vecinit21
+}
diff --git a/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll b/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
index 67146e40db0e..5346d8a429fb 100644
--- a/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
+++ b/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
@@ -321,8 +321,8 @@ entry:
; CHECK: lxsibzx 34, 0, 3
; CHECK-NEXT: vspltb 2, 2, 7
; CHECK-BE-LABEL: vecucus
-; CHECK-BE: li [[OFFSET:[0-9]+]], 1
-; CHECK-BE-NEXT: lxsibzx 34, 3, [[OFFSET]]
+; CHECK-BE: addi [[OFFSET:[0-9]+]], [[OFFSET]], 1
+; CHECK-BE-NEXT: lxsibzx 34, 0, [[OFFSET]]
; CHECK-BE-NEXT: vspltb 2, 2, 7
}
@@ -385,8 +385,8 @@ entry:
; CHECK: lxsibzx 34, 0, 3
; CHECK-NEXT: vspltb 2, 2, 7
; CHECK-BE-LABEL: vecscus
-; CHECK-BE: li [[OFFSET:[0-9]+]], 1
-; CHECK-BE-NEXT: lxsibzx 34, 3, [[OFFSET]]
+; CHECK-BE: addi [[OFFSET:[0-9]+]], [[OFFSET]], 1
+; CHECK-BE-NEXT: lxsibzx 34, 0, [[OFFSET]]
; CHECK-BE-NEXT: vspltb 2, 2, 7
}
@@ -487,8 +487,8 @@ entry:
; CHECK: lxsibzx 34, 0, 3
; CHECK-NEXT: vspltb 2, 2, 7
; CHECK-BE-LABEL: vecucss
-; CHECK-BE: li [[OFFSET:[0-9]+]], 1
-; CHECK-BE-NEXT: lxsibzx 34, 3, [[OFFSET]]
+; CHECK-BE: addi [[OFFSET:[0-9]+]], [[OFFSET]], 1
+; CHECK-BE-NEXT: lxsibzx 34, 0, [[OFFSET]]
; CHECK-BE-NEXT: vspltb 2, 2, 7
}
@@ -540,8 +540,8 @@ entry:
; CHECK: lxsibzx 34, 0, 3
; CHECK-NEXT: vspltb 2, 2, 7
; CHECK-BE-LABEL: vecscss
-; CHECK-BE: li [[OFFSET:[0-9]+]], 1
-; CHECK-BE-NEXT: lxsibzx 34, 3, [[OFFSET]]
+; CHECK-BE: addi [[OFFSET:[0-9]+]], [[OFFSET]], 1
+; CHECK-BE-NEXT: lxsibzx 34, 0, [[OFFSET]]
; CHECK-BE-NEXT: vspltb 2, 2, 7
}
diff --git a/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir b/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
new file mode 100644
index 000000000000..8798fcecfc3b
--- /dev/null
+++ b/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
@@ -0,0 +1,34 @@
+# RUN: llc -verify-machineinstrs -run-pass regallocfast -mtriple s390x-ibm-linux -o - %s | FileCheck %s
+--- |
+
+ @g_167 = external global [5 x i64], align 8
+ define void @main() local_unnamed_addr {
+ ret void
+ }
+...
+# Make sure the usage of different subregisters on the same virtual register
+# does not result in invalid kill flags.
+# PR33677
+---
+name: main
+alignment: 2
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr128bit }
+ - { id: 1, class: gr64bit }
+ - { id: 2, class: addr64bit }
+# CHECK: %r0q = L128
+# CHECK-NEXT: %r0l = COPY %r1l
+# Although R0L partially redefines R0Q, it must not mark R0Q as kill
+# because R1D is still live through that instruction.
+# CHECK-NOT: %r0q<imp-use,kill>
+# CHECK-NEXT: %r2d = COPY %r1d
+# CHECK-NEXT: LARL
+body: |
+ bb.0:
+ %0.subreg_hl32 = COPY %0.subreg_l32
+ %1 = COPY %0.subreg_l64
+ %2 = LARL @g_167
+ STC %1.subreg_l32, %2, 8, _
+
+...
diff --git a/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll b/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
index 9fcc0f5d617b..5c3800e97093 100644
--- a/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
+++ b/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
@@ -95,15 +95,17 @@ if.end:
}
; CHECK-LABEL: diamond2:
-; CHECK-BP: itte
-; CHECK-BP: streq
-; CHECK-BP: ldreq
-; CHECK-BP: strne
-; CHECK-NOBP: cbz
-; CHECK-NOBP: str
-; CHECK-NOBP: b
-; CHECK-NOBP: str
-; CHECK-NOBP: ldr
+; CHECK-BP: cbz
+; CHECK-BP: str
+; CHECK-BP: str
+; CHECK-BP: b
+; CHECK-BP: str
+; CHECK-BP: ldr
+; CHECK-NOBP: ittee
+; CHECK-NOBP: streq
+; CHECK-NOBP: ldreq
+; CHECK-NOBP: strne
+; CHECK-NOBP: strne
define i32 @diamond2(i32 %n, i32 %m, i32* %p, i32* %q) {
entry:
%tobool = icmp eq i32 %n, 0
@@ -111,6 +113,8 @@ entry:
if.then:
store i32 %n, i32* %p, align 4
+ %arrayidx = getelementptr inbounds i32, i32* %p, i32 2
+ store i32 %n, i32* %arrayidx, align 4
br label %if.end
if.else:
diff --git a/test/CodeGen/WebAssembly/umulo-i64.ll b/test/CodeGen/WebAssembly/umulo-i64.ll
new file mode 100644
index 000000000000..e47c8aa0bb3a
--- /dev/null
+++ b/test/CodeGen/WebAssembly/umulo-i64.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -asm-verbose=false | FileCheck %s
+; Test that UMULO works correctly on 64-bit operands.
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-emscripten"
+
+; CHECK-LABEL: _ZN4core3num21_$LT$impl$u20$u64$GT$15overflowing_mul17h07be88b4cbac028fE:
+; CHECK: __multi3
+; Function Attrs: inlinehint
+define void @"_ZN4core3num21_$LT$impl$u20$u64$GT$15overflowing_mul17h07be88b4cbac028fE"(i64, i64) unnamed_addr #0 {
+start:
+ %2 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %0, i64 %1)
+ %3 = extractvalue { i64, i1 } %2, 0
+ store i64 %3, i64* undef
+ unreachable
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #1
+
+attributes #0 = { inlinehint }
+attributes #1 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/X86/2012-08-16-setcc.ll b/test/CodeGen/X86/2012-08-16-setcc.ll
index c03b923cadba..cba208e62a14 100644
--- a/test/CodeGen/X86/2012-08-16-setcc.ll
+++ b/test/CodeGen/X86/2012-08-16-setcc.ll
@@ -1,45 +1,53 @@
-; RUN: llc < %s -mtriple=x86_64-apple-macosx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; rdar://12081007
-; CHECK-LABEL: and_1:
-; CHECK: andb
-; CHECK-NEXT: cmovnel
-; CHECK: ret
define i32 @and_1(i8 zeroext %a, i8 zeroext %b, i32 %x) {
+; CHECK-LABEL: and_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: andb %dil, %sil
+; CHECK-NEXT: cmovnel %edx, %eax
+; CHECK-NEXT: retq
%1 = and i8 %b, %a
%2 = icmp ne i8 %1, 0
%3 = select i1 %2, i32 %x, i32 0
ret i32 %3
}
-; CHECK-LABEL: and_2:
-; CHECK: andb
-; CHECK-NEXT: setne
-; CHECK: ret
define zeroext i1 @and_2(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: and_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: andb %dil, %sil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: retq
%1 = and i8 %b, %a
%2 = icmp ne i8 %1, 0
ret i1 %2
}
-; CHECK-LABEL: xor_1:
-; CHECK: xorb
-; CHECK-NEXT: cmovnel
-; CHECK: ret
define i32 @xor_1(i8 zeroext %a, i8 zeroext %b, i32 %x) {
+; CHECK-LABEL: xor_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: xorb %dil, %sil
+; CHECK-NEXT: cmovnel %edx, %eax
+; CHECK-NEXT: retq
%1 = xor i8 %b, %a
%2 = icmp ne i8 %1, 0
%3 = select i1 %2, i32 %x, i32 0
ret i32 %3
}
-; CHECK-LABEL: xor_2:
-; CHECK: xorb
-; CHECK-NEXT: setne
-; CHECK: ret
define zeroext i1 @xor_2(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: xor_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorb %dil, %sil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: retq
%1 = xor i8 %b, %a
%2 = icmp ne i8 %1, 0
ret i1 %2
}
+
diff --git a/test/CodeGen/X86/GC/badreadproto.ll b/test/CodeGen/X86/GC/badreadproto.ll
index 37672f804357..aad79d75218a 100644
--- a/test/CodeGen/X86/GC/badreadproto.ll
+++ b/test/CodeGen/X86/GC/badreadproto.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
%list = type { i32, %list* }
diff --git a/test/CodeGen/X86/GC/badrootproto.ll b/test/CodeGen/X86/GC/badrootproto.ll
index ff86d03c646a..37a3451c2c17 100644
--- a/test/CodeGen/X86/GC/badrootproto.ll
+++ b/test/CodeGen/X86/GC/badrootproto.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
%list = type { i32, %list* }
%meta = type opaque
diff --git a/test/CodeGen/X86/GC/badwriteproto.ll b/test/CodeGen/X86/GC/badwriteproto.ll
index 2544e40f81ff..62c157477635 100644
--- a/test/CodeGen/X86/GC/badwriteproto.ll
+++ b/test/CodeGen/X86/GC/badwriteproto.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
%list = type { i32, %list* }
diff --git a/test/CodeGen/X86/GC/fat.ll b/test/CodeGen/X86/GC/fat.ll
index d05ca3da8195..316a80343e2f 100644
--- a/test/CodeGen/X86/GC/fat.ll
+++ b/test/CodeGen/X86/GC/fat.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
declare void @llvm.gcroot(i8**, i8*) nounwind
diff --git a/test/CodeGen/X86/GC/outside.ll b/test/CodeGen/X86/GC/outside.ll
index 2968c6917ce1..55eda5453789 100644
--- a/test/CodeGen/X86/GC/outside.ll
+++ b/test/CodeGen/X86/GC/outside.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
declare void @llvm.gcroot(i8**, i8*)
diff --git a/test/CodeGen/X86/GlobalISel/GV.ll b/test/CodeGen/X86/GlobalISel/GV.ll
new file mode 100644
index 000000000000..44862ab5a96e
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/GV.ll
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=x86_64-apple-darwin -global-isel -verify-machineinstrs -relocation-model=pic < %s -o - | FileCheck %s --check-prefix=X64_DARWIN_PIC
+; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32ABI
+
+@g_int = global i32 0, align 4
+
+; Function Attrs: noinline nounwind optnone uwtable
+define i32* @test_global_ptrv() #3 {
+; X64-LABEL: test_global_ptrv:
+; X64: # BB#0: # %entry
+; X64-NEXT: leaq g_int, %rax
+; X64-NEXT: retq
+;
+; X64_DARWIN_PIC-LABEL: test_global_ptrv:
+; X64_DARWIN_PIC: ## BB#0: ## %entry
+; X64_DARWIN_PIC-NEXT: leaq _g_int(%rip), %rax
+; X64_DARWIN_PIC-NEXT: retq
+;
+; X32-LABEL: test_global_ptrv:
+; X32: # BB#0: # %entry
+; X32-NEXT: leal g_int, %eax
+; X32-NEXT: retl
+;
+; X32ABI-LABEL: test_global_ptrv:
+; X32ABI: # BB#0: # %entry
+; X32ABI-NEXT: leal g_int, %eax
+; X32ABI-NEXT: retq
+entry:
+ ret i32* @g_int
+}
+
+; Function Attrs: noinline nounwind optnone uwtable
+define i32 @test_global_valv() #3 {
+; X64-LABEL: test_global_valv:
+; X64: # BB#0: # %entry
+; X64-NEXT: leaq g_int, %rax
+; X64-NEXT: movl (%rax), %eax
+; X64-NEXT: retq
+;
+; X64_DARWIN_PIC-LABEL: test_global_valv:
+; X64_DARWIN_PIC: ## BB#0: ## %entry
+; X64_DARWIN_PIC-NEXT: leaq _g_int(%rip), %rax
+; X64_DARWIN_PIC-NEXT: movl (%rax), %eax
+; X64_DARWIN_PIC-NEXT: retq
+;
+; X32-LABEL: test_global_valv:
+; X32: # BB#0: # %entry
+; X32-NEXT: leal g_int, %eax
+; X32-NEXT: movl (%eax), %eax
+; X32-NEXT: retl
+;
+; X32ABI-LABEL: test_global_valv:
+; X32ABI: # BB#0: # %entry
+; X32ABI-NEXT: leal g_int, %eax
+; X32ABI-NEXT: movl (%eax), %eax
+; X32ABI-NEXT: retq
+entry:
+ %0 = load i32, i32* @g_int, align 4
+ ret i32 %0
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/add-vec.ll b/test/CodeGen/X86/GlobalISel/add-vec.ll
index 679a49d733a2..0ea1cf820c0f 100644
--- a/test/CodeGen/X86/GlobalISel/add-vec.ll
+++ b/test/CodeGen/X86/GlobalISel/add-vec.ll
@@ -1,38 +1,41 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=SKX
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=core-avx2 -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=corei7-avx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
+
define <16 x i8> @test_add_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
-; SKX-LABEL: test_add_v16i8:
-; SKX: # BB#0:
-; SKX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
-; SKX-NEXT: retq
+; ALL-LABEL: test_add_v16i8:
+; ALL: # BB#0:
+; ALL-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; ALL-NEXT: retq
%ret = add <16 x i8> %arg1, %arg2
ret <16 x i8> %ret
}
define <8 x i16> @test_add_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
-; SKX-LABEL: test_add_v8i16:
-; SKX: # BB#0:
-; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; SKX-NEXT: retq
+; ALL-LABEL: test_add_v8i16:
+; ALL: # BB#0:
+; ALL-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; ALL-NEXT: retq
%ret = add <8 x i16> %arg1, %arg2
ret <8 x i16> %ret
}
define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
-; SKX-LABEL: test_add_v4i32:
-; SKX: # BB#0:
-; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; SKX-NEXT: retq
+; ALL-LABEL: test_add_v4i32:
+; ALL: # BB#0:
+; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; ALL-NEXT: retq
%ret = add <4 x i32> %arg1, %arg2
ret <4 x i32> %ret
}
define <2 x i64> @test_add_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
-; SKX-LABEL: test_add_v2i64:
-; SKX: # BB#0:
-; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; SKX-NEXT: retq
+; ALL-LABEL: test_add_v2i64:
+; ALL: # BB#0:
+; ALL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; ALL-NEXT: retq
%ret = add <2 x i64> %arg1, %arg2
ret <2 x i64> %ret
}
@@ -42,6 +45,20 @@ define <32 x i8> @test_add_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
; SKX: # BB#0:
; SKX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
+;
+; AVX2-LABEL: test_add_v32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX1-LABEL: test_add_v32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
%ret = add <32 x i8> %arg1, %arg2
ret <32 x i8> %ret
}
@@ -51,6 +68,20 @@ define <16 x i16> @test_add_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
; SKX: # BB#0:
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
+;
+; AVX2-LABEL: test_add_v16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX1-LABEL: test_add_v16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
%ret = add <16 x i16> %arg1, %arg2
ret <16 x i16> %ret
}
@@ -60,6 +91,20 @@ define <8 x i32> @test_add_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
; SKX: # BB#0:
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
+;
+; AVX2-LABEL: test_add_v8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX1-LABEL: test_add_v8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
%ret = add <8 x i32> %arg1, %arg2
ret <8 x i32> %ret
}
@@ -69,6 +114,20 @@ define <4 x i64> @test_add_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
; SKX: # BB#0:
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
+;
+; AVX2-LABEL: test_add_v4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX1-LABEL: test_add_v4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
%ret = add <4 x i64> %arg1, %arg2
ret <4 x i64> %ret
}
@@ -78,6 +137,26 @@ define <64 x i8> @test_add_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) {
; SKX: # BB#0:
; SKX-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
+;
+; AVX2-LABEL: test_add_v64i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX1-LABEL: test_add_v64i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; AVX1-NEXT: vpaddb %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
+; AVX1-NEXT: vpaddb %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT: retq
%ret = add <64 x i8> %arg1, %arg2
ret <64 x i8> %ret
}
@@ -87,6 +166,26 @@ define <32 x i16> @test_add_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
; SKX: # BB#0:
; SKX-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
+;
+; AVX2-LABEL: test_add_v32i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX1-LABEL: test_add_v32i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; AVX1-NEXT: vpaddw %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
+; AVX1-NEXT: vpaddw %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT: retq
%ret = add <32 x i16> %arg1, %arg2
ret <32 x i16> %ret
}
@@ -96,6 +195,26 @@ define <16 x i32> @test_add_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
; SKX: # BB#0:
; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
+;
+; AVX2-LABEL: test_add_v16i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX1-LABEL: test_add_v16i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; AVX1-NEXT: vpaddd %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
+; AVX1-NEXT: vpaddd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT: retq
%ret = add <16 x i32> %arg1, %arg2
ret <16 x i32> %ret
}
@@ -105,6 +224,26 @@ define <8 x i64> @test_add_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
; SKX: # BB#0:
; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
+;
+; AVX2-LABEL: test_add_v8i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX1-LABEL: test_add_v8i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; AVX1-NEXT: vpaddq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
+; AVX1-NEXT: vpaddq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT: retq
%ret = add <8 x i64> %arg1, %arg2
ret <8 x i64> %ret
}
diff --git a/test/CodeGen/X86/GlobalISel/constant.ll b/test/CodeGen/X86/GlobalISel/constant.ll
index b550bb0bc7be..5b512f9ce937 100644
--- a/test/CodeGen/X86/GlobalISel/constant.ll
+++ b/test/CodeGen/X86/GlobalISel/constant.ll
@@ -51,4 +51,13 @@ define i64 @const_i64_i32() {
ret i64 -1
}
+define void @main(i32 ** %data) {
+; ALL-LABEL: main:
+; ALL: # BB#0:
+; ALL-NEXT: movq $0, %rax
+; ALL-NEXT: movq %rax, (%rdi)
+; ALL-NEXT: retq
+ store i32* null, i32** %data, align 8
+ ret void
+}
diff --git a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
index b08ac062fb4b..11b03bd56110 100644
--- a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
+++ b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
-; TODO merge with ext.ll after i64 sext suported on 32bit platform
+; TODO merge with ext.ll after i64 sext supported on 32bit platform
define i64 @test_zext_i1(i8 %a) {
; X64-LABEL: test_zext_i1:
diff --git a/test/CodeGen/X86/GlobalISel/ext.ll b/test/CodeGen/X86/GlobalISel/ext.ll
index 392c973c1208..d9a09678cf4b 100644
--- a/test/CodeGen/X86/GlobalISel/ext.ll
+++ b/test/CodeGen/X86/GlobalISel/ext.ll
@@ -2,6 +2,42 @@
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32
+define i8 @test_zext_i1toi8(i32 %a) {
+; X64-LABEL: test_zext_i1toi8:
+; X64: # BB#0:
+; X64-NEXT: andb $1, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+;
+; X32-LABEL: test_zext_i1toi8:
+; X32: # BB#0:
+; X32-NEXT: movl 4(%esp), %eax
+; X32-NEXT: andb $1, %al
+; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: retl
+ %val = trunc i32 %a to i1
+ %r = zext i1 %val to i8
+ ret i8 %r
+}
+
+define i16 @test_zext_i1toi16(i32 %a) {
+; X64-LABEL: test_zext_i1toi16:
+; X64: # BB#0:
+; X64-NEXT: andw $1, %di
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+;
+; X32-LABEL: test_zext_i1toi16:
+; X32: # BB#0:
+; X32-NEXT: movl 4(%esp), %eax
+; X32-NEXT: andw $1, %ax
+; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: retl
+ %val = trunc i32 %a to i1
+ %r = zext i1 %val to i16
+ ret i16 %r
+}
+
define i32 @test_zext_i1(i32 %a) {
; X64-LABEL: test_zext_i1:
; X64: # BB#0:
diff --git a/test/CodeGen/X86/GlobalISel/legalize-GV.mir b/test/CodeGen/X86/GlobalISel/legalize-GV.mir
new file mode 100644
index 000000000000..7f9971e4c70a
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-GV.mir
@@ -0,0 +1,31 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+--- |
+
+ @g_int = global i32 0, align 4
+
+ define i32* @test_global_ptrv() {
+ entry:
+ ret i32* @g_int
+ }
+...
+---
+name: test_global_ptrv
+# ALL-LABEL: name: test_global_ptrv
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+# ALL: %0(p0) = G_GLOBAL_VALUE @g_int
+# ALL-NEXT: %rax = COPY %0(p0)
+# ALL-NEXT: RET 0, implicit %rax
+body: |
+ bb.1.entry:
+ %0(p0) = G_GLOBAL_VALUE @g_int
+ %rax = COPY %0(p0)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-ext.mir b/test/CodeGen/X86/GlobalISel/legalize-ext.mir
index c9add0dc4e95..c86bfd9ee96d 100644
--- a/test/CodeGen/X86/GlobalISel/legalize-ext.mir
+++ b/test/CodeGen/X86/GlobalISel/legalize-ext.mir
@@ -1,12 +1,28 @@
# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
--- |
- define i32 @test_zext_i1(i8 %a) {
+
+ define i8 @test_zext_i1toi8(i1 %a) {
+ %r = zext i1 %a to i8
+ ret i8 %r
+ }
+
+ define i16 @test_zext_i1toi16(i1 %a) {
+ %r = zext i1 %a to i16
+ ret i16 %r
+ }
+
+ define i32 @test_zext_i1(i8 %a) {
%val = trunc i8 %a to i1
%r = zext i1 %val to i32
ret i32 %r
}
+ define i16 @test_zext_i8toi16(i8 %val) {
+ %r = zext i8 %val to i16
+ ret i16 %r
+ }
+
define i32 @test_zext_i8(i8 %val) {
%r = zext i8 %val to i32
ret i32 %r
@@ -17,12 +33,27 @@
ret i32 %r
}
+ define i8 @test_sext_i1toi8(i1 %a) {
+ %r = sext i1 %a to i8
+ ret i8 %r
+ }
+
+ define i16 @test_sext_i1toi16(i1 %a) {
+ %r = sext i1 %a to i16
+ ret i16 %r
+ }
+
define i32 @test_sext_i1(i8 %a) {
%val = trunc i8 %a to i1
%r = sext i1 %val to i32
ret i32 %r
}
+ define i16 @test_sext_i8toi16(i8 %val) {
+ %r = sext i8 %val to i16
+ ret i16 %r
+ }
+
define i32 @test_sext_i8(i8 %val) {
%r = sext i8 %val to i32
ret i32 %r
@@ -35,6 +66,52 @@
...
---
+name: test_zext_i1toi8
+# ALL-LABEL: name: test_zext_i1toi8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+# ALL: %0(s1) = COPY %edi
+# ALL-NEXT: %1(s8) = G_ZEXT %0(s1)
+# ALL-NEXT: %al = COPY %1(s8)
+# ALL-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s1) = COPY %edi
+ %1(s8) = G_ZEXT %0(s1)
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_zext_i1toi16
+# ALL-LABEL: name: test_zext_i1toi16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+# ALL: %0(s1) = COPY %edi
+# ALL-NEXT: %1(s16) = G_ZEXT %0(s1)
+# ALL-NEXT: %ax = COPY %1(s16)
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s1) = COPY %edi
+ %1(s16) = G_ZEXT %0(s1)
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
name: test_zext_i1
# ALL-LABEL: name: test_zext_i1
alignment: 4
@@ -61,6 +138,29 @@ body: |
...
---
+name: test_zext_i8toi16
+# ALL-LABEL: name: test_zext_i8toi16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+# ALL: %0(s8) = COPY %edi
+# ALL-NEXT: %1(s16) = G_ZEXT %0(s8)
+# ALL-NEXT: %ax = COPY %1(s16)
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s8) = COPY %edi
+ %1(s16) = G_ZEXT %0(s8)
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
name: test_zext_i8
# ALL-LABEL: name: test_zext_i8
alignment: 4
@@ -107,6 +207,52 @@ body: |
...
---
+name: test_sext_i1toi8
+# ALL-LABEL: name: test_sext_i1toi8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+# ALL: %0(s1) = COPY %edi
+# ALL-NEXT: %1(s8) = G_SEXT %0(s1)
+# ALL-NEXT: %al = COPY %1(s8)
+# ALL-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s1) = COPY %edi
+ %1(s8) = G_SEXT %0(s1)
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_sext_i1toi16
+# ALL-LABEL: name: test_sext_i1toi16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+# ALL: %0(s1) = COPY %edi
+# ALL-NEXT: %1(s16) = G_SEXT %0(s1)
+# ALL-NEXT: %ax = COPY %1(s16)
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s1) = COPY %edi
+ %1(s16) = G_SEXT %0(s1)
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
name: test_sext_i1
# ALL-LABEL: name: test_sext_i1
alignment: 4
@@ -133,6 +279,29 @@ body: |
...
---
+name: test_sext_i8toi16
+# ALL-LABEL: name: test_sext_i8toi16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+# ALL: %0(s8) = COPY %edi
+# ALL-NEXT: %1(s16) = G_SEXT %0(s8)
+# ALL-NEXT: %ax = COPY %1(s16)
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s8) = COPY %edi
+ %1(s16) = G_SEXT %0(s8)
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
name: test_sext_i8
# ALL-LABEL: name: test_sext_i8
alignment: 4
diff --git a/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
new file mode 100644
index 000000000000..60d9fc63c14a
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
@@ -0,0 +1,110 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+
+--- |
+ define void @test_memop_s8tos32() {
+ ret void
+ }
+
+ define void @test_memop_s64() {
+ ret void
+ }
+...
+---
+name: test_memop_s8tos32
+# ALL-LABEL: name: test_memop_s8tos32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
+ - { id: 5, class: _, preferred-register: '' }
+ - { id: 6, class: _, preferred-register: '' }
+ - { id: 7, class: _, preferred-register: '' }
+ - { id: 8, class: _, preferred-register: '' }
+ - { id: 9, class: _, preferred-register: '' }
+ - { id: 10, class: _, preferred-register: '' }
+# ALL: %0(p0) = IMPLICIT_DEF
+# ALL-NEXT: %11(s8) = G_LOAD %0(p0) :: (load 1)
+# ALL-NEXT: %9(s1) = G_TRUNC %11(s8)
+# ALL-NEXT: %1(s8) = G_LOAD %0(p0) :: (load 1)
+# ALL-NEXT: %2(s16) = G_LOAD %0(p0) :: (load 2)
+# ALL-NEXT: %3(s32) = G_LOAD %0(p0) :: (load 4)
+# ALL-NEXT: %4(p0) = G_LOAD %0(p0) :: (load 8)
+# ALL-NEXT: %10(s1) = IMPLICIT_DEF
+# ALL-NEXT: %12(s8) = G_ZEXT %10(s1)
+# ALL-NEXT: G_STORE %12(s8), %0(p0) :: (store 1)
+# ALL-NEXT: %5(s8) = IMPLICIT_DEF
+# ALL-NEXT: G_STORE %5(s8), %0(p0) :: (store 1)
+# ALL-NEXT: %6(s16) = IMPLICIT_DEF
+# ALL-NEXT: G_STORE %6(s16), %0(p0) :: (store 2)
+# ALL-NEXT: %7(s32) = IMPLICIT_DEF
+# ALL-NEXT: G_STORE %7(s32), %0(p0) :: (store 4)
+# ALL-NEXT: %8(p0) = IMPLICIT_DEF
+# ALL-NEXT: G_STORE %8(p0), %0(p0) :: (store 8)
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = IMPLICIT_DEF
+ %9(s1) = G_LOAD %0(p0) :: (load 1)
+ %1(s8) = G_LOAD %0(p0) :: (load 1)
+ %2(s16) = G_LOAD %0(p0) :: (load 2)
+ %3(s32) = G_LOAD %0(p0) :: (load 4)
+ %4(p0) = G_LOAD %0(p0) :: (load 8)
+
+ %10(s1) = IMPLICIT_DEF
+ G_STORE %10, %0 :: (store 1)
+ %5(s8) = IMPLICIT_DEF
+ G_STORE %5, %0 :: (store 1)
+ %6(s16) = IMPLICIT_DEF
+ G_STORE %6, %0 :: (store 2)
+ %7(s32) = IMPLICIT_DEF
+ G_STORE %7, %0 :: (store 4)
+ %8(p0) = IMPLICIT_DEF
+ G_STORE %8, %0 :: (store 8)
+...
+---
+name: test_memop_s64
+# ALL-LABEL: name: test_memop_s64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+# X64: %0(p0) = IMPLICIT_DEF
+# X64-NEXT: %1(s64) = G_LOAD %0(p0) :: (load 8)
+# X64-NEXT: %2(s64) = IMPLICIT_DEF
+# X64-NEXT: G_STORE %2(s64), %0(p0) :: (store 8)
+#
+# X32: %0(p0) = IMPLICIT_DEF
+# X32-NEXT: %3(s32) = G_LOAD %0(p0) :: (load 8)
+# X32-NEXT: %6(s32) = G_CONSTANT i32 4
+# X32-NEXT: %5(p0) = G_GEP %0, %6(s32)
+# X32-NEXT: %4(s32) = G_LOAD %5(p0) :: (load 8)
+# X32-NEXT: %1(s64) = G_MERGE_VALUES %3(s32), %4(s32)
+# X32-NEXT: %2(s64) = IMPLICIT_DEF
+# X32-NEXT: %7(s32), %8(s32) = G_UNMERGE_VALUES %2(s64)
+# X32-NEXT: G_STORE %7(s32), %0(p0) :: (store 8)
+# X32-NEXT: %10(s32) = G_CONSTANT i32 4
+# X32-NEXT: %9(p0) = G_GEP %0, %10(s32)
+# X32-NEXT: G_STORE %8(s32), %9(p0) :: (store 8)
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = IMPLICIT_DEF
+ %1(s64) = G_LOAD %0(p0) :: (load 8)
+
+ %2(s64) = IMPLICIT_DEF
+ G_STORE %2, %0 :: (store 8)
+
+...
+
diff --git a/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
index 2757e6493258..1c719b1bf74d 100644
--- a/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
@@ -4,6 +4,16 @@
;TODO merge with x86-64 tests (many operations not suppored yet)
+define i1 @test_load_i1(i1 * %p1) {
+; ALL-LABEL: test_load_i1:
+; ALL: # BB#0:
+; ALL-NEXT: movl 4(%esp), %eax
+; ALL-NEXT: movb (%eax), %al
+; ALL-NEXT: retl
+ %r = load i1, i1* %p1
+ ret i1 %r
+}
+
define i8 @test_load_i8(i8 * %p1) {
; ALL-LABEL: test_load_i8:
; ALL: # BB#0:
@@ -34,6 +44,18 @@ define i32 @test_load_i32(i32 * %p1) {
ret i32 %r
}
+define i1 * @test_store_i1(i1 %val, i1 * %p1) {
+; ALL-LABEL: test_store_i1:
+; ALL: # BB#0:
+; ALL-NEXT: movb 4(%esp), %cl
+; ALL-NEXT: movl 8(%esp), %eax
+; ALL-NEXT: andb $1, %cl
+; ALL-NEXT: movb %cl, (%eax)
+; ALL-NEXT: retl
+ store i1 %val, i1* %p1
+ ret i1 * %p1;
+}
+
define i8 * @test_store_i8(i8 %val, i8 * %p1) {
; ALL-LABEL: test_store_i8:
; ALL: # BB#0:
diff --git a/test/CodeGen/X86/GlobalISel/memop-scalar.ll b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
index 2e04b3cf20b3..2097a3b0bfc9 100644
--- a/test/CodeGen/X86/GlobalISel/memop-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
@@ -2,6 +2,15 @@
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_FAST
; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_GREEDY
+define i1 @test_load_i1(i1 * %p1) {
+; ALL-LABEL: test_load_i1:
+; ALL: # BB#0:
+; ALL-NEXT: movb (%rdi), %al
+; ALL-NEXT: retq
+ %r = load i1, i1* %p1
+ ret i1 %r
+}
+
define i8 @test_load_i8(i8 * %p1) {
; ALL-LABEL: test_load_i8:
; ALL: # BB#0:
@@ -70,6 +79,17 @@ define double @test_load_double(double * %p1) {
ret double %r
}
+define i1 * @test_store_i1(i1 %val, i1 * %p1) {
+; ALL-LABEL: test_store_i1:
+; ALL: # BB#0:
+; ALL-NEXT: andb $1, %dil
+; ALL-NEXT: movb %dil, (%rsi)
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: retq
+ store i1 %val, i1* %p1
+ ret i1 * %p1;
+}
+
define i32 * @test_store_i32(i32 %val, i32 * %p1) {
; ALL-LABEL: test_store_i32:
; ALL: # BB#0:
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
index 3658bc9af957..95ef15ceb689 100644
--- a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -174,6 +174,13 @@
ret i64 %ret
}
+ @g_int = global i32 0, align 4
+
+ define i32* @test_global_ptrv() {
+ entry:
+ ret i32* @g_int
+ }
+
...
---
name: test_add_i8
@@ -1084,4 +1091,24 @@ body: |
RET 0, implicit %rax
...
+---
+name: test_global_ptrv
+# CHECK-LABEL: name: test_global_ptrv
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+# CHECK: %0(p0) = G_GLOBAL_VALUE @g_int
+# CHECK-NEXT: %rax = COPY %0(p0)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1.entry:
+ %0(p0) = G_GLOBAL_VALUE @g_int
+ %rax = COPY %0(p0)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-GV.mir b/test/CodeGen/X86/GlobalISel/select-GV.mir
new file mode 100644
index 000000000000..2f2fd51d99d1
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-GV.mir
@@ -0,0 +1,99 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64ALL --check-prefix=X64
+# RUN: llc -mtriple=x86_64-apple-darwin -relocation-model=pic -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64ALL --check-prefix=X64_DARWIN_PIC
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ALL --check-prefix=X32
+# RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ALL --check-prefix=X32ABI
+
+--- |
+
+ @g_int = global i32 0, align 4
+
+ define i32* @test_global_ptrv() {
+ entry:
+ ret i32* @g_int
+ }
+
+ define i32 @test_global_valv() {
+ entry:
+ %0 = load i32, i32* @g_int, align 4
+ ret i32 %0
+ }
+
+...
+---
+name: test_global_ptrv
+# CHECK-LABEL: name: test_global_ptrv
+alignment: 4
+legalized: true
+regBankSelected: true
+# X64ALL: registers:
+# X64ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+#
+# X32ALL: registers:
+# X32ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+# X64: %0 = LEA64r _, 1, _, @g_int, _
+# X64-NEXT: %rax = COPY %0
+# X64-NEXT: RET 0, implicit %rax
+#
+# X64_DARWIN_PIC: %0 = LEA64r %rip, 1, _, @g_int, _
+# X64_DARWIN_PIC-NEXT: %rax = COPY %0
+# X64_DARWIN_PIC-NEXT: RET 0, implicit %rax
+#
+# X32: %0 = LEA32r _, 1, _, @g_int, _
+# X32-NEXT: %rax = COPY %0
+# X32-NEXT: RET 0, implicit %rax
+#
+# X32ABI: %0 = LEA64_32r _, 1, _, @g_int, _
+# X32ABI-NEXT: %rax = COPY %0
+# X32ABI-NEXT: RET 0, implicit %rax
+body: |
+ bb.1.entry:
+ %0(p0) = G_GLOBAL_VALUE @g_int
+ %rax = COPY %0(p0)
+ RET 0, implicit %rax
+
+...
+---
+name: test_global_valv
+# CHECK-LABEL: name: test_global_valv
+alignment: 4
+legalized: true
+regBankSelected: true
+# X64ALL: registers:
+# X64ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+# X64ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' }
+#
+# X32ALL: registers:
+# X32ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+# X32ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+# X64: %1 = LEA64r _, 1, _, @g_int, _
+# X64-NEXT: %0 = MOV32rm %1, 1, _, 0, _ :: (load 4 from @g_int)
+# X64-NEXT: %eax = COPY %0
+# X64-NEXT: RET 0, implicit %eax
+#
+# X64_DARWIN_PIC: %1 = LEA64r %rip, 1, _, @g_int, _
+# X64_DARWIN_PIC-NEXT: %0 = MOV32rm %1, 1, _, 0, _ :: (load 4 from @g_int)
+# X64_DARWIN_PIC-NEXT: %eax = COPY %0
+# X64_DARWIN_PIC-NEXT: RET 0, implicit %eax
+#
+# X32: %1 = LEA32r _, 1, _, @g_int, _
+# X32-NEXT: %0 = MOV32rm %1, 1, _, 0, _ :: (load 4 from @g_int)
+# X32-NEXT: %eax = COPY %0
+# X32-NEXT: RET 0, implicit %eax
+#
+# X32ABI: %1 = LEA64_32r _, 1, _, @g_int, _
+# X32ABI-NEXT: %0 = MOV32rm %1, 1, _, 0, _ :: (load 4 from @g_int)
+# X32ABI-NEXT: %eax = COPY %0
+# X32ABI-NEXT: RET 0, implicit %eax
+body: |
+ bb.1.entry:
+ %1(p0) = G_GLOBAL_VALUE @g_int
+ %0(s32) = G_LOAD %1(p0) :: (load 4 from @g_int)
+ %eax = COPY %0(s32)
+ RET 0, implicit %eax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-constant.mir b/test/CodeGen/X86/GlobalISel/select-constant.mir
index 4b91b5f9f098..30f57418b4ce 100644
--- a/test/CodeGen/X86/GlobalISel/select-constant.mir
+++ b/test/CodeGen/X86/GlobalISel/select-constant.mir
@@ -29,6 +29,11 @@
ret i64 -1
}
+ define void @main(i32** %data) {
+ store i32* null, i32** %data, align 8
+ ret void
+ }
+
...
---
name: const_i8
@@ -162,3 +167,29 @@ body: |
RET 0, implicit %rax
...
+---
+name: main
+# CHECK-LABEL: name: main
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gr64, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+# CHECK: %0 = COPY %rdi
+# CHECK-NEXT: %1 = MOV64ri32 0
+# CHECK-NEXT: MOV64mr %0, 1, _, 0, _, %1 :: (store 8 into %ir.data)
+# CHECK-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(p0) = G_CONSTANT i64 0
+ G_STORE %1(p0), %0(p0) :: (store 8 into %ir.data)
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-ext.mir b/test/CodeGen/X86/GlobalISel/select-ext.mir
index b52f1f6fa621..b6734e5aa2b8 100644
--- a/test/CodeGen/X86/GlobalISel/select-ext.mir
+++ b/test/CodeGen/X86/GlobalISel/select-ext.mir
@@ -2,6 +2,16 @@
# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
--- |
+ define i8 @test_zext_i1toi8(i1 %a) {
+ %r = zext i1 %a to i8
+ ret i8 %r
+ }
+
+ define i16 @test_zext_i1toi16(i1 %a) {
+ %r = zext i1 %a to i16
+ ret i16 %r
+ }
+
define i32 @test_zext_i1(i1 %a) {
%r = zext i1 %a to i32
ret i32 %r
@@ -29,6 +39,60 @@
...
---
+name: test_zext_i1toi8
+# ALL-LABEL: name: test_zext_i1toi8
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr8, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %1 = AND8ri %0, 1, implicit-def %eflags
+# ALL-NEXT: %al = COPY %1
+# ALL-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s1) = COPY %edi
+ %1(s8) = G_ZEXT %0(s1)
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_zext_i1toi16
+# ALL-LABEL: name: test_zext_i1toi16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr16, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %2 = SUBREG_TO_REG 0, %0, 1
+# ALL-NEXT: %1 = AND16ri8 %2, 1, implicit-def %eflags
+# ALL-NEXT: %ax = COPY %1
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s1) = COPY %edi
+ %1(s16) = G_ZEXT %0(s1)
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
name: test_zext_i1
# ALL-LABEL: name: test_zext_i1
alignment: 4
diff --git a/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir b/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
new file mode 100644
index 000000000000..09dc5344796f
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
@@ -0,0 +1,53 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL
+--- |
+ define void @test_unmerge() {
+ ret void
+ }
+
+...
+---
+name: test_unmerge
+# AVX-LABEL: name: test_unmerge
+#
+# AVX512VL-LABEL: name: test_unmerge
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX: registers:
+# AVX-NEXT: - { id: 0, class: vr256, preferred-register: '' }
+# AVX-NEXT: - { id: 1, class: vr128, preferred-register: '' }
+# AVX-NEXT: - { id: 2, class: vr128, preferred-register: '' }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr256x, preferred-register: '' }
+# AVX512VL-NEXT: - { id: 1, class: vr128x, preferred-register: '' }
+# AVX512VL-NEXT: - { id: 2, class: vr128x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# AVX: %0 = IMPLICIT_DEF
+# AVX-NEXT: %1 = COPY %0.sub_xmm
+# AVX-NEXT: %2 = VEXTRACTF128rr %0, 1
+# AVX-NEXT: %xmm0 = COPY %1
+# AVX-NEXT: %xmm1 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0, implicit %xmm1
+#
+# AVX512VL: %0 = IMPLICIT_DEF
+# AVX512VL-NEXT: %1 = COPY %0.sub_xmm
+# AVX512VL-NEXT: %2 = VEXTRACTF32x4Z256rr %0, 1
+# AVX512VL-NEXT: %xmm0 = COPY %1
+# AVX512VL-NEXT: %xmm1 = COPY %2
+# AVX512VL-NEXT: RET 0, implicit %xmm0, implicit %xmm1
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<8 x s32>) = IMPLICIT_DEF
+ %1(<4 x s32>), %2(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>)
+ %xmm0 = COPY %1(<4 x s32>)
+ %xmm1 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0, implicit %xmm1
+
+...
+
diff --git a/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir b/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
new file mode 100644
index 000000000000..a63733d07f6a
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+--- |
+ define void @test_unmerge_v128() {
+ ret void
+ }
+
+ define void @test_unmerge_v256() {
+ ret void
+ }
+
+...
+---
+name: test_unmerge_v128
+# ALL-LABEL: name: test_unmerge_v128
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: vr128x, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: vr128x, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: vr128x, preferred-register: '' }
+# ALL-NEXT: - { id: 4, class: vr128x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+ - { id: 3, class: vecr }
+ - { id: 4, class: vecr }
+# ALL: %0 = IMPLICIT_DEF
+# ALL-NEXT: %1 = COPY %0.sub_xmm
+# ALL-NEXT: %2 = VEXTRACTF32x4Zrr %0, 1
+# ALL-NEXT: %3 = VEXTRACTF32x4Zrr %0, 2
+# ALL-NEXT: %4 = VEXTRACTF32x4Zrr %0, 3
+# ALL-NEXT: %xmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<16 x s32>) = IMPLICIT_DEF
+ %1(<4 x s32>), %2(<4 x s32>), %3(<4 x s32>), %4(<4 x s32>) = G_UNMERGE_VALUES %0(<16 x s32>)
+ %xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_unmerge_v256
+# ALL-LABEL: name: test_unmerge_v256
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: vr256x, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: vr256x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %0 = IMPLICIT_DEF
+# ALL-NEXT: %1 = COPY %0.sub_ymm
+# ALL-NEXT: %2 = VEXTRACTF64x4Zrr %0, 1
+# ALL-NEXT: %xmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit %ymm0
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<16 x s32>) = IMPLICIT_DEF
+ %1(<8 x s32>), %2(<8 x s32>) = G_UNMERGE_VALUES %0(<16 x s32>)
+ %xmm0 = COPY %1(<8 x s32>)
+ RET 0, implicit %ymm0
+
+...
+
diff --git a/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll b/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
new file mode 100644
index 000000000000..2743f882b2e4
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
@@ -0,0 +1,18 @@
+; RUN: llc -O0 -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 -pass-remarks-missed='gisel*' -verify-machineinstrs %s -o %t.out 2> %t.err
+; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-OUT < %t.out
+; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-ERR < %t.err
+; This file checks that the fallback path to selection dag works.
+; The test is fragile in the sense that it must be updated to expose
+; something that fails with global-isel.
+; When we cannot produce a test case anymore, that means we can remove
+; the fallback path.
+
+; Check that we fallback on invoke translation failures.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg1<def>(s80) = G_FCONSTANT x86_fp80 0xK4002A000000000000000
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
+; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
+define void @test_x86_fp80_dump(x86_fp80* %ptr){
+ store x86_fp80 0xK4002A000000000000000, x86_fp80* %ptr, align 16
+ ret void
+}
+
diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll
index e5f7cc5c6dd8..640b5215afe9 100644
--- a/test/CodeGen/X86/avg.ll
+++ b/test/CodeGen/X86/avg.ll
@@ -2624,7 +2624,8 @@ define void @avg_v64i8_const(<64 x i8>* %a) {
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = [1,2,3,4,5,6,7,8,1,2,3,4,5,6,7,8]
+; AVX512F-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3]
; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm3
; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2
; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1
@@ -2941,7 +2942,8 @@ define void @avg_v32i16_const(<32 x i16>* %a) {
; AVX512F: # BB#0:
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [1,2,3,4,5,6,7,8,1,2,3,4,5,6,7,8]
+; AVX512F-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpsrld $1, %zmm0, %zmm0
diff --git a/test/CodeGen/X86/avx-cmp.ll b/test/CodeGen/X86/avx-cmp.ll
index a050d6abe56f..963878b0f563 100644
--- a/test/CodeGen/X86/avx-cmp.ll
+++ b/test/CodeGen/X86/avx-cmp.ll
@@ -1,25 +1,59 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
-
-; CHECK: vcmpltps %ymm
-; CHECK-NOT: vucomiss
-define <8 x i32> @cmp00(<8 x float> %a, <8 x float> %b) nounwind readnone {
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
+
+define <8 x i32> @cmp00(<8 x float> %a, <8 x float> %b) nounwind {
+; CHECK-LABEL: cmp00:
+; CHECK: # BB#0:
+; CHECK-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = fcmp olt <8 x float> %a, %b
%s = sext <8 x i1> %bincmp to <8 x i32>
ret <8 x i32> %s
}
-; CHECK: vcmpltpd %ymm
-; CHECK-NOT: vucomisd
-define <4 x i64> @cmp01(<4 x double> %a, <4 x double> %b) nounwind readnone {
+define <4 x i64> @cmp01(<4 x double> %a, <4 x double> %b) nounwind {
+; CHECK-LABEL: cmp01:
+; CHECK: # BB#0:
+; CHECK-NEXT: vcmpltpd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = fcmp olt <4 x double> %a, %b
%s = sext <4 x i1> %bincmp to <4 x i64>
ret <4 x i64> %s
}
-declare void @scale() nounwind uwtable
-
-; CHECK: vucomisd
-define void @render() nounwind uwtable {
+declare void @scale() nounwind
+
+define void @render() nounwind {
+; CHECK-LABEL: render:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: jne .LBB2_6
+; CHECK-NEXT: # BB#1: # %for.cond5.preheader
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: jmp .LBB2_2
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB2_5: # %if.then
+; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
+; CHECK-NEXT: callq scale
+; CHECK-NEXT: .LBB2_2: # %for.cond5
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: testb %bl, %bl
+; CHECK-NEXT: jne .LBB2_2
+; CHECK-NEXT: # BB#3: # %for.cond5
+; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
+; CHECK-NEXT: testb %bl, %bl
+; CHECK-NEXT: je .LBB2_2
+; CHECK-NEXT: # BB#4: # %for.body33
+; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
+; CHECK-NEXT: vucomisd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: jne .LBB2_5
+; CHECK-NEXT: jp .LBB2_5
+; CHECK-NEXT: jmp .LBB2_2
+; CHECK-NEXT: .LBB2_6: # %for.end52
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
entry:
br i1 undef, label %for.cond5, label %for.end52
@@ -42,89 +76,113 @@ for.end52:
ret void
}
-; CHECK: vextractf128 $1
-; CHECK: vextractf128 $1
-; CHECK-NEXT: vpcmpgtd %xmm
-; CHECK-NEXT: vpcmpgtd %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <8 x i32> @int256-cmp(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+define <8 x i32> @int256_cmp(<8 x i32> %i, <8 x i32> %j) nounwind {
+; CHECK-LABEL: int256_cmp:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = icmp slt <8 x i32> %i, %j
%x = sext <8 x i1> %bincmp to <8 x i32>
ret <8 x i32> %x
}
-; CHECK: vextractf128 $1
-; CHECK: vextractf128 $1
-; CHECK-NEXT: vpcmpgtq %xmm
-; CHECK-NEXT: vpcmpgtq %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <4 x i64> @v4i64-cmp(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+define <4 x i64> @v4i64_cmp(<4 x i64> %i, <4 x i64> %j) nounwind {
+; CHECK-LABEL: v4i64_cmp:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = icmp slt <4 x i64> %i, %j
%x = sext <4 x i1> %bincmp to <4 x i64>
ret <4 x i64> %x
}
-; CHECK: vextractf128 $1
-; CHECK: vextractf128 $1
-; CHECK-NEXT: vpcmpgtw %xmm
-; CHECK-NEXT: vpcmpgtw %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <16 x i16> @v16i16-cmp(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+define <16 x i16> @v16i16_cmp(<16 x i16> %i, <16 x i16> %j) nounwind {
+; CHECK-LABEL: v16i16_cmp:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = icmp slt <16 x i16> %i, %j
%x = sext <16 x i1> %bincmp to <16 x i16>
ret <16 x i16> %x
}
-; CHECK: vextractf128 $1
-; CHECK: vextractf128 $1
-; CHECK-NEXT: vpcmpgtb %xmm
-; CHECK-NEXT: vpcmpgtb %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <32 x i8> @v32i8-cmp(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+define <32 x i8> @v32i8_cmp(<32 x i8> %i, <32 x i8> %j) nounwind {
+; CHECK-LABEL: v32i8_cmp:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = icmp slt <32 x i8> %i, %j
%x = sext <32 x i1> %bincmp to <32 x i8>
ret <32 x i8> %x
}
-; CHECK: vextractf128 $1
-; CHECK: vextractf128 $1
-; CHECK-NEXT: vpcmpeqd %xmm
-; CHECK-NEXT: vpcmpeqd %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <8 x i32> @int256-cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind {
+; CHECK-LABEL: int256_cmpeq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vpcmpeqd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = icmp eq <8 x i32> %i, %j
%x = sext <8 x i1> %bincmp to <8 x i32>
ret <8 x i32> %x
}
-; CHECK: vextractf128 $1
-; CHECK: vextractf128 $1
-; CHECK-NEXT: vpcmpeqq %xmm
-; CHECK-NEXT: vpcmpeqq %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <4 x i64> @v4i64-cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind {
+; CHECK-LABEL: v4i64_cmpeq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vpcmpeqq %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = icmp eq <4 x i64> %i, %j
%x = sext <4 x i1> %bincmp to <4 x i64>
ret <4 x i64> %x
}
-; CHECK: vextractf128 $1
-; CHECK: vextractf128 $1
-; CHECK-NEXT: vpcmpeqw %xmm
-; CHECK-NEXT: vpcmpeqw %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <16 x i16> @v16i16-cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind {
+; CHECK-LABEL: v16i16_cmpeq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vpcmpeqw %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = icmp eq <16 x i16> %i, %j
%x = sext <16 x i1> %bincmp to <16 x i16>
ret <16 x i16> %x
}
-; CHECK: vextractf128 $1
-; CHECK: vextractf128 $1
-; CHECK-NEXT: vpcmpeqb %xmm
-; CHECK-NEXT: vpcmpeqb %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <32 x i8> @v32i8-cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind {
+; CHECK-LABEL: v32i8_cmpeq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vpcmpeqb %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%bincmp = icmp eq <32 x i8> %i, %j
%x = sext <32 x i1> %bincmp to <32 x i8>
ret <32 x i8> %x
@@ -132,17 +190,28 @@ define <32 x i8> @v32i8-cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
;; Scalar comparison
-; CHECK: scalarcmpA
-; CHECK: vcmpeqsd
define i32 @scalarcmpA() uwtable ssp {
+; CHECK-LABEL: scalarcmpA:
+; CHECK: # BB#0:
+; CHECK-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vcmpeqsd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vmovq %xmm0, %rax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; CHECK-NEXT: retq
%cmp29 = fcmp oeq double undef, 0.000000e+00
%res = zext i1 %cmp29 to i32
ret i32 %res
}
-; CHECK: scalarcmpB
-; CHECK: vcmpeqss
define i32 @scalarcmpB() uwtable ssp {
+; CHECK-LABEL: scalarcmpB:
+; CHECK: # BB#0:
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vcmpeqss %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: retq
%cmp29 = fcmp oeq float undef, 0.000000e+00
%res = zext i1 %cmp29 to i32
ret i32 %res
diff --git a/test/CodeGen/X86/avx-load-store.ll b/test/CodeGen/X86/avx-load-store.ll
index d7eceb7cce66..06aadc476e4c 100644
--- a/test/CodeGen/X86/avx-load-store.ll
+++ b/test/CodeGen/X86/avx-load-store.ll
@@ -1,13 +1,62 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
-; RUN: llc -O0 < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s -check-prefix=CHECK_O0
-
-; CHECK: vmovaps
-; CHECK: vmovaps
-; CHECK: vmovaps
-; CHECK: vmovaps
-; CHECK: vmovaps
-; CHECK: vmovaps
-define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s
+; RUN: llc -O0 < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s -check-prefix=CHECK_O0
+
+define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind {
+; CHECK-LABEL: test_256_load:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $96, %rsp
+; CHECK-NEXT: movq %rdx, %r14
+; CHECK-NEXT: movq %rsi, %r15
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: vmovaps (%rbx), %ymm0
+; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps (%r15), %ymm1
+; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps (%r14), %ymm2
+; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
+; CHECK-NEXT: callq dummy
+; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovaps %ymm0, (%rbx)
+; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovaps %ymm0, (%r15)
+; CHECK-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovaps %ymm0, (%r14)
+; CHECK-NEXT: addq $96, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; CHECK_O0-LABEL: test_256_load:
+; CHECK_O0: # BB#0: # %entry
+; CHECK_O0-NEXT: subq $152, %rsp
+; CHECK_O0-NEXT: vmovapd (%rdi), %ymm0
+; CHECK_O0-NEXT: vmovaps (%rsi), %ymm1
+; CHECK_O0-NEXT: vmovdqa (%rdx), %ymm2
+; CHECK_O0-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK_O0-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK_O0-NEXT: vmovups %ymm2, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK_O0-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK_O0-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK_O0-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK_O0-NEXT: callq dummy
+; CHECK_O0-NEXT: movq {{[0-9]+}}(%rsp), %rdx # 8-byte Reload
+; CHECK_O0-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; CHECK_O0-NEXT: vmovapd %ymm0, (%rdx)
+; CHECK_O0-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; CHECK_O0-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm1 # 32-byte Reload
+; CHECK_O0-NEXT: vmovaps %ymm1, (%rsi)
+; CHECK_O0-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; CHECK_O0-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm2 # 32-byte Reload
+; CHECK_O0-NEXT: vmovdqa %ymm2, (%rdi)
+; CHECK_O0-NEXT: addq $152, %rsp
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
entry:
%0 = bitcast double* %d to <4 x double>*
%tmp1.i = load <4 x double>, <4 x double>* %0, align 32
@@ -27,62 +76,115 @@ declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)
;; The two tests below check that we must fold load + scalar_to_vector
;; + ins_subvec+ zext into only a single vmovss or vmovsd or vinsertps from memory
-; CHECK: mov00
define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
+; CHECK-LABEL: mov00:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: retq
+;
+; CHECK_O0-LABEL: mov00:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK_O0-NEXT: # implicit-def: %YMM1
+; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
+; CHECK_O0-NEXT: vxorps %ymm2, %ymm2, %ymm2
+; CHECK_O0-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3,4,5,6,7]
+; CHECK_O0-NEXT: retq
%val = load float, float* %ptr
-; CHECK: vmovss (%
%i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
ret <8 x float> %i0
-; CHECK: ret
}
-; CHECK: mov01
define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
+; CHECK-LABEL: mov01:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: retq
+;
+; CHECK_O0-LABEL: mov01:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK_O0-NEXT: # implicit-def: %YMM1
+; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
+; CHECK_O0-NEXT: vxorps %ymm2, %ymm2, %ymm2
+; CHECK_O0-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3]
+; CHECK_O0-NEXT: retq
%val = load double, double* %ptr
-; CHECK: vmovsd (%
%i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
ret <4 x double> %i0
-; CHECK: ret
}
-; CHECK: vmovaps %ymm
define void @storev16i16(<16 x i16> %a) nounwind {
+; CHECK-LABEL: storev16i16:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps %ymm0, (%rax)
+;
+; CHECK_O0-LABEL: storev16i16:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: # implicit-def: %RAX
+; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 32
unreachable
}
-; CHECK: storev16i16_01
-; CHECK: vextractf128
-; CHECK: vmovups %xmm
define void @storev16i16_01(<16 x i16> %a) nounwind {
+; CHECK-LABEL: storev16i16_01:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
+; CHECK-NEXT: vmovups %xmm0, (%rax)
+;
+; CHECK_O0-LABEL: storev16i16_01:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: # implicit-def: %RAX
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 4
unreachable
}
-; CHECK: storev32i8
-; CHECK: vmovaps %ymm
define void @storev32i8(<32 x i8> %a) nounwind {
+; CHECK-LABEL: storev32i8:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps %ymm0, (%rax)
+;
+; CHECK_O0-LABEL: storev32i8:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: # implicit-def: %RAX
+; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 32
unreachable
}
-; CHECK: storev32i8_01
-; CHECK: vextractf128
-; CHECK: vmovups %xmm
define void @storev32i8_01(<32 x i8> %a) nounwind {
+; CHECK-LABEL: storev32i8_01:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
+; CHECK-NEXT: vmovups %xmm0, (%rax)
+;
+; CHECK_O0-LABEL: storev32i8_01:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: # implicit-def: %RAX
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 4
unreachable
}
; It is faster to make two saves, if the data is already in XMM registers. For
; example, after making an integer operation.
-; CHECK: _double_save
-; CHECK-NOT: vinsertf128 $1
-; CHECK-NOT: vinsertf128 $0
-; CHECK: vmovaps %xmm
-; CHECK: vmovaps %xmm
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
-entry:
+; CHECK-LABEL: double_save:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps %xmm1, 16(%rdi)
+; CHECK-NEXT: vmovaps %xmm0, (%rdi)
+; CHECK-NEXT: retq
+;
+; CHECK_O0-LABEL: double_save:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: # implicit-def: %YMM2
+; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
+; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
%Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i32> %Z, <8 x i32>* %P, align 16
ret void
@@ -90,60 +192,127 @@ entry:
declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind
-; CHECK_O0: _f_f
-; CHECK-O0: vmovss LCPI
-; CHECK-O0: vxorps %xmm
-; CHECK-O0: vmovss %xmm
define void @f_f() nounwind {
+; CHECK-LABEL: f_f:
+; CHECK: # BB#0: # %allocas
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: jne .LBB8_2
+; CHECK-NEXT: # BB#1: # %cif_mask_all
+; CHECK-NEXT: .LBB8_2: # %cif_mask_mixed
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: jne .LBB8_4
+; CHECK-NEXT: # BB#3: # %cif_mixed_test_all
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: vmovd %eax, %xmm0
+; CHECK-NEXT: vmaskmovps %ymm0, %ymm0, (%rax)
+; CHECK-NEXT: .LBB8_4: # %cif_mixed_test_any_check
+;
+; CHECK_O0-LABEL: f_f:
+; CHECK_O0: # BB#0: # %allocas
+; CHECK_O0-NEXT: # implicit-def: %AL
+; CHECK_O0-NEXT: testb $1, %al
+; CHECK_O0-NEXT: jne .LBB8_1
+; CHECK_O0-NEXT: jmp .LBB8_2
+; CHECK_O0-NEXT: .LBB8_1: # %cif_mask_all
+; CHECK_O0-NEXT: .LBB8_2: # %cif_mask_mixed
+; CHECK_O0-NEXT: # implicit-def: %AL
+; CHECK_O0-NEXT: testb $1, %al
+; CHECK_O0-NEXT: jne .LBB8_3
+; CHECK_O0-NEXT: jmp .LBB8_4
+; CHECK_O0-NEXT: .LBB8_3: # %cif_mixed_test_all
+; CHECK_O0-NEXT: movl $-1, %eax
+; CHECK_O0-NEXT: vmovd %eax, %xmm0
+; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
+; CHECK_O0-NEXT: # implicit-def: %RCX
+; CHECK_O0-NEXT: # implicit-def: %YMM2
+; CHECK_O0-NEXT: vmaskmovps %ymm2, %ymm1, (%rcx)
+; CHECK_O0-NEXT: .LBB8_4: # %cif_mixed_test_any_check
allocas:
br i1 undef, label %cif_mask_all, label %cif_mask_mixed
-cif_mask_all: ; preds = %allocas
+cif_mask_all:
unreachable
-cif_mask_mixed: ; preds = %allocas
+cif_mask_mixed:
br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check
-cif_mixed_test_all: ; preds = %cif_mask_mixed
+cif_mixed_test_all:
call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x i32> <i32 -1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <8 x float> undef) nounwind
unreachable
-cif_mixed_test_any_check: ; preds = %cif_mask_mixed
+cif_mixed_test_any_check:
unreachable
}
-; CHECK: add8i32
-; CHECK: vmovups
-; CHECK: vmovups
-; CHECK-NOT: vinsertf128
-; CHECK-NOT: vextractf128
-; CHECK: vmovups
-; CHECK: vmovups
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
+; CHECK-LABEL: add8i32:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovups (%rsi), %xmm0
+; CHECK-NEXT: vmovups 16(%rsi), %xmm1
+; CHECK-NEXT: vmovups %xmm1, 16(%rdi)
+; CHECK-NEXT: vmovups %xmm0, (%rdi)
+; CHECK-NEXT: retq
+;
+; CHECK_O0-LABEL: add8i32:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: vmovdqu (%rsi), %xmm0
+; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1
+; CHECK_O0-NEXT: # implicit-def: %YMM2
+; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
+; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
%b = load <8 x i32>, <8 x i32>* %bp, align 1
%x = add <8 x i32> zeroinitializer, %b
store <8 x i32> %x, <8 x i32>* %ret, align 1
ret void
}
-; CHECK: add4i64a64
-; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
-; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+; CHECK-LABEL: add4i64a64:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps (%rsi), %ymm0
+; CHECK-NEXT: vmovaps %ymm0, (%rdi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; CHECK_O0-LABEL: add4i64a64:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: vmovaps (%rsi), %ymm0
+; CHECK_O0-NEXT: vmovdqa %ymm0, (%rdi)
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
%b = load <4 x i64>, <4 x i64>* %bp, align 64
%x = add <4 x i64> zeroinitializer, %b
store <4 x i64> %x, <4 x i64>* %ret, align 64
ret void
}
-; CHECK: add4i64a16
-; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
-; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
-; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
-; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+; CHECK-LABEL: add4i64a16:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps (%rsi), %xmm0
+; CHECK-NEXT: vmovaps 16(%rsi), %xmm1
+; CHECK-NEXT: vmovaps %xmm1, 16(%rdi)
+; CHECK-NEXT: vmovaps %xmm0, (%rdi)
+; CHECK-NEXT: retq
+;
+; CHECK_O0-LABEL: add4i64a16:
+; CHECK_O0: # BB#0:
+; CHECK_O0-NEXT: vmovdqa (%rsi), %xmm0
+; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1
+; CHECK_O0-NEXT: # implicit-def: %YMM2
+; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
+; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
%b = load <4 x i64>, <4 x i64>* %bp, align 16
%x = add <4 x i64> zeroinitializer, %b
store <4 x i64> %x, <4 x i64>* %ret, align 16
ret void
}
+
diff --git a/test/CodeGen/X86/avx-schedule.ll b/test/CodeGen/X86/avx-schedule.ll
index 47e95fe31bdf..a12a412fb94d 100644
--- a/test/CodeGen/X86/avx-schedule.ll
+++ b/test/CodeGen/X86/avx-schedule.ll
@@ -10,8 +10,8 @@ define <4 x double> @test_addpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; SANDY-LABEL: test_addpd:
; SANDY: # BB#0:
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addpd:
; HASWELL: # BB#0:
@@ -21,14 +21,14 @@ define <4 x double> @test_addpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; BTVER2-LABEL: test_addpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fadd <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -40,8 +40,8 @@ define <8 x float> @test_addps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; SANDY-LABEL: test_addps:
; SANDY: # BB#0:
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addps:
; HASWELL: # BB#0:
@@ -51,14 +51,14 @@ define <8 x float> @test_addps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; BTVER2-LABEL: test_addps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fadd <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -70,8 +70,8 @@ define <4 x double> @test_addsubpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
; SANDY-LABEL: test_addsubpd:
; SANDY: # BB#0:
; SANDY-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsubpd:
; HASWELL: # BB#0:
@@ -81,14 +81,14 @@ define <4 x double> @test_addsubpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
;
; BTVER2-LABEL: test_addsubpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addsubpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -101,8 +101,8 @@ define <8 x float> @test_addsubps(<8 x float> %a0, <8 x float> %a1, <8 x float>
; SANDY-LABEL: test_addsubps:
; SANDY: # BB#0:
; SANDY-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsubps:
; HASWELL: # BB#0:
@@ -112,14 +112,14 @@ define <8 x float> @test_addsubps(<8 x float> %a0, <8 x float> %a1, <8 x float>
;
; BTVER2-LABEL: test_addsubps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addsubps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -131,10 +131,10 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi
define <4 x double> @test_andnotpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; SANDY-LABEL: test_andnotpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andnotpd:
; HASWELL: # BB#0:
@@ -147,14 +147,14 @@ define <4 x double> @test_andnotpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
; BTVER2: # BB#0:
; BTVER2-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andnotpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
@@ -172,10 +172,10 @@ define <4 x double> @test_andnotpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
define <8 x float> @test_andnotps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; SANDY-LABEL: test_andnotps:
; SANDY: # BB#0:
-; SANDY-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andnotps:
; HASWELL: # BB#0:
@@ -188,14 +188,14 @@ define <8 x float> @test_andnotps(<8 x float> %a0, <8 x float> %a1, <8 x float>
; BTVER2: # BB#0:
; BTVER2-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andnotps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
@@ -213,10 +213,10 @@ define <8 x float> @test_andnotps(<8 x float> %a0, <8 x float> %a1, <8 x float>
define <4 x double> @test_andpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; SANDY-LABEL: test_andpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andpd:
; HASWELL: # BB#0:
@@ -229,14 +229,14 @@ define <4 x double> @test_andpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; BTVER2: # BB#0:
; BTVER2-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
@@ -252,10 +252,10 @@ define <4 x double> @test_andpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_andps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; SANDY-LABEL: test_andps:
; SANDY: # BB#0:
-; SANDY-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andps:
; HASWELL: # BB#0:
@@ -268,14 +268,14 @@ define <8 x float> @test_andps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; BTVER2: # BB#0:
; BTVER2-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
@@ -291,10 +291,10 @@ define <8 x float> @test_andps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define <4 x double> @test_blendpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; SANDY-LABEL: test_blendpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
+; SANDY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendpd:
; HASWELL: # BB#0:
@@ -306,14 +306,14 @@ define <4 x double> @test_blendpd(<4 x double> %a0, <4 x double> %a1, <4 x doubl
; BTVER2-LABEL: test_blendpd:
; BTVER2: # BB#0:
; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blendpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [6:1.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -326,9 +326,9 @@ define <4 x double> @test_blendpd(<4 x double> %a0, <4 x double> %a1, <4 x doubl
define <8 x float> @test_blendps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; SANDY-LABEL: test_blendps:
; SANDY: # BB#0:
-; SANDY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
-; SANDY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:1.00]
+; SANDY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendps:
; HASWELL: # BB#0:
@@ -356,9 +356,9 @@ define <8 x float> @test_blendps(<8 x float> %a0, <8 x float> %a1, <8 x float> *
define <4 x double> @test_blendvpd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
; SANDY-LABEL: test_blendvpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; SANDY-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; SANDY-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendvpd:
; HASWELL: # BB#0:
@@ -387,9 +387,9 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
define <8 x float> @test_blendvps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
; SANDY-LABEL: test_blendvps:
; SANDY: # BB#0:
-; SANDY-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; SANDY-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; SANDY-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendvps:
; HASWELL: # BB#0:
@@ -418,8 +418,8 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f
define <8 x float> @test_broadcastf128(<4 x float> *%a0) {
; SANDY-LABEL: test_broadcastf128:
; SANDY: # BB#0:
-; SANDY-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastf128:
; HASWELL: # BB#0:
@@ -443,8 +443,8 @@ define <8 x float> @test_broadcastf128(<4 x float> *%a0) {
define <4 x double> @test_broadcastsd_ymm(double *%a0) {
; SANDY-LABEL: test_broadcastsd_ymm:
; SANDY: # BB#0:
-; SANDY-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastsd_ymm:
; HASWELL: # BB#0:
@@ -469,8 +469,8 @@ define <4 x double> @test_broadcastsd_ymm(double *%a0) {
define <4 x float> @test_broadcastss(float *%a0) {
; SANDY-LABEL: test_broadcastss:
; SANDY: # BB#0:
-; SANDY-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [4:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [6:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastss:
; HASWELL: # BB#0:
@@ -496,7 +496,7 @@ define <8 x float> @test_broadcastss_ymm(float *%a0) {
; SANDY-LABEL: test_broadcastss_ymm:
; SANDY: # BB#0:
; SANDY-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastss_ymm:
; HASWELL: # BB#0:
@@ -522,9 +522,9 @@ define <4 x double> @test_cmppd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; SANDY-LABEL: test_cmppd:
; SANDY: # BB#0:
; SANDY-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
-; SANDY-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmppd:
; HASWELL: # BB#0:
@@ -560,9 +560,9 @@ define <8 x float> @test_cmpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; SANDY-LABEL: test_cmpps:
; SANDY: # BB#0:
; SANDY-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
-; SANDY-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmpps:
; HASWELL: # BB#0:
@@ -598,9 +598,9 @@ define <4 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
; SANDY-LABEL: test_cvtdq2pd:
; SANDY: # BB#0:
; SANDY-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [4:1.00]
-; SANDY-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:1.00]
+; SANDY-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [10:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtdq2pd:
; HASWELL: # BB#0:
@@ -613,14 +613,14 @@ define <4 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtdq2pd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:1.00]
; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = sitofp <4 x i32> %a0 to <4 x double>
%2 = load <4 x i32>, <4 x i32> *%a1, align 16
@@ -632,12 +632,12 @@ define <4 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) {
; SANDY-LABEL: test_cvtdq2ps:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [4:1.00]
-; SANDY-NEXT: vmovaps (%rdi), %xmm1 # sched: [4:0.50]
-; SANDY-NEXT: vinsertf128 $1, 16(%rdi), %ymm1, %ymm1 # sched: [5:1.00]
-; SANDY-NEXT: vcvtdq2ps %ymm1, %ymm1 # sched: [4:1.00]
+; SANDY-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vmovaps (%rdi), %xmm1 # sched: [6:0.50]
+; SANDY-NEXT: vinsertf128 $1, 16(%rdi), %ymm1, %ymm1 # sched: [7:1.00]
+; SANDY-NEXT: vcvtdq2ps %ymm1, %ymm1 # sched: [3:1.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtdq2ps:
; HASWELL: # BB#0:
@@ -650,14 +650,14 @@ define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtdq2ps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:1.00]
; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = sitofp <8 x i32> %a0 to <8 x float>
%2 = load <8 x i32>, <8 x i32> *%a1, align 16
@@ -669,10 +669,10 @@ define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) {
define <8 x i32> @test_cvtpd2dq(<4 x double> %a0, <4 x double> *%a1) {
; SANDY-LABEL: test_cvtpd2dq:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [7:1.00]
+; SANDY-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [4:1.00]
+; SANDY-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpd2dq:
; HASWELL: # BB#0:
@@ -704,10 +704,10 @@ define <8 x i32> @test_cvtpd2dq(<4 x double> %a0, <4 x double> *%a1) {
define <8 x float> @test_cvtpd2ps(<4 x double> %a0, <4 x double> *%a1) {
; SANDY-LABEL: test_cvtpd2ps:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [7:1.00]
+; SANDY-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [4:1.00]
+; SANDY-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpd2ps:
; HASWELL: # BB#0:
@@ -741,8 +741,8 @@ define <8 x i32> @test_cvtps2dq(<8 x float> %a0, <8 x float> *%a1) {
; SANDY: # BB#0:
; SANDY-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [7:1.00]
-; SANDY-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtps2dq:
; HASWELL: # BB#0:
@@ -774,9 +774,9 @@ define <8 x i32> @test_cvtps2dq(<8 x float> %a0, <8 x float> *%a1) {
define <4 x double> @test_divpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; SANDY-LABEL: test_divpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [12:1.00]
-; SANDY-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [16:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [45:3.00]
+; SANDY-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [52:3.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divpd:
; HASWELL: # BB#0:
@@ -786,14 +786,14 @@ define <4 x double> @test_divpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; BTVER2-LABEL: test_divpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [19:19.00]
-; BTVER2-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [24:19.00]
+; BTVER2-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
+; BTVER2-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_divpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [19:19.00]
-; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [24:19.00]
+; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
+; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fdiv <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -804,9 +804,9 @@ define <4 x double> @test_divpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_divps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; SANDY-LABEL: test_divps:
; SANDY: # BB#0:
-; SANDY-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [12:1.00]
-; SANDY-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [16:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [29:3.00]
+; SANDY-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [36:3.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divps:
; HASWELL: # BB#0:
@@ -816,14 +816,14 @@ define <8 x float> @test_divps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; BTVER2-LABEL: test_divps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [19:19.00]
-; BTVER2-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [24:19.00]
+; BTVER2-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
+; BTVER2-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_divps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [19:19.00]
-; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [24:19.00]
+; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
+; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fdiv <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -834,9 +834,9 @@ define <8 x float> @test_divps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define <8 x float> @test_dpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; SANDY-LABEL: test_dpps:
; SANDY: # BB#0:
-; SANDY-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:2.00]
; SANDY-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_dpps:
; HASWELL: # BB#0:
@@ -866,9 +866,9 @@ define <4 x float> @test_extractf128(<8 x float> %a0, <8 x float> %a1, <4 x floa
; SANDY-LABEL: test_extractf128:
; SANDY: # BB#0:
; SANDY-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:1.00]
-; SANDY-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
+; SANDY-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_extractf128:
; HASWELL: # BB#0:
@@ -900,7 +900,7 @@ define <4 x double> @test_haddpd(<4 x double> %a0, <4 x double> %a1, <4 x double
; SANDY: # BB#0:
; SANDY-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_haddpd:
; HASWELL: # BB#0:
@@ -929,9 +929,9 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_haddps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; SANDY-LABEL: test_haddps:
; SANDY: # BB#0:
-; SANDY-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
+; SANDY-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_haddps:
; HASWELL: # BB#0:
@@ -960,9 +960,9 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_hsubpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; SANDY-LABEL: test_hsubpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
+; SANDY-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_hsubpd:
; HASWELL: # BB#0:
@@ -991,9 +991,9 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_hsubps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; SANDY-LABEL: test_hsubps:
; SANDY: # BB#0:
-; SANDY-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
+; SANDY-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_hsubps:
; HASWELL: # BB#0:
@@ -1023,9 +1023,9 @@ define <8 x float> @test_insertf128(<8 x float> %a0, <4 x float> %a1, <4 x float
; SANDY-LABEL: test_insertf128:
; SANDY: # BB#0:
; SANDY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00]
-; SANDY-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_insertf128:
; HASWELL: # BB#0:
@@ -1038,14 +1038,14 @@ define <8 x float> @test_insertf128(<8 x float> %a0, <4 x float> %a1, <4 x float
; BTVER2: # BB#0:
; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
; BTVER2-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_insertf128:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = shufflevector <8 x float> %a0, <8 x float> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
@@ -1059,8 +1059,8 @@ define <8 x float> @test_insertf128(<8 x float> %a0, <4 x float> %a1, <4 x float
define <32 x i8> @test_lddqu(i8* %a0) {
; SANDY-LABEL: test_lddqu:
; SANDY: # BB#0:
-; SANDY-NEXT: vlddqu (%rdi), %ymm0 # sched: [4:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vlddqu (%rdi), %ymm0 # sched: [6:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lddqu:
; HASWELL: # BB#0:
@@ -1084,10 +1084,10 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(i8*) nounwind readonly
define <2 x double> @test_maskmovpd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) {
; SANDY-LABEL: test_maskmovpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; SANDY-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; SANDY-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:2.00]
+; SANDY-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovpd:
; HASWELL: # BB#0:
@@ -1119,10 +1119,10 @@ declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>) nounwind
define <4 x double> @test_maskmovpd_ymm(i8* %a0, <4 x i64> %a1, <4 x double> %a2) {
; SANDY-LABEL: test_maskmovpd_ymm:
; SANDY: # BB#0:
-; SANDY-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
+; SANDY-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [5:1.00]
; SANDY-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
; SANDY-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovpd_ymm:
; HASWELL: # BB#0:
@@ -1154,10 +1154,10 @@ declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>) nounwi
define <4 x float> @test_maskmovps(i8* %a0, <4 x i32> %a1, <4 x float> %a2) {
; SANDY-LABEL: test_maskmovps:
; SANDY: # BB#0:
-; SANDY-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; SANDY-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; SANDY-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:2.00]
+; SANDY-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovps:
; HASWELL: # BB#0:
@@ -1189,10 +1189,10 @@ declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>) nounwind
define <8 x float> @test_maskmovps_ymm(i8* %a0, <8 x i32> %a1, <8 x float> %a2) {
; SANDY-LABEL: test_maskmovps_ymm:
; SANDY: # BB#0:
-; SANDY-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
+; SANDY-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [1:0.50]
; SANDY-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
; SANDY-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovps_ymm:
; HASWELL: # BB#0:
@@ -1225,8 +1225,8 @@ define <4 x double> @test_maxpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; SANDY-LABEL: test_maxpd:
; SANDY: # BB#0:
; SANDY-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxpd:
; HASWELL: # BB#0:
@@ -1256,8 +1256,8 @@ define <8 x float> @test_maxps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; SANDY-LABEL: test_maxps:
; SANDY: # BB#0:
; SANDY-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxps:
; HASWELL: # BB#0:
@@ -1288,7 +1288,7 @@ define <4 x double> @test_minpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; SANDY: # BB#0:
; SANDY-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minpd:
; HASWELL: # BB#0:
@@ -1319,7 +1319,7 @@ define <8 x float> @test_minps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; SANDY: # BB#0:
; SANDY-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minps:
; HASWELL: # BB#0:
@@ -1348,10 +1348,10 @@ declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_movapd(<4 x double> *%a0, <4 x double> *%a1) {
; SANDY-LABEL: test_movapd:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovapd (%rdi), %ymm0 # sched: [4:0.50]
+; SANDY-NEXT: vmovapd (%rdi), %ymm0 # sched: [7:0.50]
; SANDY-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovapd %ymm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movapd:
; HASWELL: # BB#0:
@@ -1363,14 +1363,14 @@ define <4 x double> @test_movapd(<4 x double> *%a0, <4 x double> *%a1) {
; BTVER2-LABEL: test_movapd:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovapd (%rdi), %ymm0 # sched: [5:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movapd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = load <4 x double>, <4 x double> *%a0, align 32
@@ -1382,10 +1382,10 @@ define <4 x double> @test_movapd(<4 x double> *%a0, <4 x double> *%a1) {
define <8 x float> @test_movaps(<8 x float> *%a0, <8 x float> *%a1) {
; SANDY-LABEL: test_movaps:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovaps (%rdi), %ymm0 # sched: [4:0.50]
+; SANDY-NEXT: vmovaps (%rdi), %ymm0 # sched: [7:0.50]
; SANDY-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovaps %ymm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movaps:
; HASWELL: # BB#0:
@@ -1397,14 +1397,14 @@ define <8 x float> @test_movaps(<8 x float> *%a0, <8 x float> *%a1) {
; BTVER2-LABEL: test_movaps:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps (%rdi), %ymm0 # sched: [5:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movaps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = load <8 x float>, <8 x float> *%a0, align 32
@@ -1417,9 +1417,9 @@ define <4 x double> @test_movddup(<4 x double> %a0, <4 x double> *%a1) {
; SANDY-LABEL: test_movddup:
; SANDY: # BB#0:
; SANDY-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
-; SANDY-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [4:0.50]
+; SANDY-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [7:0.50]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movddup:
; HASWELL: # BB#0:
@@ -1432,14 +1432,14 @@ define <4 x double> @test_movddup(<4 x double> %a0, <4 x double> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [5:1.00]
; BTVER2-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movddup:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [5:1.00]
; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
%2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -1451,9 +1451,9 @@ define <4 x double> @test_movddup(<4 x double> %a0, <4 x double> *%a1) {
define i32 @test_movmskpd(<4 x double> %a0) {
; SANDY-LABEL: test_movmskpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.33]
+; SANDY-NEXT: vmovmskpd %ymm0, %eax # sched: [2:1.00]
; SANDY-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movmskpd:
; HASWELL: # BB#0:
@@ -1479,9 +1479,9 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>) nounwind readnone
define i32 @test_movmskps(<8 x float> %a0) {
; SANDY-LABEL: test_movmskps:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.33]
+; SANDY-NEXT: vmovmskps %ymm0, %eax # sched: [3:1.00]
; SANDY-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movmskps:
; HASWELL: # BB#0:
@@ -1508,8 +1508,8 @@ define <4 x double> @test_movntpd(<4 x double> %a0, <4 x double> *%a1) {
; SANDY-LABEL: test_movntpd:
; SANDY: # BB#0:
; SANDY-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovntpd %ymm0, (%rdi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntpd:
; HASWELL: # BB#0:
@@ -1519,13 +1519,13 @@ define <4 x double> @test_movntpd(<4 x double> %a0, <4 x double> *%a1) {
;
; BTVER2-LABEL: test_movntpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fadd <4 x double> %a0, %a0
@@ -1537,8 +1537,8 @@ define <8 x float> @test_movntps(<8 x float> %a0, <8 x float> *%a1) {
; SANDY-LABEL: test_movntps:
; SANDY: # BB#0:
; SANDY-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovntps %ymm0, (%rdi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntps:
; HASWELL: # BB#0:
@@ -1548,13 +1548,13 @@ define <8 x float> @test_movntps(<8 x float> %a0, <8 x float> *%a1) {
;
; BTVER2-LABEL: test_movntps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fadd <8 x float> %a0, %a0
@@ -1566,9 +1566,9 @@ define <8 x float> @test_movshdup(<8 x float> %a0, <8 x float> *%a1) {
; SANDY-LABEL: test_movshdup:
; SANDY: # BB#0:
; SANDY-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
-; SANDY-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [4:0.50]
+; SANDY-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [7:0.50]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movshdup:
; HASWELL: # BB#0:
@@ -1581,14 +1581,14 @@ define <8 x float> @test_movshdup(<8 x float> %a0, <8 x float> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [5:1.00]
; BTVER2-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movshdup:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [5:1.00]
; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
%2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -1601,9 +1601,9 @@ define <8 x float> @test_movsldup(<8 x float> %a0, <8 x float> *%a1) {
; SANDY-LABEL: test_movsldup:
; SANDY: # BB#0:
; SANDY-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
-; SANDY-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [4:0.50]
+; SANDY-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [7:0.50]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movsldup:
; HASWELL: # BB#0:
@@ -1616,14 +1616,14 @@ define <8 x float> @test_movsldup(<8 x float> %a0, <8 x float> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [5:1.00]
; BTVER2-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movsldup:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [5:1.00]
; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
%2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -1635,12 +1635,12 @@ define <8 x float> @test_movsldup(<8 x float> %a0, <8 x float> *%a1) {
define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) {
; SANDY-LABEL: test_movupd:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [4:0.50]
-; SANDY-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [6:0.50]
+; SANDY-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vextractf128 $1, %ymm0, 16(%rsi) # sched: [1:1.00]
-; SANDY-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vextractf128 $1, %ymm0, 16(%rsi) # sched: [5:1.00]
+; SANDY-NEXT: vmovupd %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movupd:
; HASWELL: # BB#0:
@@ -1652,14 +1652,14 @@ define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) {
; BTVER2-LABEL: test_movupd:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovupd (%rdi), %ymm0 # sched: [5:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movupd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = load <4 x double>, <4 x double> *%a0, align 1
@@ -1671,12 +1671,12 @@ define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) {
define <8 x float> @test_movups(<8 x float> *%a0, <8 x float> *%a1) {
; SANDY-LABEL: test_movups:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [4:0.50]
-; SANDY-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [6:0.50]
+; SANDY-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vextractf128 $1, %ymm0, 16(%rsi) # sched: [1:1.00]
-; SANDY-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vextractf128 $1, %ymm0, 16(%rsi) # sched: [5:1.00]
+; SANDY-NEXT: vmovups %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movups:
; HASWELL: # BB#0:
@@ -1688,14 +1688,14 @@ define <8 x float> @test_movups(<8 x float> *%a0, <8 x float> *%a1) {
; BTVER2-LABEL: test_movups:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovups (%rdi), %ymm0 # sched: [5:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movups:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = load <8 x float>, <8 x float> *%a0, align 1
@@ -1708,8 +1708,8 @@ define <4 x double> @test_mulpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; SANDY-LABEL: test_mulpd:
; SANDY: # BB#0:
; SANDY-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulpd:
; HASWELL: # BB#0:
@@ -1719,14 +1719,14 @@ define <4 x double> @test_mulpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; BTVER2-LABEL: test_mulpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:4.00]
+; BTVER2-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:4.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mulpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:4.00]
+; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:4.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fmul <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -1738,8 +1738,8 @@ define <8 x float> @test_mulps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; SANDY-LABEL: test_mulps:
; SANDY: # BB#0:
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulps:
; HASWELL: # BB#0:
@@ -1749,14 +1749,14 @@ define <8 x float> @test_mulps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; BTVER2-LABEL: test_mulps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mulps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fmul <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -1767,10 +1767,10 @@ define <8 x float> @test_mulps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define <4 x double> @orpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; SANDY-LABEL: orpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: orpd:
; HASWELL: # BB#0:
@@ -1783,14 +1783,14 @@ define <4 x double> @orpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2)
; BTVER2: # BB#0:
; BTVER2-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: orpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
@@ -1806,10 +1806,10 @@ define <4 x double> @orpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2)
define <8 x float> @test_orps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; SANDY-LABEL: test_orps:
; SANDY: # BB#0:
-; SANDY-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_orps:
; HASWELL: # BB#0:
@@ -1822,14 +1822,14 @@ define <8 x float> @test_orps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2
; BTVER2: # BB#0:
; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_orps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
@@ -1846,9 +1846,9 @@ define <2 x double> @test_permilpd(<2 x double> %a0, <2 x double> *%a1) {
; SANDY-LABEL: test_permilpd:
; SANDY: # BB#0:
; SANDY-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
-; SANDY-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [5:1.00]
+; SANDY-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilpd:
; HASWELL: # BB#0:
@@ -1880,10 +1880,10 @@ define <2 x double> @test_permilpd(<2 x double> %a0, <2 x double> *%a1) {
define <4 x double> @test_permilpd_ymm(<4 x double> %a0, <4 x double> *%a1) {
; SANDY-LABEL: test_permilpd_ymm:
; SANDY: # BB#0:
-; SANDY-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
+; SANDY-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [8:1.00]
; SANDY-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [5:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilpd_ymm:
; HASWELL: # BB#0:
@@ -1896,14 +1896,14 @@ define <4 x double> @test_permilpd_ymm(<4 x double> %a0, <4 x double> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [6:1.00]
; BTVER2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilpd_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [6:1.00]
; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
%2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -1916,9 +1916,9 @@ define <4 x float> @test_permilps(<4 x float> %a0, <4 x float> *%a1) {
; SANDY-LABEL: test_permilps:
; SANDY: # BB#0:
; SANDY-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
-; SANDY-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [5:1.00]
+; SANDY-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilps:
; HASWELL: # BB#0:
@@ -1950,10 +1950,10 @@ define <4 x float> @test_permilps(<4 x float> %a0, <4 x float> *%a1) {
define <8 x float> @test_permilps_ymm(<8 x float> %a0, <8 x float> *%a1) {
; SANDY-LABEL: test_permilps_ymm:
; SANDY: # BB#0:
-; SANDY-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
+; SANDY-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [8:1.00]
; SANDY-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilps_ymm:
; HASWELL: # BB#0:
@@ -1966,14 +1966,14 @@ define <8 x float> @test_permilps_ymm(<8 x float> %a0, <8 x float> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [6:1.00]
; BTVER2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilps_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [6:1.00]
; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
%2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -1986,8 +1986,8 @@ define <2 x double> @test_permilvarpd(<2 x double> %a0, <2 x i64> %a1, <2 x i64>
; SANDY-LABEL: test_permilvarpd:
; SANDY: # BB#0:
; SANDY-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
-; SANDY-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilvarpd:
; HASWELL: # BB#0:
@@ -2018,7 +2018,7 @@ define <4 x double> @test_permilvarpd_ymm(<4 x double> %a0, <4 x i64> %a1, <4 x
; SANDY: # BB#0:
; SANDY-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilvarpd_ymm:
; HASWELL: # BB#0:
@@ -2048,8 +2048,8 @@ define <4 x float> @test_permilvarps(<4 x float> %a0, <4 x i32> %a1, <4 x i32> *
; SANDY-LABEL: test_permilvarps:
; SANDY: # BB#0:
; SANDY-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
-; SANDY-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilvarps:
; HASWELL: # BB#0:
@@ -2080,7 +2080,7 @@ define <8 x float> @test_permilvarps_ymm(<8 x float> %a0, <8 x i32> %a1, <8 x i3
; SANDY: # BB#0:
; SANDY-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilvarps_ymm:
; HASWELL: # BB#0:
@@ -2112,7 +2112,7 @@ define <8 x float> @test_rcpps(<8 x float> %a0, <8 x float> *%a1) {
; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vrcpps (%rdi), %ymm1 # sched: [9:1.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rcpps:
; HASWELL: # BB#0:
@@ -2123,16 +2123,16 @@ define <8 x float> @test_rcpps(<8 x float> %a0, <8 x float> *%a1) {
;
; BTVER2-LABEL: test_rcpps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:1.00]
-; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:2.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rcpps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:1.00]
-; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:2.00]
+; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
%2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -2148,7 +2148,7 @@ define <4 x double> @test_roundpd(<4 x double> %a0, <4 x double> *%a1) {
; SANDY-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundpd:
; HASWELL: # BB#0:
@@ -2161,14 +2161,14 @@ define <4 x double> @test_roundpd(<4 x double> %a0, <4 x double> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [8:1.00]
; BTVER2-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [8:1.00]
; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7)
%2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -2184,7 +2184,7 @@ define <8 x float> @test_roundps(<8 x float> %a0, <8 x float> *%a1) {
; SANDY-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [7:1.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundps:
; HASWELL: # BB#0:
@@ -2197,14 +2197,14 @@ define <8 x float> @test_roundps(<8 x float> %a0, <8 x float> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [8:1.00]
; BTVER2-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [8:1.00]
; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7)
%2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -2217,10 +2217,10 @@ declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readno
define <8 x float> @test_rsqrtps(<8 x float> %a0, <8 x float> *%a1) {
; SANDY-LABEL: test_rsqrtps:
; SANDY: # BB#0:
-; SANDY-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [9:1.00]
+; SANDY-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [14:3.00]
+; SANDY-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [7:3.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rsqrtps:
; HASWELL: # BB#0:
@@ -2231,16 +2231,16 @@ define <8 x float> @test_rsqrtps(<8 x float> %a0, <8 x float> *%a1) {
;
; BTVER2-LABEL: test_rsqrtps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:1.00]
-; BTVER2-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:2.00]
+; BTVER2-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rsqrtps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:1.00]
-; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:2.00]
+; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:2.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
%2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -2254,9 +2254,9 @@ define <4 x double> @test_shufpd(<4 x double> %a0, <4 x double> %a1, <4 x double
; SANDY-LABEL: test_shufpd:
; SANDY: # BB#0:
; SANDY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
-; SANDY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [5:1.00]
+; SANDY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shufpd:
; HASWELL: # BB#0:
@@ -2269,14 +2269,14 @@ define <4 x double> @test_shufpd(<4 x double> %a0, <4 x double> %a1, <4 x double
; BTVER2: # BB#0:
; BTVER2-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
; BTVER2-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shufpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
%2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -2289,8 +2289,8 @@ define <8 x float> @test_shufps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%
; SANDY-LABEL: test_shufps:
; SANDY: # BB#0:
; SANDY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
-; SANDY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shufps:
; HASWELL: # BB#0:
@@ -2318,10 +2318,10 @@ define <8 x float> @test_shufps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%
define <4 x double> @test_sqrtpd(<4 x double> %a0, <4 x double> *%a1) {
; SANDY-LABEL: test_sqrtpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [15:1.00]
-; SANDY-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [19:1.00]
+; SANDY-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [52:3.00]
+; SANDY-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [45:3.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtpd:
; HASWELL: # BB#0:
@@ -2332,16 +2332,16 @@ define <4 x double> @test_sqrtpd(<4 x double> %a0, <4 x double> *%a1) {
;
; BTVER2-LABEL: test_sqrtpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [26:21.00]
-; BTVER2-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [21:21.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [59:54.00]
+; BTVER2-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [54:54.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [26:21.00]
-; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [21:21.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [59:54.00]
+; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [54:54.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0)
%2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -2354,10 +2354,10 @@ declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone
define <8 x float> @test_sqrtps(<8 x float> %a0, <8 x float> *%a1) {
; SANDY-LABEL: test_sqrtps:
; SANDY: # BB#0:
-; SANDY-NEXT: vsqrtps %ymm0, %ymm0 # sched: [15:1.00]
-; SANDY-NEXT: vsqrtps (%rdi), %ymm1 # sched: [19:1.00]
+; SANDY-NEXT: vsqrtps (%rdi), %ymm1 # sched: [36:3.00]
+; SANDY-NEXT: vsqrtps %ymm0, %ymm0 # sched: [29:3.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtps:
; HASWELL: # BB#0:
@@ -2368,16 +2368,16 @@ define <8 x float> @test_sqrtps(<8 x float> %a0, <8 x float> *%a1) {
;
; BTVER2-LABEL: test_sqrtps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vsqrtps (%rdi), %ymm1 # sched: [26:21.00]
-; BTVER2-NEXT: vsqrtps %ymm0, %ymm0 # sched: [21:21.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vsqrtps (%rdi), %ymm1 # sched: [47:42.00]
+; BTVER2-NEXT: vsqrtps %ymm0, %ymm0 # sched: [42:42.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [26:21.00]
-; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [21:21.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [47:42.00]
+; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [42:42.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0)
%2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -2391,8 +2391,8 @@ define <4 x double> @test_subpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; SANDY-LABEL: test_subpd:
; SANDY: # BB#0:
; SANDY-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subpd:
; HASWELL: # BB#0:
@@ -2402,14 +2402,14 @@ define <4 x double> @test_subpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; BTVER2-LABEL: test_subpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fsub <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -2421,8 +2421,8 @@ define <8 x float> @test_subps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; SANDY-LABEL: test_subps:
; SANDY: # BB#0:
; SANDY-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subps:
; HASWELL: # BB#0:
@@ -2432,14 +2432,14 @@ define <8 x float> @test_subps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; BTVER2-LABEL: test_subps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = fsub <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -2451,11 +2451,11 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; SANDY-LABEL: test_testpd:
; SANDY: # BB#0:
; SANDY-NEXT: xorl %eax, %eax # sched: [1:0.33]
-; SANDY-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: setb %al # sched: [1:0.33]
-; SANDY-NEXT: vtestpd (%rdi), %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: setb %al # sched: [1:1.00]
+; SANDY-NEXT: vtestpd (%rdi), %xmm0 # sched: [7:1.00]
; SANDY-NEXT: adcl $0, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_testpd:
; HASWELL: # BB#0:
@@ -2495,12 +2495,12 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
; SANDY-LABEL: test_testpd_ymm:
; SANDY: # BB#0:
; SANDY-NEXT: xorl %eax, %eax # sched: [1:0.33]
-; SANDY-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: setb %al # sched: [1:0.33]
-; SANDY-NEXT: vtestpd (%rdi), %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: setb %al # sched: [1:1.00]
+; SANDY-NEXT: vtestpd (%rdi), %ymm0 # sched: [8:1.00]
; SANDY-NEXT: adcl $0, %eax # sched: [1:0.33]
; SANDY-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_testpd_ymm:
; HASWELL: # BB#0:
@@ -2542,11 +2542,11 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SANDY-LABEL: test_testps:
; SANDY: # BB#0:
; SANDY-NEXT: xorl %eax, %eax # sched: [1:0.33]
-; SANDY-NEXT: vtestps %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: setb %al # sched: [1:0.33]
-; SANDY-NEXT: vtestps (%rdi), %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vtestps %xmm1, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: setb %al # sched: [1:1.00]
+; SANDY-NEXT: vtestps (%rdi), %xmm0 # sched: [7:1.00]
; SANDY-NEXT: adcl $0, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_testps:
; HASWELL: # BB#0:
@@ -2586,12 +2586,12 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
; SANDY-LABEL: test_testps_ymm:
; SANDY: # BB#0:
; SANDY-NEXT: xorl %eax, %eax # sched: [1:0.33]
-; SANDY-NEXT: vtestps %ymm1, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: setb %al # sched: [1:0.33]
-; SANDY-NEXT: vtestps (%rdi), %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vtestps %ymm1, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: setb %al # sched: [1:1.00]
+; SANDY-NEXT: vtestps (%rdi), %ymm0 # sched: [8:1.00]
; SANDY-NEXT: adcl $0, %eax # sched: [1:0.33]
; SANDY-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_testps_ymm:
; HASWELL: # BB#0:
@@ -2635,7 +2635,7 @@ define <4 x double> @test_unpckhpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
; SANDY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SANDY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [5:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpckhpd:
; HASWELL: # BB#0:
@@ -2648,14 +2648,14 @@ define <4 x double> @test_unpckhpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
; BTVER2: # BB#0:
; BTVER2-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
; BTVER2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpckhpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
%2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -2669,7 +2669,7 @@ define <8 x float> @test_unpckhps(<8 x float> %a0, <8 x float> %a1, <8 x float>
; SANDY: # BB#0:
; SANDY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SANDY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpckhps:
; HASWELL: # BB#0:
@@ -2698,9 +2698,9 @@ define <4 x double> @test_unpcklpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
; SANDY-LABEL: test_unpcklpd:
; SANDY: # BB#0:
; SANDY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
-; SANDY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [5:1.00]
+; SANDY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpcklpd:
; HASWELL: # BB#0:
@@ -2713,14 +2713,14 @@ define <4 x double> @test_unpcklpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
; BTVER2: # BB#0:
; BTVER2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
; BTVER2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpcklpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
%2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -2733,8 +2733,8 @@ define <8 x float> @test_unpcklps(<8 x float> %a0, <8 x float> %a1, <8 x float>
; SANDY-LABEL: test_unpcklps:
; SANDY: # BB#0:
; SANDY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
-; SANDY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpcklps:
; HASWELL: # BB#0:
@@ -2762,10 +2762,10 @@ define <8 x float> @test_unpcklps(<8 x float> %a0, <8 x float> %a1, <8 x float>
define <4 x double> @test_xorpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; SANDY-LABEL: test_xorpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_xorpd:
; HASWELL: # BB#0:
@@ -2778,14 +2778,14 @@ define <4 x double> @test_xorpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; BTVER2: # BB#0:
; BTVER2-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_xorpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
@@ -2801,10 +2801,10 @@ define <4 x double> @test_xorpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_xorps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; SANDY-LABEL: test_xorps:
; SANDY: # BB#0:
-; SANDY-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
-; SANDY-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; SANDY-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SANDY-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_xorps:
; HASWELL: # BB#0:
@@ -2817,14 +2817,14 @@ define <8 x float> @test_xorps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; BTVER2: # BB#0:
; BTVER2-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_xorps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
@@ -2841,7 +2841,7 @@ define void @test_zeroall() {
; SANDY-LABEL: test_zeroall:
; SANDY: # BB#0:
; SANDY-NEXT: vzeroall # sched: [?:0.000000e+00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_zeroall:
; HASWELL: # BB#0:
@@ -2866,7 +2866,7 @@ define void @test_zeroupper() {
; SANDY-LABEL: test_zeroupper:
; SANDY: # BB#0:
; SANDY-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_zeroupper:
; HASWELL: # BB#0:
diff --git a/test/CodeGen/X86/avx-unpack.ll b/test/CodeGen/X86/avx-unpack.ll
index 6924d98b38b1..7826bc97eec5 100644
--- a/test/CodeGen/X86/avx-unpack.ll
+++ b/test/CodeGen/X86/avx-unpack.ll
@@ -1,57 +1,84 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s
-; CHECK: vunpckhps
define <8 x float> @unpackhips(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhips:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
ret <8 x float> %shuffle.i
}
-; CHECK: vunpckhpd
define <4 x double> @unpackhipd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhipd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x double> %shuffle.i
}
-; CHECK: vunpcklps
define <8 x float> @unpacklops(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklops:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
ret <8 x float> %shuffle.i
}
-; CHECK: vunpcklpd
define <4 x double> @unpacklopd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklopd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
ret <4 x double> %shuffle.i
}
-; CHECK-NOT: vunpcklps %ymm
-define <8 x float> @unpacklops-not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
-entry:
+define <8 x float> @unpacklops_not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
+; CHECK-LABEL: unpacklops_not:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
ret <8 x float> %shuffle.i
}
-; CHECK-NOT: vunpcklpd %ymm
-define <4 x double> @unpacklopd-not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
-entry:
+define <4 x double> @unpacklopd_not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
+; CHECK-LABEL: unpacklopd_not:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
ret <4 x double> %shuffle.i
}
-; CHECK-NOT: vunpckhps %ymm
-define <8 x float> @unpackhips-not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
-entry:
+define <8 x float> @unpackhips_not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
+; CHECK-LABEL: unpackhips_not:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,2,u,3,u,4,u,5]
+; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,u,3,u,4,u,5,u]
+; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13>
ret <8 x float> %shuffle.i
}
-; CHECK-NOT: vunpckhpd %ymm
-define <4 x double> @unpackhipd-not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
-entry:
+define <4 x double> @unpackhipd_not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
+; CHECK-LABEL: unpackhipd_not:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
ret <4 x double> %shuffle.i
}
@@ -60,102 +87,135 @@ entry:
;;;; Unpack versions using the fp unit for int unpacking
;;;;
-; CHECK: vunpckhps
define <8 x i32> @unpackhips1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhips1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
ret <8 x i32> %shuffle.i
}
-; CHECK: vunpckhps (%
define <8 x i32> @unpackhips2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhips2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps (%rdi), %ymm0
+; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; CHECK-NEXT: retq
%a = load <8 x i32>, <8 x i32>* %src1
%b = load <8 x i32>, <8 x i32>* %src2
%shuffle.i = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
ret <8 x i32> %shuffle.i
}
-; CHECK: vunpckhpd
define <4 x i64> @unpackhipd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhipd1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x i64> %shuffle.i
}
-; CHECK: vunpckhpd (%
define <4 x i64> @unpackhipd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhipd2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovapd (%rdi), %ymm0
+; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT: retq
%a = load <4 x i64>, <4 x i64>* %src1
%b = load <4 x i64>, <4 x i64>* %src2
%shuffle.i = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x i64> %shuffle.i
}
-; CHECK: vunpcklps
define <8 x i32> @unpacklops1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklops1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
ret <8 x i32> %shuffle.i
}
-; CHECK: vunpcklps (%
define <8 x i32> @unpacklops2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklops2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps (%rdi), %ymm0
+; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; CHECK-NEXT: retq
%a = load <8 x i32>, <8 x i32>* %src1
%b = load <8 x i32>, <8 x i32>* %src2
%shuffle.i = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
ret <8 x i32> %shuffle.i
}
-; CHECK: vunpcklpd
define <4 x i64> @unpacklopd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklopd1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
ret <4 x i64> %shuffle.i
}
-; CHECK: vunpcklpd (%
define <4 x i64> @unpacklopd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklopd2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovapd (%rdi), %ymm0
+; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
+; CHECK-NEXT: retq
%a = load <4 x i64>, <4 x i64>* %src1
%b = load <4 x i64>, <4 x i64>* %src2
%shuffle.i = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
ret <4 x i64> %shuffle.i
}
-; CHECK: vpunpckhwd
-; CHECK: vpunpckhwd
-; CHECK: vinsertf128
define <16 x i16> @unpackhwd_undef(<16 x i16> %src1) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhwd_undef:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <16 x i16> %src1, <16 x i16> %src1, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
ret <16 x i16> %shuffle.i
}
-; CHECK: vpunpcklwd
-; CHECK: vpunpcklwd
-; CHECK: vinsertf128
define <16 x i16> @unpacklwd_undef(<16 x i16> %src1) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklwd_undef:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3]
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <16 x i16> %src1, <16 x i16> %src1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
ret <16 x i16> %shuffle.i
}
-; CHECK: vpunpckhbw
-; CHECK: vpunpckhbw
-; CHECK: vinsertf128
define <32 x i8> @unpackhbw_undef(<32 x i8> %src1, <32 x i8> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhbw_undef:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <32 x i8> %src1, <32 x i8> %src1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
ret <32 x i8> %shuffle.i
}
-; CHECK: vpunpcklbw
-; CHECK: vpunpcklbw
-; CHECK: vinsertf128
define <32 x i8> @unpacklbw_undef(<32 x i8> %src1) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklbw_undef:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
%shuffle.i = shufflevector <32 x i8> %src1, <32 x i8> %src1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
ret <32 x i8> %shuffle.i
}
+
diff --git a/test/CodeGen/X86/avx-vinsertf128.ll b/test/CodeGen/X86/avx-vinsertf128.ll
index 38389de7a8a1..b7a4d5b5c308 100644
--- a/test/CodeGen/X86/avx-vinsertf128.ll
+++ b/test/CodeGen/X86/avx-vinsertf128.ll
@@ -1,30 +1,37 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s
-; CHECK-LABEL: A:
-; CHECK-NOT: vunpck
-; CHECK: vinsertf128 $1
define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: A:
+; CHECK: # BB#0:
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; CHECK-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
ret <8 x float> %shuffle
}
-; CHECK-LABEL: B:
-; CHECK-NOT: vunpck
-; CHECK: vinsertf128 $1
define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: B:
+; CHECK: # BB#0:
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; CHECK-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 1>
ret <4 x double> %shuffle
}
declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
-
declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
-; Just check that no crash happens
-; CHECK-LABEL: _insert_crash:
define void @insert_crash() nounwind {
+; CHECK-LABEL: insert_crash:
+; CHECK: # BB#0: # %allocas
+; CHECK-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vminpd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vminsd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; CHECK-NEXT: vmovups %xmm0, (%rax)
+; CHECK-NEXT: retq
allocas:
%v1.i.i451 = shufflevector <4 x double> zeroinitializer, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
%ret_0a.i.i.i452 = shufflevector <4 x double> %v1.i.i451, <4 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -40,72 +47,87 @@ allocas:
;; DAG Combine must remove useless vinsertf128 instructions
-; CHECK-LABEL: DAGCombineA:
-; CHECK-NOT: vinsertf128 $1
define <4 x i32> @DAGCombineA(<4 x i32> %v1) nounwind readonly {
- %1 = shufflevector <4 x i32> %v1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i32> %2
+; CHECK-LABEL: DAGCombineA:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %t2 = shufflevector <8 x i32> %t1, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %t2
}
-; CHECK-LABEL: DAGCombineB:
-; CHECK: vpaddd %xmm
-; CHECK-NOT: vinsertf128 $1
-; CHECK: vpaddd %xmm
define <8 x i32> @DAGCombineB(<8 x i32> %v1, <8 x i32> %v2) nounwind readonly {
- %1 = add <8 x i32> %v1, %v2
- %2 = add <8 x i32> %1, %v1
- ret <8 x i32> %2
+; CHECK-LABEL: DAGCombineB:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; CHECK-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm1
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %t1 = add <8 x i32> %v1, %v2
+ %t2 = add <8 x i32> %t1, %v1
+ ret <8 x i32> %t2
}
-; CHECK-LABEL: insert_undef_pd:
define <4 x double> @insert_undef_pd(<4 x double> %a0, <2 x double> %a1) {
-; CHECK: vmovaps %ymm1, %ymm0
+; CHECK-LABEL: insert_undef_pd:
+; CHECK: # BB#0:
+; CHECK-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> undef, <2 x double> %a1, i8 0)
ret <4 x double> %res
}
declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>, i8) nounwind readnone
-
-; CHECK-LABEL: insert_undef_ps:
define <8 x float> @insert_undef_ps(<8 x float> %a0, <4 x float> %a1) {
-; CHECK: vmovaps %ymm1, %ymm0
+; CHECK-LABEL: insert_undef_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %a1, i8 0)
ret <8 x float> %res
}
declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8) nounwind readnone
-
-; CHECK-LABEL: insert_undef_si:
define <8 x i32> @insert_undef_si(<8 x i32> %a0, <4 x i32> %a1) {
-; CHECK: vmovaps %ymm1, %ymm0
+; CHECK-LABEL: insert_undef_si:
+; CHECK: # BB#0:
+; CHECK-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> undef, <4 x i32> %a1, i8 0)
ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32>, <4 x i32>, i8) nounwind readnone
; rdar://10643481
-; CHECK-LABEL: vinsertf128_combine:
define <8 x float> @vinsertf128_combine(float* nocapture %f) nounwind uwtable readonly ssp {
-; CHECK-NOT: vmovaps
-; CHECK: vinsertf128
-entry:
+; CHECK-LABEL: vinsertf128_combine:
+; CHECK: # BB#0:
+; CHECK-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
+; CHECK-NEXT: retq
%add.ptr = getelementptr inbounds float, float* %f, i64 4
- %0 = bitcast float* %add.ptr to <4 x float>*
- %1 = load <4 x float>, <4 x float>* %0, align 16
- %2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
- ret <8 x float> %2
+ %t0 = bitcast float* %add.ptr to <4 x float>*
+ %t1 = load <4 x float>, <4 x float>* %t0, align 16
+ %t2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %t1, i8 1)
+ ret <8 x float> %t2
}
; rdar://11076953
-; CHECK-LABEL: vinsertf128_ucombine:
define <8 x float> @vinsertf128_ucombine(float* nocapture %f) nounwind uwtable readonly ssp {
-; CHECK-NOT: vmovups
-; CHECK: vinsertf128
-entry:
+; CHECK-LABEL: vinsertf128_ucombine:
+; CHECK: # BB#0:
+; CHECK-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
+; CHECK-NEXT: retq
%add.ptr = getelementptr inbounds float, float* %f, i64 4
- %0 = bitcast float* %add.ptr to <4 x float>*
- %1 = load <4 x float>, <4 x float>* %0, align 8
- %2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
- ret <8 x float> %2
+ %t0 = bitcast float* %add.ptr to <4 x float>*
+ %t1 = load <4 x float>, <4 x float>* %t0, align 8
+ %t2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %t1, i8 1)
+ ret <8 x float> %t2
}
+
diff --git a/test/CodeGen/X86/avx2-vbroadcast.ll b/test/CodeGen/X86/avx2-vbroadcast.ll
index 971d03af3778..318c9cfd8a3f 100644
--- a/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -633,13 +633,13 @@ entry:
define <8 x i32> @V111(<8 x i32> %in) nounwind uwtable readnone ssp {
; X32-AVX2-LABEL: V111:
; X32-AVX2: ## BB#0: ## %entry
-; X32-AVX2-NEXT: vpbroadcastd LCPI29_0, %ymm1
+; X32-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
; X32-AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
;
; X64-AVX2-LABEL: V111:
; X64-AVX2: ## BB#0: ## %entry
-; X64-AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
; X64-AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: retq
;
@@ -660,13 +660,13 @@ entry:
define <8 x float> @V113(<8 x float> %in) nounwind uwtable readnone ssp {
; X32-AVX2-LABEL: V113:
; X32-AVX2: ## BB#0: ## %entry
-; X32-AVX2-NEXT: vbroadcastss LCPI30_0, %ymm1
+; X32-AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X32-AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
;
; X64-AVX2-LABEL: V113:
; X64-AVX2: ## BB#0: ## %entry
-; X64-AVX2-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
+; X64-AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X64-AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: retq
;
@@ -687,12 +687,12 @@ entry:
define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: _e2:
; X32: ## BB#0:
-; X32-NEXT: vbroadcastss LCPI31_0, %xmm0
+; X32-NEXT: vbroadcastss {{.*#+}} xmm0 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X32-NEXT: retl
;
; X64-LABEL: _e2:
; X64: ## BB#0:
-; X64-NEXT: vbroadcastss {{.*}}(%rip), %xmm0
+; X64-NEXT: vbroadcastss {{.*#+}} xmm0 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X64-NEXT: retq
%vecinit.i = insertelement <4 x float> undef, float 0xbf80000000000000, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float 0xbf80000000000000, i32 1
diff --git a/test/CodeGen/X86/avx512-cmp.ll b/test/CodeGen/X86/avx512-cmp.ll
index eae7b94f5135..b5a13404a230 100644
--- a/test/CodeGen/X86/avx512-cmp.ll
+++ b/test/CodeGen/X86/avx512-cmp.ll
@@ -14,6 +14,7 @@ define double @test1(double %a, double %b) nounwind {
; ALL-NEXT: LBB0_2: ## %l2
; ALL-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
+; ALL-NEXT: ## -- End function
%tobool = fcmp une double %a, %b
br i1 %tobool, label %l1, label %l2
@@ -36,6 +37,7 @@ define float @test2(float %a, float %b) nounwind {
; ALL-NEXT: LBB1_2: ## %l2
; ALL-NEXT: vaddss %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
+; ALL-NEXT: ## -- End function
%tobool = fcmp olt float %a, %b
br i1 %tobool, label %l1, label %l2
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index 29a5325a0ae9..f858e7eb792f 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -12,6 +12,7 @@ define <16 x float> @test1(<16 x float> %x, float* %br, float %y) nounwind {
; KNL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; KNL-NEXT: vinsertf32x4 $3, %xmm0, %zmm2, %zmm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test1:
; SKX: ## BB#0:
@@ -21,6 +22,7 @@ define <16 x float> @test1(<16 x float> %x, float* %br, float %y) nounwind {
; SKX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; SKX-NEXT: vinsertf32x4 $3, %xmm0, %zmm2, %zmm0
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%rrr = load float, float* %br
%rrr2 = insertelement <16 x float> %x, float %rrr, i32 1
%rrr3 = insertelement <16 x float> %rrr2, float %y, i32 14
@@ -36,6 +38,7 @@ define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
; KNL-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; KNL-NEXT: vinsertf32x4 $3, %xmm0, %zmm2, %zmm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test2:
; SKX: ## BB#0:
@@ -45,6 +48,7 @@ define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
; SKX-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SKX-NEXT: vinsertf64x2 $3, %xmm0, %zmm2, %zmm0
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%rrr = load double, double* %br
%rrr2 = insertelement <8 x double> %x, double %rrr, i32 1
%rrr3 = insertelement <8 x double> %rrr2, double %y, i32 6
@@ -58,6 +62,7 @@ define <16 x float> @test3(<16 x float> %x) nounwind {
; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[2,3]
; KNL-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test3:
; SKX: ## BB#0:
@@ -65,6 +70,7 @@ define <16 x float> @test3(<16 x float> %x) nounwind {
; SKX-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[2,3]
; SKX-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%eee = extractelement <16 x float> %x, i32 4
%rrr2 = insertelement <16 x float> %x, float %eee, i32 1
ret <16 x float> %rrr2
@@ -78,6 +84,7 @@ define <8 x i64> @test4(<8 x i64> %x) nounwind {
; KNL-NEXT: vpinsrq $1, %rax, %xmm0, %xmm1
; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test4:
; SKX: ## BB#0:
@@ -86,6 +93,7 @@ define <8 x i64> @test4(<8 x i64> %x) nounwind {
; SKX-NEXT: vpinsrq $1, %rax, %xmm0, %xmm1
; SKX-NEXT: vinserti64x2 $0, %xmm1, %zmm0, %zmm0
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%eee = extractelement <8 x i64> %x, i32 4
%rrr2 = insertelement <8 x i64> %x, i64 %eee, i32 1
ret <8 x i64> %rrr2
@@ -96,11 +104,13 @@ define i32 @test5(<4 x float> %x) nounwind {
; KNL: ## BB#0:
; KNL-NEXT: vextractps $3, %xmm0, %eax
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test5:
; SKX: ## BB#0:
; SKX-NEXT: vextractps $3, %xmm0, %eax
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%ef = extractelement <4 x float> %x, i32 3
%ei = bitcast float %ef to i32
ret i32 %ei
@@ -111,11 +121,13 @@ define void @test6(<4 x float> %x, float* %out) nounwind {
; KNL: ## BB#0:
; KNL-NEXT: vextractps $3, %xmm0, (%rdi)
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test6:
; SKX: ## BB#0:
; SKX-NEXT: vextractps $3, %xmm0, (%rdi)
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%ef = extractelement <4 x float> %x, i32 3
store float %ef, float* %out, align 4
ret void
@@ -135,6 +147,7 @@ define float @test7(<16 x float> %x, i32 %ind) nounwind {
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test7:
; SKX: ## BB#0:
@@ -150,6 +163,7 @@ define float @test7(<16 x float> %x, i32 %ind) nounwind {
; SKX-NEXT: popq %rbp
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%e = extractelement <16 x float> %x, i32 %ind
ret float %e
}
@@ -168,6 +182,7 @@ define double @test8(<8 x double> %x, i32 %ind) nounwind {
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test8:
; SKX: ## BB#0:
@@ -183,6 +198,7 @@ define double @test8(<8 x double> %x, i32 %ind) nounwind {
; SKX-NEXT: popq %rbp
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%e = extractelement <8 x double> %x, i32 %ind
ret double %e
}
@@ -201,6 +217,7 @@ define float @test9(<8 x float> %x, i32 %ind) nounwind {
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test9:
; SKX: ## BB#0:
@@ -216,6 +233,7 @@ define float @test9(<8 x float> %x, i32 %ind) nounwind {
; SKX-NEXT: popq %rbp
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%e = extractelement <8 x float> %x, i32 %ind
ret float %e
}
@@ -234,6 +252,7 @@ define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test10:
; SKX: ## BB#0:
@@ -249,6 +268,7 @@ define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
; SKX-NEXT: popq %rbp
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%e = extractelement <16 x i32> %x, i32 %ind
ret i32 %e
}
@@ -1293,7 +1313,7 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
-; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; KNL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
@@ -1457,7 +1477,7 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
; KNL-LABEL: test_extractelement_v4i1:
; KNL: ## BB#0:
-; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; KNL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -2326,7 +2346,7 @@ define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b,
; KNL-LABEL: test_extractelement_varible_v4i1:
; KNL: ## BB#0:
; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
-; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; KNL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll
index 2b04b9229b3d..b3fbceea80a9 100644
--- a/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -8,6 +8,7 @@ define <16 x float> @test1(<16 x float> %x, <16 x float> %y) nounwind {
; CHECK-NEXT: vcmpleps %zmm1, %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask = fcmp ole <16 x float> %x, %y
%max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %y
ret <16 x float> %max
@@ -19,6 +20,7 @@ define <8 x double> @test2(<8 x double> %x, <8 x double> %y) nounwind {
; CHECK-NEXT: vcmplepd %zmm1, %zmm0, %k1
; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask = fcmp ole <8 x double> %x, %y
%max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %y
ret <8 x double> %max
@@ -30,6 +32,7 @@ define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwin
; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%y = load <16 x i32>, <16 x i32>* %yp, align 4
%mask = icmp eq <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -42,6 +45,7 @@ define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1)
; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask = icmp uge <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
ret <16 x i32> %max
@@ -53,6 +57,7 @@ define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask = icmp eq <8 x i64> %x, %y
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
ret <8 x i64> %max
@@ -64,6 +69,7 @@ define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1) noun
; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask = icmp ugt <8 x i64> %x, %y
%max = select <8 x i1> %mask, <8 x i64> %x1, <8 x i64> %y
ret <8 x i64> %max
@@ -117,12 +123,14 @@ define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test9:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; SKX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%mask = icmp eq <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
ret <8 x i32> %max
@@ -137,12 +145,14 @@ define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test10:
; SKX: ## BB#0:
; SKX-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%mask = fcmp oeq <8 x float> %x, %y
%max = select <8 x i1> %mask, <8 x float> %x, <8 x float> %y
@@ -154,6 +164,7 @@ define <8 x i32> @test11_unsigned(<8 x i32> %x, <8 x i32> %y) nounwind {
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask = icmp ugt <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
ret <8 x i32> %max
@@ -168,6 +179,7 @@ define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test12:
; SKX: ## BB#0:
@@ -178,6 +190,7 @@ define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%res = icmp eq <16 x i64> %a, %b
%res1 = bitcast <16 x i1> %res to i16
ret i16 %res1
@@ -330,6 +343,7 @@ define i32 @test12_v32i32(<32 x i32> %a, <32 x i32> %b) nounwind {
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test12_v32i32:
; SKX: ## BB#0:
@@ -339,6 +353,7 @@ define i32 @test12_v32i32(<32 x i32> %a, <32 x i32> %b) nounwind {
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%res = icmp eq <32 x i32> %a, %b
%res1 = bitcast <32 x i1> %res to i32
ret i32 %res1
@@ -642,6 +657,7 @@ define i64 @test12_v64i16(<64 x i16> %a, <64 x i16> %b) nounwind {
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test12_v64i16:
; SKX: ## BB#0:
@@ -651,6 +667,7 @@ define i64 @test12_v64i16(<64 x i16> %a, <64 x i16> %b) nounwind {
; SKX-NEXT: kmovq %k0, %rax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%res = icmp eq <64 x i16> %a, %b
%res1 = bitcast <64 x i1> %res to i64
ret i64 %res1
@@ -704,6 +721,7 @@ define <16 x i32> @test16(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind
; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k1
; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask = icmp sge <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
ret <16 x i32> %max
@@ -715,6 +733,7 @@ define <16 x i32> @test17(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sgt <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -727,6 +746,7 @@ define <16 x i32> @test18(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-NEXT: vpcmpled (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sle <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -739,6 +759,7 @@ define <16 x i32> @test19(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp ule <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -752,6 +773,7 @@ define <16 x i32> @test20(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask1 = icmp eq <16 x i32> %x1, %y1
%mask0 = icmp eq <16 x i32> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
@@ -766,6 +788,7 @@ define <8 x i64> @test21(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y
; CHECK-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1}
; CHECK-NEXT: vpblendmq %zmm0, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask1 = icmp sge <8 x i64> %x1, %y1
%mask0 = icmp sle <8 x i64> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
@@ -780,6 +803,7 @@ define <8 x i64> @test22(<8 x i64> %x, <8 x i64>* %y.ptr, <8 x i64> %x1, <8 x i6
; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask1 = icmp sgt <8 x i64> %x1, %y1
%y = load <8 x i64>, <8 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <8 x i64> %x, %y
@@ -795,6 +819,7 @@ define <16 x i32> @test23(<16 x i32> %x, <16 x i32>* %y.ptr, <16 x i32> %x1, <16
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask1 = icmp sge <16 x i32> %x1, %y1
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask0 = icmp ule <16 x i32> %x, %y
@@ -809,6 +834,7 @@ define <8 x i64> @test24(<8 x i64> %x, <8 x i64> %x1, i64* %yb.ptr) nounwind {
; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
%y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
@@ -823,6 +849,7 @@ define <16 x i32> @test25(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1) nounwind
; CHECK-NEXT: vpcmpled (%rdi){1to16}, %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
%y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
@@ -838,6 +865,7 @@ define <16 x i32> @test26(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1, <16 x i32
; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask1 = icmp sge <16 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
@@ -855,6 +883,7 @@ define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y
; CHECK-NEXT: vpcmpleq (%rdi){1to8}, %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%mask1 = icmp sge <8 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
@@ -920,12 +949,14 @@ define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind {
; KNL-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm2
; KNL-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test30:
; SKX: ## BB#0:
; SKX-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%mask = fcmp oeq <4 x double> %x, %y
%max = select <4 x i1> %mask, <4 x double> %x, <4 x double> %y
@@ -938,12 +969,14 @@ define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp
; KNL-NEXT: vcmpltpd (%rdi), %xmm0, %xmm2
; KNL-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test31:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi), %xmm0, %k1
; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%y = load <2 x double>, <2 x double>* %yp, align 4
%mask = fcmp olt <2 x double> %x, %y
@@ -957,12 +990,14 @@ define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp
; KNL-NEXT: vcmpltpd (%rdi), %ymm0, %ymm2
; KNL-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test32:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi), %ymm0, %k1
; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%y = load <4 x double>, <4 x double>* %yp, align 4
%mask = fcmp ogt <4 x double> %y, %x
@@ -976,6 +1011,7 @@ define <8 x double> @test33(<8 x double> %x, <8 x double> %x1, <8 x double>* %yp
; CHECK-NEXT: vcmpltpd (%rdi), %zmm0, %k1
; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%y = load <8 x double>, <8 x double>* %yp, align 4
%mask = fcmp olt <8 x double> %x, %y
%max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %x1
@@ -988,12 +1024,14 @@ define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) no
; KNL-NEXT: vcmpltps (%rdi), %xmm0, %xmm2
; KNL-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test34:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi), %xmm0, %k1
; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%y = load <4 x float>, <4 x float>* %yp, align 4
%mask = fcmp olt <4 x float> %x, %y
%max = select <4 x i1> %mask, <4 x float> %x, <4 x float> %x1
@@ -1010,12 +1048,14 @@ define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) no
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test35:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi), %ymm0, %k1
; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%y = load <8 x float>, <8 x float>* %yp, align 4
%mask = fcmp ogt <8 x float> %y, %x
@@ -1029,6 +1069,7 @@ define <16 x float> @test36(<16 x float> %x, <16 x float> %x1, <16 x float>* %yp
; CHECK-NEXT: vcmpltps (%rdi), %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%y = load <16 x float>, <16 x float>* %yp, align 4
%mask = fcmp olt <16 x float> %x, %y
%max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %x1
@@ -1041,6 +1082,7 @@ define <8 x double> @test37(<8 x double> %x, <8 x double> %x1, double* %ptr) nou
; CHECK-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1
; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%a = load double, double* %ptr
%v = insertelement <8 x double> undef, double %a, i32 0
@@ -1058,12 +1100,14 @@ define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nou
; KNL-NEXT: vcmpltpd %ymm2, %ymm0, %ymm2
; KNL-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test38:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi){1to4}, %ymm0, %k1
; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%a = load double, double* %ptr
%v = insertelement <4 x double> undef, double %a, i32 0
@@ -1081,12 +1125,14 @@ define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nou
; KNL-NEXT: vcmpltpd %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test39:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi){1to2}, %xmm0, %k1
; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%a = load double, double* %ptr
%v = insertelement <2 x double> undef, double %a, i32 0
@@ -1104,6 +1150,7 @@ define <16 x float> @test40(<16 x float> %x, <16 x float> %x1, float* %ptr) n
; CHECK-NEXT: vcmpltps (%rdi){1to16}, %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
+; CHECK-NEXT: ## -- End function
%a = load float, float* %ptr
%v = insertelement <16 x float> undef, float %a, i32 0
@@ -1124,12 +1171,14 @@ define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) noun
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test41:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi){1to8}, %ymm0, %k1
; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%a = load float, float* %ptr
%v = insertelement <8 x float> undef, float %a, i32 0
@@ -1147,12 +1196,14 @@ define <4 x float> @test42(<4 x float> %x, <4 x float> %x1, float* %ptr) noun
; KNL-NEXT: vcmpltps %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test42:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi){1to4}, %xmm0, %k1
; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%a = load float, float* %ptr
%v = insertelement <4 x float> undef, float %a, i32 0
@@ -1172,6 +1223,7 @@ define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x
; KNL-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
; KNL-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
+; KNL-NEXT: ## -- End function
;
; SKX-LABEL: test43:
; SKX: ## BB#0:
@@ -1180,6 +1232,7 @@ define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x
; SKX-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
; SKX-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
+; SKX-NEXT: ## -- End function
%a = load double, double* %ptr
%v = insertelement <8 x double> undef, double %a, i32 0
diff --git a/test/CodeGen/X86/avx512vl-vec-cmp.ll b/test/CodeGen/X86/avx512vl-vec-cmp.ll
index e0acf2be653e..43b1f53a09fa 100644
--- a/test/CodeGen/X86/avx512vl-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512vl-vec-cmp.ll
@@ -1,56 +1,98 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=VLX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=NoVLX
define <4 x i64> @test256_1(<4 x i64> %x, <4 x i64> %y) nounwind {
-; CHECK-LABEL: test256_1:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_1:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
+; VLX-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_1:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm2
+; NoVLX-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: retq
%mask = icmp eq <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %y
ret <4 x i64> %max
}
define <4 x i64> @test256_2(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind {
-; CHECK-LABEL: test256_2:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_2:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
+; VLX-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_2:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vblendvpd %ymm0, %ymm2, %ymm1, %ymm0
+; NoVLX-NEXT: retq
%mask = icmp sgt <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
ret <4 x i64> %max
}
define <8 x i32> @test256_3(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1) nounwind {
-; CHECK-LABEL: test256_3:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k1
-; CHECK-NEXT: vpblendmd %ymm2, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_3:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k1
+; VLX-NEXT: vpblendmd %ymm2, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_3:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k1
+; NoVLX-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%mask = icmp sge <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x1, <8 x i32> %y
ret <8 x i32> %max
}
define <4 x i64> @test256_4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind {
-; CHECK-LABEL: test256_4:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_4:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpnleuq %ymm1, %ymm0, %k1
+; VLX-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_4:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm3, %ymm1, %ymm4
+; NoVLX-NEXT: vpxor %ymm3, %ymm0, %ymm0
+; NoVLX-NEXT: vpcmpgtq %ymm4, %ymm0, %ymm0
+; NoVLX-NEXT: vblendvpd %ymm0, %ymm2, %ymm1, %ymm0
+; NoVLX-NEXT: retq
%mask = icmp ugt <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
ret <4 x i64> %max
}
define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
-; CHECK-LABEL: test256_5:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_5:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_5:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -58,11 +100,21 @@ define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwin
}
define <8 x i32> @test256_5b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
-; CHECK-LABEL: test256_5b:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_5b:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_5b:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpeqd %zmm0, %zmm2, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %y, %x
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -70,11 +122,21 @@ define <8 x i32> @test256_5b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
}
define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test256_6:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_6:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_6:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sgt <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -82,11 +144,21 @@ define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
}
define <8 x i32> @test256_6b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test256_6b:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_6b:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_6b:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp slt <8 x i32> %y, %x
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -94,11 +166,21 @@ define <8 x i32> @test256_6b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
}
define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test256_7:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_7:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_7:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sle <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -106,11 +188,21 @@ define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
}
define <8 x i32> @test256_7b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test256_7b:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_7b:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_7b:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sge <8 x i32> %y, %x
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -118,11 +210,21 @@ define <8 x i32> @test256_7b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
}
define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test256_8:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_8:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleud (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_8:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpleud %zmm2, %zmm0, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp ule <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -130,11 +232,21 @@ define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
}
define <8 x i32> @test256_8b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test256_8b:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_8b:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleud (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_8b:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp uge <8 x i32> %y, %x
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -142,12 +254,25 @@ define <8 x i32> @test256_8b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
}
define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32> %y1) nounwind {
-; CHECK-LABEL: test256_9:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_9:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
+; VLX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 {%k1}
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_9:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; NoVLX-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpeqd %zmm3, %zmm2, %k0
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%mask1 = icmp eq <8 x i32> %x1, %y1
%mask0 = icmp eq <8 x i32> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
@@ -156,12 +281,22 @@ define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32>
}
define <4 x i64> @test256_10(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) nounwind {
-; CHECK-LABEL: test256_10:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpcmpleq %ymm2, %ymm3, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %ymm0, %ymm2, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_10:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleq %ymm1, %ymm0, %k1
+; VLX-NEXT: vpcmpleq %ymm2, %ymm3, %k1 {%k1}
+; VLX-NEXT: vpblendmq %ymm0, %ymm2, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_10:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm3
+; NoVLX-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4
+; NoVLX-NEXT: vpxor %ymm4, %ymm3, %ymm3
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm1
+; NoVLX-NEXT: vpandn %ymm3, %ymm1, %ymm1
+; NoVLX-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0
+; NoVLX-NEXT: retq
%mask1 = icmp sge <4 x i64> %x1, %y1
%mask0 = icmp sle <4 x i64> %x, %y
%mask = select <4 x i1> %mask0, <4 x i1> %mask1, <4 x i1> zeroinitializer
@@ -170,12 +305,20 @@ define <4 x i64> @test256_10(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64
}
define <4 x i64> @test256_11(<4 x i64> %x, <4 x i64>* %y.ptr, <4 x i64> %x1, <4 x i64> %y1) nounwind {
-; CHECK-LABEL: test256_11:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpgtq %ymm2, %ymm1, %k1
-; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_11:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpgtq %ymm2, %ymm1, %k1
+; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k1 {%k1}
+; VLX-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_11:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
+; NoVLX-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm3
+; NoVLX-NEXT: vpand %ymm2, %ymm3, %ymm2
+; NoVLX-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: retq
%mask1 = icmp sgt <4 x i64> %x1, %y1
%y = load <4 x i64>, <4 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <4 x i64> %x, %y
@@ -185,12 +328,25 @@ define <4 x i64> @test256_11(<4 x i64> %x, <4 x i64>* %y.ptr, <4 x i64> %x1, <4
}
define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8 x i32> %y1) nounwind {
-; CHECK-LABEL: test256_12:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled %ymm1, %ymm2, %k1
-; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_12:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled %ymm1, %ymm2, %k1
+; VLX-NEXT: vpcmpleud (%rdi), %ymm0, %k1 {%k1}
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_12:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k0
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpleud %zmm2, %zmm0, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask0 = icmp ule <8 x i32> %x, %y
@@ -200,11 +356,18 @@ define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8
}
define <4 x i64> @test256_13(<4 x i64> %x, <4 x i64> %x1, i64* %yb.ptr) nounwind {
-; CHECK-LABEL: test256_13:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k1
-; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_13:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k1
+; VLX-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_13:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpeqq %ymm2, %ymm0, %ymm2
+; NoVLX-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0
%y = shufflevector <4 x i64> %y.0, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -214,11 +377,21 @@ define <4 x i64> @test256_13(<4 x i64> %x, <4 x i64> %x1, i64* %yb.ptr) nounwind
}
define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind {
-; CHECK-LABEL: test256_14:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled (%rdi){1to8}, %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_14:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled (%rdi){1to8}, %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_14:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
%y = shufflevector <8 x i32> %y.0, <8 x i32> undef, <8 x i32> zeroinitializer
@@ -228,12 +401,25 @@ define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind
}
define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32> %y1) nounwind {
-; CHECK-LABEL: test256_15:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled %ymm1, %ymm2, %k1
-; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_15:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled %ymm1, %ymm2, %k1
+; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k1 {%k1}
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_15:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k0
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
@@ -245,12 +431,21 @@ define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32
}
define <4 x i64> @test256_16(<4 x i64> %x, i64* %yb.ptr, <4 x i64> %x1, <4 x i64> %y1) nounwind {
-; CHECK-LABEL: test256_16:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleq %ymm1, %ymm2, %k1
-; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_16:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleq %ymm1, %ymm2, %k1
+; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k1 {%k1}
+; VLX-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_16:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm2
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm3
+; NoVLX-NEXT: vpcmpgtq %ymm3, %ymm0, %ymm3
+; NoVLX-NEXT: vpandn %ymm3, %ymm2, %ymm2
+; NoVLX-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: retq
%mask1 = icmp sge <4 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0
@@ -262,11 +457,21 @@ define <4 x i64> @test256_16(<4 x i64> %x, i64* %yb.ptr, <4 x i64> %x1, <4 x i64
}
define <8 x i32> @test256_17(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
-; CHECK-LABEL: test256_17:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_17:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_17:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -274,11 +479,21 @@ define <8 x i32> @test256_17(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
}
define <8 x i32> @test256_18(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
-; CHECK-LABEL: test256_18:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_18:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_18:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpneqd %zmm0, %zmm2, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %y, %x
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -286,11 +501,21 @@ define <8 x i32> @test256_18(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
}
define <8 x i32> @test256_19(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
-; CHECK-LABEL: test256_19:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpnltud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_19:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpnltud (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_19:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpnltud %zmm2, %zmm0, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -298,11 +523,21 @@ define <8 x i32> @test256_19(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
}
define <8 x i32> @test256_20(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
-; CHECK-LABEL: test256_20:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test256_20:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleud (%rdi), %ymm0, %k1
+; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test256_20:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
+; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %y, %x
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -310,55 +545,90 @@ define <8 x i32> @test256_20(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
}
define <2 x i64> @test128_1(<2 x i64> %x, <2 x i64> %y) nounwind {
-; CHECK-LABEL: test128_1:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_1:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
+; VLX-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_1:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm2
+; NoVLX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%mask = icmp eq <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %y
ret <2 x i64> %max
}
define <2 x i64> @test128_2(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind {
-; CHECK-LABEL: test128_2:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_2:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
+; VLX-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_2:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%mask = icmp sgt <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
ret <2 x i64> %max
}
define <4 x i32> @test128_3(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1) nounwind {
-; CHECK-LABEL: test128_3:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k1
-; CHECK-NEXT: vpblendmd %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_3:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k1
+; VLX-NEXT: vpblendmd %xmm2, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_3:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; NoVLX-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; NoVLX-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%mask = icmp sge <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x1, <4 x i32> %y
ret <4 x i32> %max
}
define <2 x i64> @test128_4(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind {
-; CHECK-LABEL: test128_4:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_4:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1
+; VLX-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_4:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm3, %xmm1, %xmm4
+; NoVLX-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; NoVLX-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm0
+; NoVLX-NEXT: vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%mask = icmp ugt <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
ret <2 x i64> %max
}
define <4 x i32> @test128_5(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwind {
-; CHECK-LABEL: test128_5:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_5:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_5:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -366,11 +636,17 @@ define <4 x i32> @test128_5(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwin
}
define <4 x i32> @test128_5b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwind {
-; CHECK-LABEL: test128_5b:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_5b:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_5b:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %y, %x
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -378,11 +654,17 @@ define <4 x i32> @test128_5b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwi
}
define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_6:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_6:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_6:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sgt <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -390,11 +672,17 @@ define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
}
define <4 x i32> @test128_6b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_6b:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_6b:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_6b:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp slt <4 x i32> %y, %x
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -402,11 +690,19 @@ define <4 x i32> @test128_6b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
}
define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_7:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_7:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_7:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; NoVLX-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sle <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -414,11 +710,19 @@ define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
}
define <4 x i32> @test128_7b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_7b:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_7b:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_7b:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; NoVLX-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sge <4 x i32> %y, %x
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -426,11 +730,18 @@ define <4 x i32> @test128_7b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
}
define <4 x i32> @test128_8(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_8:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_8:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleud (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_8:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpminud (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ule <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -438,11 +749,19 @@ define <4 x i32> @test128_8(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
}
define <4 x i32> @test128_8b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_8b:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_8b:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleud (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_8b:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
+; NoVLX-NEXT: vpmaxud %xmm0, %xmm2, %xmm3
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %y, %x
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -450,12 +769,20 @@ define <4 x i32> @test128_8b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
}
define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32> %y1) nounwind {
-; CHECK-LABEL: test128_9:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_9:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
+; VLX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 {%k1}
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_9:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm3
+; NoVLX-NEXT: vpand %xmm2, %xmm3, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%mask1 = icmp eq <4 x i32> %x1, %y1
%mask0 = icmp eq <4 x i32> %x, %y
%mask = select <4 x i1> %mask0, <4 x i1> %mask1, <4 x i1> zeroinitializer
@@ -464,12 +791,22 @@ define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32>
}
define <2 x i64> @test128_10(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) nounwind {
-; CHECK-LABEL: test128_10:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpcmpleq %xmm2, %xmm3, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %xmm0, %xmm2, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_10:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleq %xmm1, %xmm0, %k1
+; VLX-NEXT: vpcmpleq %xmm2, %xmm3, %k1 {%k1}
+; VLX-NEXT: vpblendmq %xmm0, %xmm2, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_10:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm3
+; NoVLX-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
+; NoVLX-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm1
+; NoVLX-NEXT: vpandn %xmm3, %xmm1, %xmm1
+; NoVLX-NEXT: vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
+; NoVLX-NEXT: retq
%mask1 = icmp sge <2 x i64> %x1, %y1
%mask0 = icmp sle <2 x i64> %x, %y
%mask = select <2 x i1> %mask0, <2 x i1> %mask1, <2 x i1> zeroinitializer
@@ -478,12 +815,20 @@ define <2 x i64> @test128_10(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64
}
define <2 x i64> @test128_11(<2 x i64> %x, <2 x i64>* %y.ptr, <2 x i64> %x1, <2 x i64> %y1) nounwind {
-; CHECK-LABEL: test128_11:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpgtq %xmm2, %xmm1, %k1
-; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_11:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpgtq %xmm2, %xmm1, %k1
+; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k1 {%k1}
+; VLX-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_11:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm2
+; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm3
+; NoVLX-NEXT: vpand %xmm2, %xmm3, %xmm2
+; NoVLX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%mask1 = icmp sgt <2 x i64> %x1, %y1
%y = load <2 x i64>, <2 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <2 x i64> %x, %y
@@ -493,12 +838,21 @@ define <2 x i64> @test128_11(<2 x i64> %x, <2 x i64>* %y.ptr, <2 x i64> %x1, <2
}
define <4 x i32> @test128_12(<4 x i32> %x, <4 x i32>* %y.ptr, <4 x i32> %x1, <4 x i32> %y1) nounwind {
-; CHECK-LABEL: test128_12:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled %xmm1, %xmm2, %k1
-; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_12:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled %xmm1, %xmm2, %k1
+; VLX-NEXT: vpcmpleud (%rdi), %xmm0, %k1 {%k1}
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_12:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm2
+; NoVLX-NEXT: vpminud (%rdi), %xmm0, %xmm3
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm3
+; NoVLX-NEXT: vpandn %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%mask1 = icmp sge <4 x i32> %x1, %y1
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask0 = icmp ule <4 x i32> %x, %y
@@ -508,11 +862,18 @@ define <4 x i32> @test128_12(<4 x i32> %x, <4 x i32>* %y.ptr, <4 x i32> %x1, <4
}
define <2 x i64> @test128_13(<2 x i64> %x, <2 x i64> %x1, i64* %yb.ptr) nounwind {
-; CHECK-LABEL: test128_13:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k1
-; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_13:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k1
+; VLX-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_13:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm2
+; NoVLX-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm2
+; NoVLX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0
%y = insertelement <2 x i64> %y.0, i64 %yb, i32 1
@@ -522,11 +883,20 @@ define <2 x i64> @test128_13(<2 x i64> %x, <2 x i64> %x1, i64* %yb.ptr) nounwind
}
define <4 x i32> @test128_14(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1) nounwind {
-; CHECK-LABEL: test128_14:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled (%rdi){1to4}, %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_14:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled (%rdi){1to4}, %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_14:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm2
+; NoVLX-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm2
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; NoVLX-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0
%y = shufflevector <4 x i32> %y.0, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -536,12 +906,21 @@ define <4 x i32> @test128_14(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1) nounwind
}
define <4 x i32> @test128_15(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1, <4 x i32> %y1) nounwind {
-; CHECK-LABEL: test128_15:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpled %xmm1, %xmm2, %k1
-; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_15:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpled %xmm1, %xmm2, %k1
+; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k1 {%k1}
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_15:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm2
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm3
+; NoVLX-NEXT: vpcmpgtd %xmm3, %xmm0, %xmm3
+; NoVLX-NEXT: vpandn %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%mask1 = icmp sge <4 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0
@@ -553,12 +932,21 @@ define <4 x i32> @test128_15(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1, <4 x i32
}
define <2 x i64> @test128_16(<2 x i64> %x, i64* %yb.ptr, <2 x i64> %x1, <2 x i64> %y1) nounwind {
-; CHECK-LABEL: test128_16:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleq %xmm1, %xmm2, %k1
-; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_16:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleq %xmm1, %xmm2, %k1
+; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k1 {%k1}
+; VLX-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_16:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm2
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm3
+; NoVLX-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm3
+; NoVLX-NEXT: vpandn %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%mask1 = icmp sge <2 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0
@@ -570,11 +958,19 @@ define <2 x i64> @test128_16(<2 x i64> %x, i64* %yb.ptr, <2 x i64> %x1, <2 x i64
}
define <4 x i32> @test128_17(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_17:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_17:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_17:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; NoVLX-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ne <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -582,11 +978,19 @@ define <4 x i32> @test128_17(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
}
define <4 x i32> @test128_18(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_18:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_18:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_18:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; NoVLX-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ne <4 x i32> %y, %x
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -594,11 +998,18 @@ define <4 x i32> @test128_18(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
}
define <4 x i32> @test128_19(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_19:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpnltud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_19:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpnltud (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_19:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vpmaxud (%rdi), %xmm0, %xmm2
+; NoVLX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -606,11 +1017,19 @@ define <4 x i32> @test128_19(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
}
define <4 x i32> @test128_20(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
-; CHECK-LABEL: test128_20:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; VLX-LABEL: test128_20:
+; VLX: # BB#0:
+; VLX-NEXT: vpcmpleud (%rdi), %xmm0, %k1
+; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test128_20:
+; NoVLX: # BB#0:
+; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
+; NoVLX-NEXT: vpmaxud %xmm0, %xmm2, %xmm3
+; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %y, %x
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
diff --git a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
index f297fc3db95f..4d3a1495617e 100644
--- a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
+++ b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
@@ -1,13 +1,124 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s -check-prefix=NoVLX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=VLX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=NoVLX
define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqb_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqb_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqb_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi0:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi2:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi3:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi4:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi5:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi6:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi7:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -18,11 +129,122 @@ entry:
}
define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqb_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqb_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqb_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi8:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi9:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi10:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi11:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi12:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi13:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi14:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi15:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -34,12 +256,124 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi16:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi17:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi18:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi19:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi20:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi21:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi22:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi23:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -52,12 +386,124 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi24:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi25:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi26:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi27:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi28:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi29:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi30:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi31:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -72,11 +518,127 @@ entry:
define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqb_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqb_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqb_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi32:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi33:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi34:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi35:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi36:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi37:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi38:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi39:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -87,11 +649,127 @@ entry:
}
define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqb_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqb_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqb_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi40:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi41:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi42:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi43:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi44:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi45:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi46:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi47:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -103,12 +781,129 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi48:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi49:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi50:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi51:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi52:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi53:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi54:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi55:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -121,12 +916,129 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi56:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi57:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi58:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi59:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi60:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi61:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi62:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi63:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -141,12 +1053,46 @@ entry:
define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqb_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqb_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqb %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqb_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi64:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi65:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi66:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%1 = bitcast <4 x i64> %__b to <32 x i8>
@@ -157,12 +1103,46 @@ entry:
}
define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqb_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqb (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqb_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqb (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqb_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi67:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi68:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi69:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -174,13 +1154,56 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi70:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi71:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi72:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm2, %xmm2
+; NoVLX-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
+; NoVLX-NEXT: vpmovdb %zmm3, %xmm3
+; NoVLX-NEXT: vpxord %zmm4, %zmm4, %zmm4
+; NoVLX-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpand %xmm3, %xmm1, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm4, %zmm4, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%1 = bitcast <4 x i64> %__b to <32 x i8>
@@ -193,13 +1216,56 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqb (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqb (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi73:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi74:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi75:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm1, %xmm1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; NoVLX-NEXT: vpmovdb %zmm2, %xmm2
+; NoVLX-NEXT: vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm4
+; NoVLX-NEXT: vpand %xmm2, %xmm4, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -214,11 +1280,24 @@ entry:
define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -229,11 +1308,24 @@ entry:
}
define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -245,12 +1337,26 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -263,12 +1369,26 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqw (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -283,11 +1403,72 @@ entry:
define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi76:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi77:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi78:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -298,11 +1479,72 @@ entry:
}
define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi79:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi80:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi81:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -314,12 +1556,74 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi82:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi83:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi84:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -332,12 +1636,74 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi85:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi86:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi87:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqw (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -352,11 +1718,77 @@ entry:
define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi88:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi89:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi90:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -367,11 +1799,77 @@ entry:
}
define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi91:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi92:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi93:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -383,12 +1881,79 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi94:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi95:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi96:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -401,12 +1966,79 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi97:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi98:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi99:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqw (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -421,12 +2053,123 @@ entry:
define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi100:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi101:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi102:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi103:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi104:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi105:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi106:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi107:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -437,12 +2180,123 @@ entry:
}
define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi108:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi109:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi110:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi111:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi112:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi113:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi114:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi115:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -454,13 +2308,125 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi116:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi117:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi118:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi119:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi120:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi121:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi122:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi123:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -473,13 +2439,125 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi124:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi125:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi126:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi127:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi128:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi129:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi130:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi131:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqw (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -494,12 +2572,128 @@ entry:
define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi132:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi133:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi134:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi135:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi136:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi137:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi138:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi139:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -510,12 +2704,128 @@ entry:
}
define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi140:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi141:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi142:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi143:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi144:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi145:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi146:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi147:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -527,13 +2837,130 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi148:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi149:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi150:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi151:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi152:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi153:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi154:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi155:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -546,13 +2973,130 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi156:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi157:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi158:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi159:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi160:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi161:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi162:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi163:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqw (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -567,12 +3111,348 @@ entry:
define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi164:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi165:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi166:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm3
+; NoVLX-NEXT: vmovq %xmm3, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm2
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm5
+; NoVLX-NEXT: vextracti32x4 $2, %zmm1, %xmm8
+; NoVLX-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; NoVLX-NEXT: vextracti32x4 $1, %zmm1, %xmm6
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm7
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm2
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm2, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm7, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm5
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm7, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm6, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm7
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm6, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm6
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm8, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm8, %rax
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
+; NoVLX-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm3
+; NoVLX-NEXT: vinserti128 $1, %xmm6, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm3, %ymm1
+; NoVLX-NEXT: vpmovsxwd %ymm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%1 = bitcast <8 x i64> %__b to <32 x i16>
@@ -583,12 +3463,263 @@ entry:
}
define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqw_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqw (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqw_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqw (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqw_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi167:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi168:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi169:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm2
+; NoVLX-NEXT: vmovq %xmm2, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm1
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm3
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm1
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm4
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpeqw 32(%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpmovsxwd %ymm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %eax, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -600,13 +3731,358 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi170:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi171:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi172:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm2
+; NoVLX-NEXT: vmovq %xmm2, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm3
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm4
+; NoVLX-NEXT: vextracti32x4 $1, %zmm1, %xmm8
+; NoVLX-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; NoVLX-NEXT: vextracti32x4 $3, %zmm1, %xmm7
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm3
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm3, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm9
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm6, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm4
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm6, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm6
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm7, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm7, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm5, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm7
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm5, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm8, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm8, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm5
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm5, %xmm5
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: vinserti128 $1, %xmm9, %ymm4, %ymm1
+; NoVLX-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm8
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vinserti128 $1, %xmm7, %ymm3, %ymm3
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm0
+; NoVLX-NEXT: vpternlogd $255, %zmm6, %zmm6, %zmm6 {%k2} {z}
+; NoVLX-NEXT: vpcmpeqw %ymm3, %ymm1, %ymm4
+; NoVLX-NEXT: vpmovdb %zmm6, %xmm6
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vinserti128 $1, %xmm5, %ymm2, %ymm2
+; NoVLX-NEXT: vpcmpeqw %ymm2, %ymm8, %ymm2
+; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm3
+; NoVLX-NEXT: vpmovsxwd %ymm4, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpand %xmm6, %xmm2, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm0, %xmm3, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%1 = bitcast <8 x i64> %__b to <32 x i16>
@@ -619,13 +4095,273 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqw (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqw (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi173:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi174:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi175:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
+; NoVLX-NEXT: vmovq %xmm1, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm2
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm3
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm4
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm3, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm4
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm5
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm4
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm2
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3
+; NoVLX-NEXT: vpcmpeqw (%rsi), %ymm3, %ymm3
+; NoVLX-NEXT: vpmovsxwd %ymm3, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm3
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %eax, %xmm3
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpcmpeqw 32(%rsi), %ymm4, %ymm4
+; NoVLX-NEXT: vpmovsxwd %ymm4, %zmm4
+; NoVLX-NEXT: vpslld $31, %zmm4, %zmm4
+; NoVLX-NEXT: vptestmd %zmm4, %zmm4, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpand %xmm2, %xmm4, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm1, %xmm3, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -640,11 +4376,51 @@ entry:
define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -655,11 +4431,51 @@ entry:
}
define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -671,12 +4487,70 @@ entry:
}
define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -690,12 +4564,70 @@ entry:
}
define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqd (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -711,11 +4643,52 @@ entry:
define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -728,12 +4701,71 @@ entry:
}
define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -750,11 +4782,50 @@ entry:
define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -765,11 +4836,50 @@ entry:
}
define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -781,12 +4891,69 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -800,12 +4967,69 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqd (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -821,11 +5045,51 @@ entry:
define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -838,12 +5102,70 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -860,11 +5182,39 @@ entry:
define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi176:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi177:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi178:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -875,11 +5225,39 @@ entry:
}
define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi179:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi180:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi181:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -891,12 +5269,58 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi182:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi183:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi184:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -910,12 +5334,58 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi185:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi186:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi187:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqd (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -931,11 +5401,40 @@ entry:
define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi188:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi189:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi190:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -948,12 +5447,59 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi191:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi192:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi193:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -970,11 +5516,46 @@ entry:
define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi194:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi195:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi196:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -985,11 +5566,46 @@ entry:
}
define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi197:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi198:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi199:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -1001,12 +5617,65 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi200:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi201:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi202:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -1020,12 +5689,65 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi203:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi204:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi205:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqd (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -1041,11 +5763,47 @@ entry:
define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi206:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi207:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi208:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -1058,12 +5816,66 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi209:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi210:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi211:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -1080,21 +5892,23 @@ entry:
define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -1106,21 +5920,23 @@ entry:
}
define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -1133,23 +5949,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -1163,23 +5981,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -1195,21 +6015,23 @@ entry:
define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -1223,23 +6045,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -1256,12 +6080,72 @@ entry:
define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi212:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi213:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi214:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -1272,12 +6156,72 @@ entry:
}
define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi215:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi216:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi217:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -1289,13 +6233,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi218:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi219:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi220:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -1308,13 +6314,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi221:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi222:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi223:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -1329,12 +6397,72 @@ entry:
define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi224:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi225:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi226:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -1347,13 +6475,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi227:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi228:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi229:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -1369,12 +6559,77 @@ entry:
define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi230:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi231:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi232:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -1385,12 +6640,77 @@ entry:
}
define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi233:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi234:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi235:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -1402,13 +6722,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi236:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi237:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi238:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -1421,13 +6808,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi239:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi240:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi241:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -1442,12 +6896,77 @@ entry:
define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi242:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi243:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi244:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -1460,13 +6979,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi245:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi246:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi247:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -1482,12 +7068,120 @@ entry:
define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi248:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi249:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi250:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi251:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi252:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi253:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi254:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi255:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -1498,12 +7192,120 @@ entry:
}
define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi256:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi257:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi258:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi259:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi260:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi261:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi262:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi263:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -1515,13 +7317,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi264:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi265:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi266:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi267:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi268:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi269:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi270:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi271:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -1534,13 +7445,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi272:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi273:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi274:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi275:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi276:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi277:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi278:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi279:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -1555,12 +7575,120 @@ entry:
define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi280:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi281:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi282:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi283:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi284:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi285:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi286:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi287:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -1573,13 +7701,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi288:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi289:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi290:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi291:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi292:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi293:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi294:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi295:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -1595,12 +7832,125 @@ entry:
define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi296:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi297:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi298:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi299:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi300:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi301:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi302:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi303:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -1611,12 +7961,125 @@ entry:
}
define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi304:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi305:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi306:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi307:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi308:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi309:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi310:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi311:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -1628,13 +8091,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi312:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi313:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi314:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi315:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi316:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi317:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi318:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi319:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -1647,13 +8224,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi320:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi321:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi322:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi323:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi324:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi325:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi326:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi327:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -1668,12 +8359,125 @@ entry:
define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi328:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi329:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi330:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi331:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi332:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi333:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi334:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi335:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -1686,13 +8490,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi336:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi337:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi338:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi339:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi340:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi341:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi342:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi343:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -1708,12 +8626,23 @@ entry:
define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v4i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -1724,12 +8653,23 @@ entry:
}
define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -1741,13 +8681,34 @@ entry:
}
define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -1761,13 +8722,34 @@ entry:
}
define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -1783,12 +8765,24 @@ entry:
define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -1801,13 +8795,35 @@ entry:
}
define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -1824,11 +8840,35 @@ entry:
define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -1839,11 +8879,35 @@ entry:
}
define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -1855,12 +8919,46 @@ entry:
}
define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -1874,12 +8972,46 @@ entry:
}
define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -1895,11 +9027,36 @@ entry:
define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -1912,12 +9069,47 @@ entry:
}
define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -1934,11 +9126,34 @@ entry:
define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -1949,11 +9164,34 @@ entry:
}
define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -1965,12 +9203,45 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -1984,12 +9255,45 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -2005,11 +9309,35 @@ entry:
define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -2022,12 +9350,46 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -2044,11 +9406,39 @@ entry:
define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi344:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi345:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi346:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -2059,11 +9449,39 @@ entry:
}
define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi347:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi348:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi349:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -2075,12 +9493,50 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi350:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi351:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi352:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -2094,12 +9550,50 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi353:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi354:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi355:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -2115,11 +9609,40 @@ entry:
define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi356:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi357:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi358:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -2132,12 +9655,51 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi359:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi360:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi361:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -2154,11 +9716,46 @@ entry:
define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi362:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi363:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi364:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -2169,11 +9766,46 @@ entry:
}
define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi365:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi366:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi367:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -2185,12 +9817,57 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi368:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi369:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi370:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -2204,12 +9881,57 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi371:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi372:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi373:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -2225,11 +9947,47 @@ entry:
define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi374:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi375:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi376:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -2242,12 +10000,58 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi377:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi378:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi379:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -2264,12 +10068,53 @@ entry:
define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -2280,12 +10125,53 @@ entry:
}
define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -2297,13 +10183,72 @@ entry:
}
define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -2317,13 +10262,72 @@ entry:
}
define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -2339,12 +10343,54 @@ entry:
define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -2357,13 +10403,73 @@ entry:
}
define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -2380,12 +10486,52 @@ entry:
define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -2396,12 +10542,52 @@ entry:
}
define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -2413,13 +10599,71 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -2433,13 +10677,71 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -2455,12 +10757,53 @@ entry:
define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -2473,13 +10816,72 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -2496,12 +10898,41 @@ entry:
define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi380:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi381:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi382:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -2512,12 +10943,41 @@ entry:
}
define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi383:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi384:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi385:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -2529,13 +10989,60 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi386:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi387:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi388:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -2549,13 +11056,60 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi389:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi390:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi391:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -2571,12 +11125,42 @@ entry:
define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi392:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi393:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi394:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -2589,13 +11173,61 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi395:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi396:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi397:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -2612,12 +11244,48 @@ entry:
define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi398:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi399:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi400:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -2628,12 +11296,48 @@ entry:
}
define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi401:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi402:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi403:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -2645,13 +11349,67 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi404:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi405:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi406:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -2665,13 +11423,67 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi407:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi408:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi409:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -2687,12 +11499,49 @@ entry:
define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi410:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi411:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi412:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -2705,13 +11554,68 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi413:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi414:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi415:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -2728,12 +11632,20 @@ entry:
define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -2744,12 +11656,20 @@ entry:
}
define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -2761,13 +11681,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -2780,13 +11709,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -2801,12 +11739,20 @@ entry:
define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -2819,13 +11765,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -2841,12 +11796,70 @@ entry:
define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi416:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi417:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi418:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -2857,12 +11870,70 @@ entry:
}
define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi419:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi420:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi421:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -2874,13 +11945,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi422:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi423:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi424:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -2893,13 +12023,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi425:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi426:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi427:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -2914,12 +12103,70 @@ entry:
define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi428:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi429:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi430:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -2932,13 +12179,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi431:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi432:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi433:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -2954,12 +12260,75 @@ entry:
define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi434:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi435:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi436:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -2970,12 +12339,75 @@ entry:
}
define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi437:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi438:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi439:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -2987,13 +12419,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi440:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi441:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi442:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -3006,13 +12502,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi443:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi444:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi445:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -3027,12 +12587,75 @@ entry:
define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi446:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi447:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi448:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -3045,13 +12668,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi449:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi450:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi451:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -3067,11 +12754,122 @@ entry:
define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi452:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi453:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi454:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi455:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi456:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi457:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi458:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi459:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -3082,11 +12880,122 @@ entry:
}
define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi460:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi461:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi462:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi463:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi464:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi465:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi466:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi467:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3098,12 +13007,124 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi468:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi469:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi470:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi471:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi472:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi473:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi474:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi475:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -3116,12 +13137,124 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi476:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi477:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi478:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi479:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi480:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi481:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi482:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi483:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3136,11 +13269,127 @@ entry:
define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi484:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi485:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi486:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi487:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi488:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi489:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi490:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi491:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -3151,11 +13400,127 @@ entry:
}
define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi492:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi493:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi494:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi495:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi496:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi497:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi498:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi499:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3167,12 +13532,129 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi500:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi501:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi502:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi503:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi504:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi505:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi506:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi507:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -3185,12 +13667,129 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi508:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi509:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi510:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi511:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi512:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi513:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi514:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi515:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3205,12 +13804,46 @@ entry:
define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi516:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi517:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi518:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%1 = bitcast <4 x i64> %__b to <32 x i8>
@@ -3221,12 +13854,46 @@ entry:
}
define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtb (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtb (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi519:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi520:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi521:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -3238,13 +13905,56 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi522:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi523:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi524:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm2, %xmm2
+; NoVLX-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
+; NoVLX-NEXT: vpmovdb %zmm3, %xmm3
+; NoVLX-NEXT: vpxord %zmm4, %zmm4, %zmm4
+; NoVLX-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpand %xmm3, %xmm1, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm4, %zmm4, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%1 = bitcast <4 x i64> %__b to <32 x i8>
@@ -3257,13 +13967,56 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtb (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtb (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi525:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi526:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi527:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm1, %xmm1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; NoVLX-NEXT: vpmovdb %zmm2, %xmm2
+; NoVLX-NEXT: vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT: vpcmpgtb (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm4
+; NoVLX-NEXT: vpand %xmm2, %xmm4, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -3278,11 +14031,24 @@ entry:
define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -3293,11 +14059,24 @@ entry:
}
define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3309,12 +14088,26 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -3327,12 +14120,26 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtw (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3347,11 +14154,72 @@ entry:
define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi528:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi529:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi530:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -3362,11 +14230,72 @@ entry:
}
define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi531:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi532:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi533:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3378,12 +14307,74 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi534:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi535:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi536:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -3396,12 +14387,74 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi537:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi538:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi539:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtw (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3416,11 +14469,77 @@ entry:
define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi540:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi541:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi542:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -3431,11 +14550,77 @@ entry:
}
define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi543:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi544:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi545:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3447,12 +14632,79 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi546:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi547:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi548:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -3465,12 +14717,79 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi549:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi550:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi551:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtw (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3485,12 +14804,123 @@ entry:
define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi552:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi553:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi554:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi555:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi556:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi557:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi558:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi559:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -3501,12 +14931,123 @@ entry:
}
define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi560:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi561:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi562:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi563:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi564:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi565:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi566:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi567:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -3518,13 +15059,125 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi568:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi569:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi570:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi571:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi572:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi573:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi574:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi575:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -3537,13 +15190,125 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi576:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi577:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi578:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi579:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi580:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi581:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi582:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi583:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -3558,12 +15323,128 @@ entry:
define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi584:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi585:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi586:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi587:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi588:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi589:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi590:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi591:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -3574,12 +15455,128 @@ entry:
}
define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi592:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi593:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi594:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi595:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi596:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi597:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi598:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi599:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -3591,13 +15588,130 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi600:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi601:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi602:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi603:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi604:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi605:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi606:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi607:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -3610,13 +15724,130 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi608:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi609:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi610:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi611:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi612:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi613:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi614:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi615:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -3631,12 +15862,348 @@ entry:
define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi616:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi617:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi618:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm3
+; NoVLX-NEXT: vmovq %xmm3, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm2
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm5
+; NoVLX-NEXT: vextracti32x4 $2, %zmm1, %xmm8
+; NoVLX-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; NoVLX-NEXT: vextracti32x4 $1, %zmm1, %xmm6
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm7
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm2
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm2, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm7, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm5
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm7, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm6, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm7
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm6, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm6
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm8, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm8, %rax
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
+; NoVLX-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm3
+; NoVLX-NEXT: vinserti128 $1, %xmm6, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm1
+; NoVLX-NEXT: vpmovsxwd %ymm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%1 = bitcast <8 x i64> %__b to <32 x i16>
@@ -3647,12 +16214,263 @@ entry:
}
define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtw (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtw (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi619:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi620:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi621:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm2
+; NoVLX-NEXT: vmovq %xmm2, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm1
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm3
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm1
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm4
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw 32(%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpmovsxwd %ymm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %eax, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -3664,13 +16482,358 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi622:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi623:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi624:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm2
+; NoVLX-NEXT: vmovq %xmm2, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm3
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm4
+; NoVLX-NEXT: vextracti32x4 $1, %zmm1, %xmm8
+; NoVLX-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; NoVLX-NEXT: vextracti32x4 $3, %zmm1, %xmm7
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm3
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm3, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm9
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm6, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm4
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm6, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm6
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm7, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm7, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm5, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm7
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm5, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm8, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm8, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm5
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm5, %xmm5
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: vinserti128 $1, %xmm9, %ymm4, %ymm1
+; NoVLX-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm8
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vinserti128 $1, %xmm7, %ymm3, %ymm3
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm0
+; NoVLX-NEXT: vpternlogd $255, %zmm6, %zmm6, %zmm6 {%k2} {z}
+; NoVLX-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm4
+; NoVLX-NEXT: vpmovdb %zmm6, %xmm6
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vinserti128 $1, %xmm5, %ymm2, %ymm2
+; NoVLX-NEXT: vpcmpgtw %ymm2, %ymm8, %ymm2
+; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm3
+; NoVLX-NEXT: vpmovsxwd %ymm4, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpand %xmm6, %xmm2, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm0, %xmm3, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%1 = bitcast <8 x i64> %__b to <32 x i16>
@@ -3683,13 +16846,273 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtw (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtw (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi625:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi626:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi627:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
+; NoVLX-NEXT: vmovq %xmm1, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm2
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm3
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm4
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm3, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm4
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm5
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm4
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm2
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3
+; NoVLX-NEXT: vpcmpgtw (%rsi), %ymm3, %ymm3
+; NoVLX-NEXT: vpmovsxwd %ymm3, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm3
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %eax, %xmm3
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpcmpgtw 32(%rsi), %ymm4, %ymm4
+; NoVLX-NEXT: vpmovsxwd %ymm4, %zmm4
+; NoVLX-NEXT: vpslld $31, %zmm4, %zmm4
+; NoVLX-NEXT: vptestmd %zmm4, %zmm4, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpand %xmm2, %xmm4, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm1, %xmm3, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -3704,11 +17127,51 @@ entry:
define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -3719,11 +17182,51 @@ entry:
}
define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3735,12 +17238,70 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -3754,12 +17315,70 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3775,11 +17394,52 @@ entry:
define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -3792,12 +17452,71 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -3814,11 +17533,50 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -3829,11 +17587,50 @@ entry:
}
define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3845,12 +17642,69 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -3864,12 +17718,69 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3885,11 +17796,51 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -3902,12 +17853,70 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -3924,11 +17933,39 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi628:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi629:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi630:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -3939,11 +17976,39 @@ entry:
}
define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi631:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi632:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi633:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3955,12 +18020,58 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi634:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi635:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi636:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -3974,12 +18085,58 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi637:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi638:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi639:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtd (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -3995,11 +18152,40 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi640:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi641:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi642:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -4012,12 +18198,59 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi643:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi644:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi645:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -4034,11 +18267,46 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi646:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi647:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi648:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -4049,11 +18317,46 @@ entry:
}
define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi649:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi650:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi651:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -4065,12 +18368,65 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi652:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi653:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi654:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -4084,12 +18440,65 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi655:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi656:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi657:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtd (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -4105,11 +18514,47 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi658:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi659:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi660:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -4122,12 +18567,66 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi661:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi662:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi663:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -4144,21 +18643,23 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -4170,21 +18671,23 @@ entry:
}
define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -4197,23 +18700,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -4227,23 +18732,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -4259,21 +18766,23 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -4287,23 +18796,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -4320,12 +18831,72 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi664:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi665:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi666:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -4336,12 +18907,72 @@ entry:
}
define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi667:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi668:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi669:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -4353,13 +18984,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi670:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi671:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi672:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -4372,13 +19065,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi673:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi674:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi675:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -4393,12 +19148,72 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi676:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi677:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi678:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -4411,13 +19226,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi679:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi680:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi681:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -4433,12 +19310,77 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi682:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi683:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi684:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -4449,12 +19391,77 @@ entry:
}
define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi685:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi686:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi687:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -4466,13 +19473,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi688:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi689:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi690:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -4485,13 +19559,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi691:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi692:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi693:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -4506,12 +19647,77 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi694:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi695:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi696:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -4524,13 +19730,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi697:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi698:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi699:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -4546,12 +19819,120 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi700:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi701:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi702:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi703:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi704:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi705:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi706:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi707:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -4562,12 +19943,120 @@ entry:
}
define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi708:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi709:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi710:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi711:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi712:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi713:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi714:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi715:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -4579,13 +20068,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi716:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi717:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi718:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi719:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi720:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi721:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi722:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi723:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -4598,13 +20196,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi724:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi725:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi726:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi727:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi728:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi729:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi730:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi731:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -4619,12 +20326,120 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi732:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi733:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi734:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi735:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi736:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi737:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi738:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi739:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -4637,13 +20452,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi740:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi741:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi742:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi743:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi744:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi745:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi746:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi747:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -4659,12 +20583,125 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi748:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi749:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi750:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi751:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi752:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi753:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi754:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi755:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -4675,12 +20712,125 @@ entry:
}
define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi756:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi757:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi758:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi759:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi760:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi761:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi762:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi763:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -4692,13 +20842,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi764:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi765:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi766:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi767:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi768:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi769:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi770:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi771:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -4711,13 +20975,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi772:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi773:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi774:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi775:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi776:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi777:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi778:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi779:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -4732,12 +21110,125 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi780:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi781:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi782:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi783:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi784:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi785:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi786:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi787:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -4750,13 +21241,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi788:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi789:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi790:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi791:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi792:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi793:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi794:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi795:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -4772,12 +21377,23 @@ entry:
define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -4788,12 +21404,23 @@ entry:
}
define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -4805,13 +21432,34 @@ entry:
}
define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -4825,13 +21473,34 @@ entry:
}
define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -4847,12 +21516,24 @@ entry:
define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -4865,13 +21546,35 @@ entry:
}
define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -4888,11 +21591,35 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -4903,11 +21630,35 @@ entry:
}
define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -4919,12 +21670,46 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -4938,12 +21723,46 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -4959,11 +21778,36 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -4976,12 +21820,47 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -4998,11 +21877,34 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -5013,11 +21915,34 @@ entry:
}
define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -5029,12 +21954,45 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -5048,12 +22006,45 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -5069,11 +22060,35 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -5086,12 +22101,46 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -5108,11 +22157,39 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi796:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi797:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi798:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -5123,11 +22200,39 @@ entry:
}
define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi799:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi800:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi801:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -5139,12 +22244,50 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi802:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi803:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi804:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -5158,12 +22301,50 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi805:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi806:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi807:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -5179,11 +22360,40 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi808:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi809:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi810:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -5196,12 +22406,51 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi811:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi812:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi813:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -5218,11 +22467,46 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi814:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi815:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi816:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -5233,11 +22517,46 @@ entry:
}
define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi817:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi818:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi819:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -5249,12 +22568,57 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi820:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi821:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi822:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -5268,12 +22632,57 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi823:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi824:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi825:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rsi), %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -5289,11 +22698,47 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi826:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi827:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi828:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -5306,12 +22751,58 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi829:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi830:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi831:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -5328,12 +22819,53 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -5344,12 +22876,53 @@ entry:
}
define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -5361,13 +22934,72 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -5381,13 +23013,72 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -5403,12 +23094,54 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -5421,13 +23154,73 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -5444,12 +23237,52 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -5460,12 +23293,52 @@ entry:
}
define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -5477,13 +23350,71 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -5497,13 +23428,71 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -5519,12 +23508,53 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -5537,13 +23567,72 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -5560,12 +23649,41 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi832:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi833:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi834:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -5576,12 +23694,41 @@ entry:
}
define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi835:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi836:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi837:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -5593,13 +23740,60 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi838:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi839:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi840:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -5613,13 +23807,60 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi841:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi842:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi843:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -5635,12 +23876,42 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi844:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi845:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi846:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -5653,13 +23924,61 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi847:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi848:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi849:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -5676,12 +23995,48 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi850:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi851:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi852:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -5692,12 +24047,48 @@ entry:
}
define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi853:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi854:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi855:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -5709,13 +24100,67 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi856:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi857:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi858:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -5729,13 +24174,67 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi859:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi860:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi861:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rsi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -5751,12 +24250,49 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi862:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi863:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi864:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -5769,13 +24305,68 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi865:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi866:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi867:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -5792,12 +24383,20 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -5808,12 +24407,20 @@ entry:
}
define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -5825,13 +24432,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -5844,13 +24460,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -5865,12 +24490,20 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -5883,13 +24516,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -5905,12 +24547,70 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi868:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi869:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi870:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -5921,12 +24621,70 @@ entry:
}
define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi871:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi872:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi873:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -5938,13 +24696,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi874:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi875:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi876:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -5957,13 +24774,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi877:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi878:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi879:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -5978,12 +24854,70 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi880:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi881:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi882:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -5996,13 +24930,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi883:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi884:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi885:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -6018,12 +25011,75 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi886:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi887:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi888:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -6034,12 +25090,75 @@ entry:
}
define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi889:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi890:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi891:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -6051,13 +25170,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi892:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi893:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi894:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -6070,13 +25253,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi895:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi896:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi897:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -6091,12 +25338,75 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi898:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi899:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi900:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -6109,13 +25419,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi901:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi902:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi903:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -6131,11 +25505,124 @@ entry:
define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleb %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi904:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi905:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi906:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi907:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi908:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi909:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi910:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi911:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -6146,11 +25633,125 @@ entry:
}
define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi912:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi913:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi914:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi915:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi916:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi917:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi918:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi919:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6162,12 +25763,126 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi920:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi921:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi922:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi923:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi924:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi925:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi926:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi927:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -6180,12 +25895,127 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi928:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi929:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi930:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi931:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi932:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi933:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi934:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi935:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6200,11 +26030,129 @@ entry:
define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleb %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi936:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi937:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi938:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi939:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi940:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi941:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi942:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi943:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -6215,11 +26163,130 @@ entry:
}
define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi944:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi945:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi946:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi947:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi948:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi949:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi950:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi951:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6231,12 +26298,131 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi952:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi953:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi954:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi955:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi956:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi957:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi958:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi959:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -6249,12 +26435,132 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi960:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi961:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi962:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi963:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi964:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi965:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi966:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi967:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6269,12 +26575,48 @@ entry:
define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleb %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleb %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi968:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi969:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi970:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%1 = bitcast <4 x i64> %__b to <32 x i8>
@@ -6285,12 +26627,49 @@ entry:
}
define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltb (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltb (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi971:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi972:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi973:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -6302,13 +26681,58 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleb %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleb %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi974:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi975:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi976:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm2, %xmm2
+; NoVLX-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
+; NoVLX-NEXT: vpmovdb %zmm3, %xmm3
+; NoVLX-NEXT: vpxord %zmm4, %zmm4, %zmm4
+; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpand %xmm3, %xmm1, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm4, %zmm4, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%1 = bitcast <4 x i64> %__b to <32 x i8>
@@ -6321,13 +26745,59 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltb (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltb (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi977:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi978:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi979:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm1, %xmm1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; NoVLX-NEXT: vpmovdb %zmm2, %xmm2
+; NoVLX-NEXT: vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm4
+; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm4, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4
+; NoVLX-NEXT: vpxor %ymm4, %ymm0, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm4
+; NoVLX-NEXT: vpand %xmm2, %xmm4, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -6342,11 +26812,26 @@ entry:
define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -6357,11 +26842,27 @@ entry:
}
define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6373,12 +26874,28 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -6391,12 +26908,29 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6411,11 +26945,74 @@ entry:
define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi980:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi981:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi982:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -6426,11 +27023,75 @@ entry:
}
define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi983:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi984:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi985:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6442,12 +27103,76 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi986:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi987:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi988:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -6460,12 +27185,77 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi989:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi990:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi991:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6480,11 +27270,79 @@ entry:
define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi992:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi993:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi994:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -6495,11 +27353,80 @@ entry:
}
define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi995:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi996:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi997:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6511,12 +27438,81 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi998:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi999:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1000:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -6529,12 +27525,82 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1001:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1002:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1003:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6549,12 +27615,125 @@ entry:
define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmplew %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1004:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1005:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1006:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1007:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1008:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1009:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1010:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1011:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -6565,12 +27744,126 @@ entry:
}
define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1012:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1013:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1014:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1015:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1016:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1017:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1018:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1019:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -6582,13 +27875,127 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1020:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1021:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1022:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1023:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1024:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1025:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1026:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1027:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -6601,13 +28008,128 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltw (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltw (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1028:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1029:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1030:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1031:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1032:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1033:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1034:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1035:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -6622,12 +28144,130 @@ entry:
define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmplew %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1036:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1037:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1038:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1039:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1040:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1041:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1042:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1043:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -6638,12 +28278,131 @@ entry:
}
define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1044:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1045:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1046:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1047:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1048:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1049:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1050:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1051:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -6655,13 +28414,132 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1052:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1053:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1054:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1055:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1056:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1057:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1058:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1059:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -6674,13 +28552,133 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltw (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltw (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1060:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1061:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1062:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1063:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1064:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1065:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1066:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1067:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -6695,12 +28693,351 @@ entry:
define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmplew %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmplew %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1068:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1069:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1070:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm3
+; NoVLX-NEXT: vmovq %xmm3, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm2
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm5
+; NoVLX-NEXT: vextracti32x4 $2, %zmm1, %xmm8
+; NoVLX-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; NoVLX-NEXT: vextracti32x4 $1, %zmm1, %xmm6
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm7
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm2
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm2, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm7, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm5
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm7, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm6, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm7
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm6, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm6
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm8, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm8, %rax
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
+; NoVLX-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm3
+; NoVLX-NEXT: vinserti128 $1, %xmm6, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm2
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm2, %ymm2
+; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%1 = bitcast <8 x i64> %__b to <32 x i16>
@@ -6711,12 +29048,268 @@ entry:
}
define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgew_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltw (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgew_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltw (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgew_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1071:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1072:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1073:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm2
+; NoVLX-NEXT: vmovq %xmm2, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm1
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm3
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm1
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm4
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm2
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm0
+; NoVLX-NEXT: vmovdqa 32(%rdi), %ymm2
+; NoVLX-NEXT: vpcmpgtw %ymm1, %ymm2, %ymm2
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm2, %ymm2
+; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -6728,13 +29321,361 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmplew %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmplew %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1074:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1075:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1076:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm2
+; NoVLX-NEXT: vmovq %xmm2, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm3
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm4
+; NoVLX-NEXT: vextracti32x4 $1, %zmm1, %xmm8
+; NoVLX-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; NoVLX-NEXT: vextracti32x4 $3, %zmm1, %xmm7
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm3
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm3, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm9
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm6, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm4
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm6, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm6
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm7, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm7, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm5, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm7
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm5, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm8, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm8, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm5
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm5, %xmm5
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: vinserti128 $1, %xmm9, %ymm4, %ymm1
+; NoVLX-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm8
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vinserti128 $1, %xmm7, %ymm3, %ymm3
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm0
+; NoVLX-NEXT: vpternlogd $255, %zmm6, %zmm6, %zmm6 {%k2} {z}
+; NoVLX-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
+; NoVLX-NEXT: vpmovdb %zmm6, %xmm6
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vinserti128 $1, %xmm5, %ymm2, %ymm2
+; NoVLX-NEXT: vpcmpgtw %ymm8, %ymm2, %ymm2
+; NoVLX-NEXT: vpcmpeqd %ymm5, %ymm5, %ymm5
+; NoVLX-NEXT: vpxor %ymm5, %ymm2, %ymm2
+; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm3
+; NoVLX-NEXT: vpxor %ymm5, %ymm4, %ymm2
+; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpand %xmm6, %xmm2, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm0, %xmm3, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%1 = bitcast <8 x i64> %__b to <32 x i16>
@@ -6747,13 +29688,278 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltw (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltw (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1077:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1078:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1079:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
+; NoVLX-NEXT: vmovq %xmm1, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm2
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm3
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm4
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm3, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm4
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm5
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm4
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm2
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm5
+; NoVLX-NEXT: vpcmpgtw %ymm3, %ymm5, %ymm3
+; NoVLX-NEXT: vmovdqa 32(%rsi), %ymm5
+; NoVLX-NEXT: vpcmpgtw %ymm4, %ymm5, %ymm4
+; NoVLX-NEXT: vpcmpeqd %ymm5, %ymm5, %ymm5
+; NoVLX-NEXT: vpxor %ymm5, %ymm3, %ymm3
+; NoVLX-NEXT: vpmovsxwd %ymm3, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm3
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpxor %ymm5, %ymm4, %ymm4
+; NoVLX-NEXT: vpmovsxwd %ymm4, %zmm4
+; NoVLX-NEXT: vpslld $31, %zmm4, %zmm4
+; NoVLX-NEXT: vptestmd %zmm4, %zmm4, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpand %xmm2, %xmm4, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm1, %xmm3, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -6768,11 +29974,53 @@ entry:
define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -6783,11 +30031,54 @@ entry:
}
define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6799,12 +30090,70 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -6818,12 +30167,71 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6839,12 +30247,55 @@ entry:
define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -6857,13 +30308,72 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -6880,11 +30390,52 @@ entry:
define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -6895,11 +30446,53 @@ entry:
}
define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6911,12 +30504,69 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -6930,12 +30580,70 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -6951,12 +30659,54 @@ entry:
define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -6969,13 +30719,71 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -6992,11 +30800,41 @@ entry:
define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1080:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1081:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1082:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -7007,11 +30845,42 @@ entry:
}
define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1083:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1084:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1085:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -7023,12 +30892,58 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1086:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1087:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1088:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -7042,12 +30957,59 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1089:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1090:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1091:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -7063,12 +31025,43 @@ entry:
define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1092:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1093:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1094:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -7081,13 +31074,60 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1095:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1096:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1097:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -7104,11 +31144,48 @@ entry:
define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1098:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1099:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1100:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -7119,11 +31196,49 @@ entry:
}
define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1101:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1102:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1103:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -7135,12 +31250,65 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1104:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1105:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1106:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -7154,12 +31322,66 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1107:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1108:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1109:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -7175,12 +31397,50 @@ entry:
define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1110:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1111:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1112:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -7193,13 +31453,67 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1113:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1114:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1115:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -7216,21 +31530,23 @@ entry:
define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -7242,21 +31558,23 @@ entry:
}
define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -7269,23 +31587,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -7299,23 +31619,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -7331,22 +31653,24 @@ entry:
define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rdi), %ymm1
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -7360,24 +31684,26 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rsi), %ymm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -7394,12 +31720,72 @@ entry:
define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1116:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1117:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1118:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -7410,12 +31796,72 @@ entry:
}
define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1119:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1120:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1121:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -7427,13 +31873,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1122:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1123:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1124:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -7446,13 +31954,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1125:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1126:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1127:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -7467,13 +32037,73 @@ entry:
define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rdi), %ymm1
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1128:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1129:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1130:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -7486,14 +32116,76 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rsi), %ymm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1131:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1132:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1133:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -7509,12 +32201,77 @@ entry:
define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1134:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1135:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1136:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -7525,12 +32282,77 @@ entry:
}
define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1137:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1138:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1139:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -7542,13 +32364,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1140:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1141:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1142:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -7561,13 +32450,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1143:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1144:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1145:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -7582,13 +32538,78 @@ entry:
define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rdi), %ymm1
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1146:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1147:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1148:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -7601,14 +32622,81 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rsi), %ymm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1149:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1150:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1151:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -7624,12 +32712,120 @@ entry:
define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1152:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1153:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1154:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1155:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1156:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1157:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1158:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1159:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -7640,12 +32836,120 @@ entry:
}
define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1160:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1161:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1162:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1163:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1164:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1165:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1166:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1167:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -7657,13 +32961,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1168:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1169:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1170:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1171:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1172:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1173:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1174:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1175:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -7676,13 +33089,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1176:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1177:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1178:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1179:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1180:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1181:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1182:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1183:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -7697,13 +33219,122 @@ entry:
define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rdi), %zmm1
-; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rdi), %zmm1
+; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1184:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1185:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1186:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1187:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1188:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1189:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1190:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1191:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpbroadcastd (%rdi), %zmm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -7716,14 +33347,124 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rsi), %zmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rsi), %zmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1192:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1193:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1194:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1195:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1196:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1197:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1198:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1199:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpbroadcastd (%rsi), %zmm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -7739,12 +33480,125 @@ entry:
define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1200:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1201:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1202:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1203:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1204:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1205:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1206:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1207:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -7755,12 +33609,125 @@ entry:
}
define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1208:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1209:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1210:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1211:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1212:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1213:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1214:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1215:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -7772,13 +33739,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1216:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1217:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1218:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1219:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1220:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1221:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1222:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1223:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -7791,13 +33872,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1224:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1225:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1226:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1227:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1228:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1229:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1230:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1231:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -7812,13 +34007,127 @@ entry:
define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rdi), %zmm1
-; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rdi), %zmm1
+; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1232:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1233:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1234:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1235:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1236:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1237:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1238:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1239:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpbroadcastd (%rdi), %zmm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -7831,14 +34140,129 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd (%rsi), %zmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastd (%rsi), %zmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1240:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1241:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1242:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1243:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1244:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1245:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1246:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1247:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpbroadcastd (%rsi), %zmm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -7854,12 +34278,25 @@ entry:
define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -7870,12 +34307,26 @@ entry:
}
define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -7887,13 +34338,34 @@ entry:
}
define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -7907,13 +34379,35 @@ entry:
}
define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -7929,13 +34423,27 @@ entry:
define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -7948,14 +34456,36 @@ entry:
}
define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -7972,11 +34502,37 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -7987,11 +34543,38 @@ entry:
}
define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -8003,12 +34586,46 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -8022,12 +34639,47 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -8043,12 +34695,39 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -8061,13 +34740,48 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -8084,11 +34798,36 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -8099,11 +34838,37 @@ entry:
}
define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -8115,12 +34880,45 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -8134,12 +34932,46 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -8155,12 +34987,38 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -8173,13 +35031,47 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -8196,11 +35088,41 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1248:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1249:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1250:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -8211,11 +35133,42 @@ entry:
}
define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1251:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1252:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1253:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -8227,12 +35180,50 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1254:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1255:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1256:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -8246,12 +35237,51 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1257:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1258:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1259:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -8267,12 +35297,43 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1260:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1261:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1262:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -8285,13 +35346,52 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1263:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1264:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1265:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -8308,11 +35408,48 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1266:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1267:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1268:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -8323,11 +35460,49 @@ entry:
}
define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1269:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1270:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1271:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -8339,12 +35514,57 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1272:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1273:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1274:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -8358,12 +35578,58 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1275:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1276:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1277:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -8379,12 +35645,50 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1278:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1279:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1280:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -8397,13 +35701,59 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1281:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1282:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1283:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -8420,12 +35770,55 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -8436,12 +35829,56 @@ entry:
}
define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -8453,13 +35890,74 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -8473,13 +35971,75 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -8495,13 +36055,57 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -8514,14 +36118,76 @@ entry:
}
define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -8538,12 +36204,54 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -8554,12 +36262,55 @@ entry:
}
define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -8571,13 +36322,73 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -8591,13 +36402,74 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -8613,13 +36485,56 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -8632,14 +36547,75 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -8656,12 +36632,43 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1284:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1285:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1286:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -8672,12 +36679,44 @@ entry:
}
define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1287:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1288:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1289:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -8689,13 +36728,62 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1290:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1291:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1292:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -8709,13 +36797,63 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1293:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1294:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1295:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -8731,13 +36869,45 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1296:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1297:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1298:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -8750,14 +36920,64 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1299:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1300:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1301:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -8774,12 +36994,50 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1302:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1303:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1304:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -8790,12 +37048,51 @@ entry:
}
define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1305:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1306:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1307:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -8807,13 +37104,69 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1308:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1309:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1310:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -8827,13 +37180,70 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1311:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1312:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1313:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -8849,13 +37259,52 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1314:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1315:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1316:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -8868,14 +37317,71 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1317:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1318:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1319:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -8892,12 +37398,20 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -8908,12 +37422,20 @@ entry:
}
define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -8925,13 +37447,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -8944,13 +37475,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -8965,13 +37505,22 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %zmm1
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %zmm1
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %zmm1
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -8984,14 +37533,24 @@ entry:
}
define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %zmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %zmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %zmm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -9007,12 +37566,70 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1320:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1321:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1322:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -9023,12 +37640,70 @@ entry:
}
define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1323:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1324:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1325:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -9040,13 +37715,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1326:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1327:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1328:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -9059,13 +37793,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1329:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1330:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1331:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -9080,13 +37873,72 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %zmm1
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %zmm1
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1332:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1333:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1334:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %zmm1
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -9099,14 +37951,74 @@ entry:
}
define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %zmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %zmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1335:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1336:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1337:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %zmm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -9122,12 +38034,75 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1338:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1339:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1340:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -9138,12 +38113,75 @@ entry:
}
define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1341:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1342:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1343:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -9155,13 +38193,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1344:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1345:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1346:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -9174,13 +38276,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1347:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1348:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1349:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -9195,13 +38361,77 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %zmm1
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rdi), %zmm1
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1350:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1351:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1352:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %zmm1
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -9214,14 +38444,79 @@ entry:
}
define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rsi), %zmm1
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpbroadcastq (%rsi), %zmm1
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1353:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1354:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1355:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %zmm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -9237,11 +38532,125 @@ entry:
define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultb_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultb_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltub %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultb_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1356:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1357:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1358:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1359:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1360:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1361:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1362:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1363:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -9252,11 +38661,125 @@ entry:
}
define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultb_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltub (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultb_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltub (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultb_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1364:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1365:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1366:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1367:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1368:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1369:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1370:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1371:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9268,12 +38791,127 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1372:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1373:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1374:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1375:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1376:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1377:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1378:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1379:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -9286,12 +38924,127 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1380:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1381:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1382:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1383:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1384:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1385:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1386:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1387:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9306,11 +39059,130 @@ entry:
define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultb_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultb_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltub %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultb_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1388:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1389:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1390:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1391:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1392:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1393:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1394:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1395:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -9321,11 +39193,130 @@ entry:
}
define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultb_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltub (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultb_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltub (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultb_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1396:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1397:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1398:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1399:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1400:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1401:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1402:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1403:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9337,12 +39328,132 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1404:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1405:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1406:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1407:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1408:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1409:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1410:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1411:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%1 = bitcast <2 x i64> %__b to <16 x i8>
@@ -9355,12 +39466,132 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1412:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1413:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1414:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1415:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1416:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1417:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1418:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1419:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <16 x i8>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9375,12 +39606,49 @@ entry:
define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultb_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltub %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultb_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltub %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultb_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1420:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1421:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1422:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%1 = bitcast <4 x i64> %__b to <32 x i8>
@@ -9391,12 +39659,49 @@ entry:
}
define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultb_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltub (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultb_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltub (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultb_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1423:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1424:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1425:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -9408,13 +39713,59 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltub %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltub %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1426:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1427:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1428:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm2, %xmm2
+; NoVLX-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
+; NoVLX-NEXT: vpmovdb %zmm3, %xmm3
+; NoVLX-NEXT: vpxord %zmm4, %zmm4, %zmm4
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %ymm5, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm5, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpand %xmm3, %xmm1, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm4, %zmm4, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%1 = bitcast <4 x i64> %__b to <32 x i8>
@@ -9427,13 +39778,59 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltub (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltub (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1429:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1430:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1431:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm1, %xmm1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; NoVLX-NEXT: vpmovdb %zmm2, %xmm2
+; NoVLX-NEXT: vpxord %zmm3, %zmm3, %zmm3
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; NoVLX-NEXT: vpxor %ymm4, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rsi), %ymm4, %ymm4
+; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm4, %ymm0
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm4
+; NoVLX-NEXT: vpand %xmm2, %xmm4, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <32 x i8>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -9448,11 +39845,27 @@ entry:
define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -9463,11 +39876,27 @@ entry:
}
define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9479,12 +39908,29 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -9497,12 +39943,29 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kunpckbw %k0, %k1, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9517,11 +39980,75 @@ entry:
define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1432:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1433:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1434:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -9532,11 +40059,75 @@ entry:
}
define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1435:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1436:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1437:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9548,12 +40139,77 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1438:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1439:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1440:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -9566,12 +40222,77 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1441:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1442:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1443:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9586,11 +40307,80 @@ entry:
define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1444:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1445:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1446:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -9601,11 +40391,80 @@ entry:
}
define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1447:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1448:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1449:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9617,12 +40476,82 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1450:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1451:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1452:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%1 = bitcast <2 x i64> %__b to <8 x i16>
@@ -9635,12 +40564,82 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1453:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1454:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1455:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
+; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <8 x i16>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9655,12 +40654,126 @@ entry:
define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1456:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1457:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1458:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1459:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1460:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1461:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1462:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1463:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm2 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -9671,12 +40784,126 @@ entry:
}
define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1464:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1465:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1466:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1467:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1468:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1469:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1470:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1471:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -9688,13 +40915,128 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1472:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1473:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1474:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1475:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1476:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1477:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1478:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1479:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm2 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -9707,13 +41049,128 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1480:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1481:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1482:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1483:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1484:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1485:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1486:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1487:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rsi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -9728,12 +41185,131 @@ entry:
define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1488:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1489:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1490:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1491:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1492:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1493:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1494:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1495:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm2 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -9744,12 +41320,131 @@ entry:
}
define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1496:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1497:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1498:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1499:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1500:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1501:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1502:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1503:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -9761,13 +41456,133 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1504:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1505:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1506:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1507:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1508:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1509:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1510:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1511:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm2 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%1 = bitcast <4 x i64> %__b to <16 x i16>
@@ -9780,13 +41595,133 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1512:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1513:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1514:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1515:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1516:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1517:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1518:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1519:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rsi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <16 x i16>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -9801,12 +41736,353 @@ entry:
define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1520:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1521:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1522:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm3
+; NoVLX-NEXT: vmovq %xmm3, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm2
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm5
+; NoVLX-NEXT: vextracti32x4 $2, %zmm1, %xmm8
+; NoVLX-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; NoVLX-NEXT: vextracti32x4 $1, %zmm1, %xmm6
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm7
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm2
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm2, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm5
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm5, %xmm5
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm5, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm7, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm5
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm7, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm6, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm7
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm6, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm6
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm8, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm8, %rax
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
+; NoVLX-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm3
+; NoVLX-NEXT: vinserti128 $1, %xmm6, %ymm1, %ymm1
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm4
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm2 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm2, %ymm3, %ymm3
+; NoVLX-NEXT: vpxor %ymm2, %ymm4, %ymm4
+; NoVLX-NEXT: vpcmpgtw %ymm3, %ymm4, %ymm3
+; NoVLX-NEXT: vpmovsxwd %ymm3, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm3
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpmovsxbd %xmm3, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm3
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%1 = bitcast <8 x i64> %__b to <32 x i16>
@@ -9817,12 +42093,268 @@ entry:
}
define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultw_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuw (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultw_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuw (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultw_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1523:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1524:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1525:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm2
+; NoVLX-NEXT: vmovq %xmm2, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm1
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm3
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm1
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm4
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm2
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm1, %ymm2, %ymm2
+; NoVLX-NEXT: vpxor 32(%rdi), %ymm1, %ymm3
+; NoVLX-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
+; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %eax, %xmm2
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -9834,13 +42366,363 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1526:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1527:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1528:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm2
+; NoVLX-NEXT: vmovq %xmm2, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm3
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm4
+; NoVLX-NEXT: vextracti32x4 $1, %zmm1, %xmm8
+; NoVLX-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; NoVLX-NEXT: vextracti32x4 $3, %zmm1, %xmm7
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm3
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm2, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm2
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; NoVLX-NEXT: vmovq %xmm3, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm9
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpextrq $1, %xmm3, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm6, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm4
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm6, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm6
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: vmovq %xmm7, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm7, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm5, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm7
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm5, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm8, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm5
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm8, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vmovq %xmm1, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm3
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: vinserti128 $1, %xmm9, %ymm4, %ymm8
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm4
+; NoVLX-NEXT: vpmovdb %zmm1, %xmm0
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; NoVLX-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5
+; NoVLX-NEXT: vpmovdb %zmm1, %xmm7
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm3
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm6 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm6, %ymm4, %ymm3
+; NoVLX-NEXT: vpxor %ymm6, %ymm2, %ymm2
+; NoVLX-NEXT: vpcmpgtw %ymm3, %ymm2, %ymm2
+; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm4
+; NoVLX-NEXT: vpxor %ymm6, %ymm8, %ymm2
+; NoVLX-NEXT: vpxor %ymm6, %ymm5, %ymm3
+; NoVLX-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
+; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm2
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vpand %xmm7, %xmm2, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm0, %xmm4, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%1 = bitcast <8 x i64> %__b to <32 x i16>
@@ -9853,13 +42735,278 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuw (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuw (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1529:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1530:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1531:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $96, %rsp
+; NoVLX-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
+; NoVLX-NEXT: vmovq %xmm1, %rax
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: movq %rax, %rdx
+; NoVLX-NEXT: vmovd %eax, %xmm2
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; NoVLX-NEXT: vextracti32x4 $1, %zmm0, %xmm4
+; NoVLX-NEXT: vextracti32x4 $2, %zmm0, %xmm5
+; NoVLX-NEXT: shrq $32, %rdx
+; NoVLX-NEXT: vpinsrw $2, %edx, %xmm2, %xmm2
+; NoVLX-NEXT: vpextrq $1, %xmm1, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm5, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm3
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm5, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm4, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm5
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm4, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; NoVLX-NEXT: vmovq %xmm0, %rcx
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm6
+; NoVLX-NEXT: movl %ecx, %eax
+; NoVLX-NEXT: shrl $16, %eax
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: movq %rcx, %rax
+; NoVLX-NEXT: shrq $32, %rax
+; NoVLX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpextrq $1, %xmm0, %rax
+; NoVLX-NEXT: shrq $48, %rcx
+; NoVLX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm0
+; NoVLX-NEXT: movl %eax, %ecx
+; NoVLX-NEXT: shrl $16, %ecx
+; NoVLX-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: movq %rax, %rcx
+; NoVLX-NEXT: shrq $32, %rcx
+; NoVLX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm7
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; NoVLX-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; NoVLX-NEXT: vpmovdb %zmm0, %xmm2
+; NoVLX-NEXT: shrq $48, %rax
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm4
+; NoVLX-NEXT: vpinsrw $7, %eax, %xmm7, %xmm3
+; NoVLX-NEXT: vinserti128 $1, %xmm6, %ymm3, %ymm3
+; NoVLX-NEXT: vmovdqa {{.*#+}} ymm5 = [32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768,32768]
+; NoVLX-NEXT: vpxor %ymm5, %ymm3, %ymm3
+; NoVLX-NEXT: vpxor (%rsi), %ymm5, %ymm6
+; NoVLX-NEXT: vpcmpgtw %ymm3, %ymm6, %ymm3
+; NoVLX-NEXT: vpmovsxwd %ymm3, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm3
+; NoVLX-NEXT: vptestmd %zmm3, %zmm3, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %eax, %xmm3
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
+; NoVLX-NEXT: vpxor %ymm5, %ymm4, %ymm4
+; NoVLX-NEXT: vpxor 32(%rsi), %ymm5, %ymm5
+; NoVLX-NEXT: vpcmpgtw %ymm4, %ymm5, %ymm4
+; NoVLX-NEXT: vpmovsxwd %ymm4, %zmm4
+; NoVLX-NEXT: vpslld $31, %zmm4, %zmm4
+; NoVLX-NEXT: vptestmd %zmm4, %zmm4, %k0
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm4
+; NoVLX-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm4, %xmm4
+; NoVLX-NEXT: vpand %xmm2, %xmm4, %xmm2
+; NoVLX-NEXT: vpmovsxbd %xmm2, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
+; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpand %xmm1, %xmm3, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %ecx
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: shlq $32, %rax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <32 x i16>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -9874,11 +43021,54 @@ entry:
define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -9889,11 +43079,54 @@ entry:
}
define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9905,12 +43138,73 @@ entry:
}
define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -9924,12 +43218,73 @@ entry:
}
define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -9945,11 +43300,55 @@ entry:
define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -9962,12 +43361,74 @@ entry:
}
define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -9984,11 +43445,53 @@ entry:
define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -9999,11 +43502,53 @@ entry:
}
define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -10015,12 +43560,72 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -10034,12 +43639,72 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -10055,11 +43720,54 @@ entry:
define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -10072,12 +43780,73 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -10094,11 +43863,42 @@ entry:
define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1532:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1533:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1534:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -10109,11 +43909,42 @@ entry:
}
define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1535:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1536:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1537:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -10125,12 +43956,61 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1538:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1539:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1540:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -10144,12 +44024,61 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1541:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1542:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1543:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -10165,11 +44094,43 @@ entry:
define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1544:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1545:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1546:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -10182,12 +44143,62 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1547:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1548:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1549:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -10204,11 +44215,49 @@ entry:
define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1550:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1551:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1552:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -10219,11 +44268,49 @@ entry:
}
define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1553:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1554:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1555:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -10235,12 +44322,68 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1556:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1557:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1558:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%1 = bitcast <2 x i64> %__b to <4 x i32>
@@ -10254,12 +44397,68 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1559:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1560:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1561:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -10275,11 +44474,50 @@ entry:
define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1562:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1563:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1564:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -10292,12 +44530,69 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1565:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1566:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1567:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
+; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x i32>
%load = load i32, i32* %__b
@@ -10314,21 +44609,23 @@ entry:
define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -10340,21 +44637,23 @@ entry:
}
define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -10367,23 +44666,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -10397,23 +44698,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -10429,21 +44732,23 @@ entry:
define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -10457,23 +44762,25 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
@@ -10490,12 +44797,72 @@ entry:
define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1568:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1569:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1570:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -10506,12 +44873,72 @@ entry:
}
define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1571:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1572:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1573:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -10523,13 +44950,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1574:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1575:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1576:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -10542,13 +45031,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1577:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1578:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1579:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -10563,12 +45114,72 @@ entry:
define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1580:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1581:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1582:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -10581,13 +45192,75 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1583:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1584:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1585:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -10603,12 +45276,77 @@ entry:
define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1586:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1587:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1588:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -10619,12 +45357,77 @@ entry:
}
define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1589:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1590:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1591:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -10636,13 +45439,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1592:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1593:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1594:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%1 = bitcast <4 x i64> %__b to <8 x i32>
@@ -10655,13 +45525,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1595:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1596:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1597:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k1, %k0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -10676,12 +45613,77 @@ entry:
define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1598:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1599:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1600:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -10694,13 +45696,80 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1601:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1602:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1603:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: kandw %k0, %k1, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x i32>
%load = load i32, i32* %__b
@@ -10716,12 +45785,120 @@ entry:
define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1604:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1605:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1606:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1607:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1608:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1609:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1610:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1611:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -10732,12 +45909,120 @@ entry:
}
define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1612:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1613:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1614:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1615:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1616:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1617:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1618:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1619:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpltud (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -10749,13 +46034,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1620:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1621:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1622:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1623:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1624:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1625:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1626:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1627:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -10768,13 +46162,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1628:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1629:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1630:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1631:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1632:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1633:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1634:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1635:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -10789,12 +46292,120 @@ entry:
define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1636:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1637:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1638:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1639:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1640:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1641:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1642:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1643:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -10807,13 +46418,122 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1644:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1645:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1646:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1647:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1648:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1649:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1650:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1651:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -10829,12 +46549,125 @@ entry:
define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1652:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1653:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1654:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1655:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1656:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1657:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1658:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1659:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -10845,12 +46678,125 @@ entry:
}
define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1660:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1661:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1662:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1663:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1664:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1665:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1666:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1667:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpltud (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -10862,13 +46808,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1668:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1669:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1670:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1671:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1672:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1673:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1674:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1675:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%1 = bitcast <8 x i64> %__b to <16 x i32>
@@ -10881,13 +46941,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1676:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1677:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1678:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1679:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1680:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1681:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1682:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1683:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -10902,12 +47076,125 @@ entry:
define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1684:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1685:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1686:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1687:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1688:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1689:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1690:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1691:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -10920,13 +47207,127 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1692:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1693:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1694:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1695:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1696:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1697:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1698:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1699:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x i32>
%load = load i32, i32* %__b
@@ -10942,12 +47343,26 @@ entry:
define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v4i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -10958,12 +47373,26 @@ entry:
}
define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -10975,13 +47404,37 @@ entry:
}
define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -10995,13 +47448,37 @@ entry:
}
define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -11017,12 +47494,27 @@ entry:
define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11035,13 +47527,38 @@ entry:
}
define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11058,11 +47575,38 @@ entry:
define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -11073,11 +47617,38 @@ entry:
}
define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -11089,12 +47660,49 @@ entry:
}
define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -11108,12 +47716,49 @@ entry:
}
define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -11129,11 +47774,39 @@ entry:
define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11146,12 +47819,50 @@ entry:
}
define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11168,11 +47879,37 @@ entry:
define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -11183,11 +47920,37 @@ entry:
}
define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -11199,12 +47962,48 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -11218,12 +48017,48 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -11239,11 +48074,38 @@ entry:
define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11256,12 +48118,49 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11278,11 +48177,42 @@ entry:
define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1700:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1701:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1702:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -11293,11 +48223,42 @@ entry:
}
define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1703:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1704:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1705:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -11309,12 +48270,53 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1706:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1707:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1708:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -11328,12 +48330,53 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1709:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1710:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1711:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -11349,11 +48392,43 @@ entry:
define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1712:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1713:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1714:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11366,12 +48441,54 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1715:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1716:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1717:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11388,11 +48505,49 @@ entry:
define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1718:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1719:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1720:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -11403,11 +48558,49 @@ entry:
}
define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1721:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1722:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1723:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -11419,12 +48612,60 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1724:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1725:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1726:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%1 = bitcast <2 x i64> %__b to <2 x i64>
@@ -11438,12 +48679,60 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1727:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1728:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1729:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -11459,11 +48748,50 @@ entry:
define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1730:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1731:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1732:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11476,12 +48804,61 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1733:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1734:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1735:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
+; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x i64>
%load = load i64, i64* %__b
@@ -11498,12 +48875,56 @@ entry:
define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -11514,12 +48935,56 @@ entry:
}
define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -11531,13 +48996,75 @@ entry:
}
define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -11551,13 +49078,75 @@ entry:
}
define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rsi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -11573,12 +49162,57 @@ entry:
define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -11591,13 +49225,76 @@ entry:
}
define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -11614,12 +49311,55 @@ entry:
define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -11630,12 +49370,55 @@ entry:
}
define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -11647,13 +49430,74 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -11667,13 +49511,74 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rsi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -11689,12 +49594,56 @@ entry:
define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -11707,13 +49656,75 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -11730,12 +49741,44 @@ entry:
define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1736:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1737:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1738:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -11746,12 +49789,44 @@ entry:
}
define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1739:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1740:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1741:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -11763,13 +49838,63 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1742:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1743:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1744:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -11783,13 +49908,63 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1745:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1746:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1747:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rsi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -11805,12 +49980,45 @@ entry:
define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1748:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1749:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1750:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -11823,13 +50031,64 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1751:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1752:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1753:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -11846,12 +50105,51 @@ entry:
define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1754:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1755:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1756:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -11862,12 +50160,51 @@ entry:
}
define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1757:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1758:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1759:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -11879,13 +50216,70 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1760:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1761:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1762:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%1 = bitcast <4 x i64> %__b to <4 x i64>
@@ -11899,13 +50293,70 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1763:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1764:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1765:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor (%rsi), %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -11921,12 +50372,52 @@ entry:
define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1766:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1767:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1768:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -11939,13 +50430,71 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1769:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1770:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1771:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
+; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: kmovw %edi, %k0
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kshiftlw $13, %k0, %k2
+; NoVLX-NEXT: kshiftrw $15, %k2, %k2
+; NoVLX-NEXT: kshiftlw $15, %k0, %k3
+; NoVLX-NEXT: kshiftrw $15, %k3, %k3
+; NoVLX-NEXT: kshiftlw $14, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: kmovw %k3, %ecx
+; NoVLX-NEXT: vmovd %ecx, %xmm1
+; NoVLX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k2, %eax
+; NoVLX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; NoVLX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x i64>
%load = load i64, i64* %__b
@@ -11962,12 +50511,20 @@ entry:
define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -11978,12 +50535,20 @@ entry:
}
define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -11995,13 +50560,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -12014,13 +50588,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -12035,12 +50618,20 @@ entry:
define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -12053,13 +50644,22 @@ entry:
}
define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -12075,12 +50675,70 @@ entry:
define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1772:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1773:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1774:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -12091,12 +50749,70 @@ entry:
}
define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1775:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1776:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1777:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -12108,13 +50824,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1778:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1779:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1780:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -12127,13 +50902,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1781:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1782:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1783:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -12148,12 +50982,70 @@ entry:
define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1784:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1785:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1786:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -12166,13 +51058,72 @@ entry:
}
define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1787:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1788:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1789:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -12188,12 +51139,75 @@ entry:
define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1790:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1791:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1792:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -12204,12 +51218,75 @@ entry:
}
define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1793:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1794:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1795:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -12221,13 +51298,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1796:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1797:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1798:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%1 = bitcast <8 x i64> %__b to <8 x i64>
@@ -12240,13 +51381,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1799:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1800:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1801:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -12261,12 +51466,75 @@ entry:
define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1802:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1803:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1804:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -12279,13 +51547,77 @@ entry:
}
define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: kmovd %edi, %k1
+; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1805:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1806:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1807:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x i64>
%load = load i64, i64* %__b
@@ -12302,11 +51634,51 @@ entry:
declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> , <16 x float> , i32, i16, i32)
define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%1 = bitcast <2 x i64> %__b to <4 x float>
@@ -12317,11 +51689,51 @@ entry:
}
define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -12333,11 +51745,52 @@ entry:
}
define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
+; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%load = load float, float* %__b
@@ -12351,11 +51804,50 @@ entry:
define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%1 = bitcast <2 x i64> %__b to <4 x float>
@@ -12366,11 +51858,50 @@ entry:
}
define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -12382,11 +51913,51 @@ entry:
}
define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
+; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%load = load float, float* %__b
@@ -12400,11 +51971,39 @@ entry:
define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1808:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1809:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1810:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%1 = bitcast <2 x i64> %__b to <4 x float>
@@ -12415,11 +52014,39 @@ entry:
}
define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1811:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1812:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1813:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -12431,11 +52058,40 @@ entry:
}
define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1814:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1815:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1816:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
+; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%load = load float, float* %__b
@@ -12449,11 +52105,46 @@ entry:
define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1817:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1818:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1819:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%1 = bitcast <2 x i64> %__b to <4 x float>
@@ -12464,11 +52155,46 @@ entry:
}
define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1820:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1821:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1822:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -12480,11 +52206,47 @@ entry:
}
define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1823:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1824:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1825:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
+; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <4 x float>
%load = load float, float* %__b
@@ -12498,21 +52260,23 @@ entry:
define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x float>
@@ -12524,21 +52288,23 @@ entry:
}
define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x float>
@@ -12551,21 +52317,23 @@ entry:
}
define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
-; NoVLX: ## BB#0: ## %entry
-; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x float>
@@ -12580,12 +52348,72 @@ entry:
define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1826:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1827:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1828:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x float>
%1 = bitcast <4 x i64> %__b to <8 x float>
@@ -12596,12 +52424,72 @@ entry:
}
define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1829:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1830:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1831:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovaps (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x float>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -12613,12 +52501,72 @@ entry:
}
define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1832:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1833:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1834:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x float>
%load = load float, float* %__b
@@ -12632,12 +52580,77 @@ entry:
define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1835:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1836:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1837:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x float>
%1 = bitcast <4 x i64> %__b to <8 x float>
@@ -12648,12 +52661,77 @@ entry:
}
define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1838:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1839:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1840:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovaps (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x float>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -12665,12 +52743,77 @@ entry:
}
define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1841:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1842:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1843:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <8 x float>
%load = load float, float* %__b
@@ -12684,12 +52827,120 @@ entry:
define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1844:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1845:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1846:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1847:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1848:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1849:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1850:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1851:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x float>
%1 = bitcast <8 x i64> %__b to <16 x float>
@@ -12700,12 +52951,120 @@ entry:
}
define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1852:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1853:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1854:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1855:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1856:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1857:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1858:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1859:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vcmpeqps (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x float>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -12717,12 +53076,120 @@ entry:
}
define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, float* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1860:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1861:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1862:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: .Lcfi1863:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1864:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1865:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1866:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1867:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x float>
%load = load float, float* %__b
@@ -12736,12 +53203,18 @@ entry:
define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_sae_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v16i1_v32i1_sae_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovw %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v16i1_v32i1_sae_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x float>
%1 = bitcast <8 x i64> %__b to <16 x float>
@@ -12752,12 +53225,125 @@ entry:
define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1868:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1869:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1870:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1871:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1872:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1873:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1874:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1875:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x float>
%1 = bitcast <8 x i64> %__b to <16 x float>
@@ -12768,12 +53354,125 @@ entry:
}
define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1876:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1877:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1878:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1879:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1880:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1881:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1882:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1883:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vcmpeqps (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x float>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -12785,12 +53484,125 @@ entry:
}
define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, float* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1884:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1885:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1886:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: pushq %r15
+; NoVLX-NEXT: pushq %r14
+; NoVLX-NEXT: pushq %r13
+; NoVLX-NEXT: pushq %r12
+; NoVLX-NEXT: pushq %rbx
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: .Lcfi1887:
+; NoVLX-NEXT: .cfi_offset %rbx, -56
+; NoVLX-NEXT: .Lcfi1888:
+; NoVLX-NEXT: .cfi_offset %r12, -48
+; NoVLX-NEXT: .Lcfi1889:
+; NoVLX-NEXT: .cfi_offset %r13, -40
+; NoVLX-NEXT: .Lcfi1890:
+; NoVLX-NEXT: .cfi_offset %r14, -32
+; NoVLX-NEXT: .Lcfi1891:
+; NoVLX-NEXT: .cfi_offset %r15, -24
+; NoVLX-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r11d
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r14d
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r15d
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r12d
+; NoVLX-NEXT: kshiftlw $8, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r13d
+; NoVLX-NEXT: kshiftlw $7, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $6, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ebx
+; NoVLX-NEXT: kshiftlw $5, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $4, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $3, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $2, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vmovd %r10d, %xmm0
+; NoVLX-NEXT: kmovw %k1, %r10d
+; NoVLX-NEXT: kshiftlw $1, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $13, %r10d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: leaq -40(%rbp), %rsp
+; NoVLX-NEXT: popq %rbx
+; NoVLX-NEXT: popq %r12
+; NoVLX-NEXT: popq %r13
+; NoVLX-NEXT: popq %r14
+; NoVLX-NEXT: popq %r15
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x float>
%load = load float, float* %__b
@@ -12804,13 +53616,20 @@ entry:
define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_sae_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqps_v16i1_v64i1_sae_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: movzwl %ax, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v16i1_v64i1_sae_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movzwl %ax, %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <16 x float>
%1 = bitcast <8 x i64> %__b to <16 x float>
@@ -12822,12 +53641,23 @@ entry:
declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> , <8 x double> , i32, i8, i32)
define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%1 = bitcast <2 x i64> %__b to <2 x double>
@@ -12838,12 +53668,23 @@ entry:
}
define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -12855,12 +53696,24 @@ entry:
}
define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load double, double* %__b
@@ -12874,11 +53727,35 @@ entry:
define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%1 = bitcast <2 x i64> %__b to <2 x double>
@@ -12889,11 +53766,35 @@ entry:
}
define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -12905,11 +53806,36 @@ entry:
}
define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load double, double* %__b
@@ -12923,11 +53849,34 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%1 = bitcast <2 x i64> %__b to <2 x double>
@@ -12938,11 +53887,34 @@ entry:
}
define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -12954,11 +53926,35 @@ entry:
}
define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load double, double* %__b
@@ -12972,11 +53968,39 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1892:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1893:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1894:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%1 = bitcast <2 x i64> %__b to <2 x double>
@@ -12987,11 +54011,39 @@ entry:
}
define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1895:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1896:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1897:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -13003,11 +54055,40 @@ entry:
}
define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1898:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1899:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1900:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load double, double* %__b
@@ -13021,11 +54102,46 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1901:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1902:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1903:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%1 = bitcast <2 x i64> %__b to <2 x double>
@@ -13036,11 +54152,46 @@ entry:
}
define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1904:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1905:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1906:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load <2 x i64>, <2 x i64>* %__b
@@ -13052,11 +54203,47 @@ entry:
}
define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1907:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1908:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1909:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <2 x i64> %__a to <2 x double>
%load = load double, double* %__b
@@ -13070,12 +54257,53 @@ entry:
define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%1 = bitcast <4 x i64> %__b to <4 x double>
@@ -13086,12 +54314,53 @@ entry:
}
define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -13103,12 +54372,54 @@ entry:
}
define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kshiftlw $7, %k0, %k0
+; NoVLX-NEXT: kshiftrw $7, %k0, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpsllq $63, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; NoVLX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%load = load double, double* %__b
@@ -13122,12 +54433,52 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%1 = bitcast <4 x i64> %__b to <4 x double>
@@ -13138,12 +54489,52 @@ entry:
}
define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -13155,12 +54546,53 @@ entry:
}
define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $0, %xmm0, %eax
+; NoVLX-NEXT: andl $1, %eax
+; NoVLX-NEXT: kmovw %eax, %k0
+; NoVLX-NEXT: kxorw %k0, %k0, %k1
+; NoVLX-NEXT: kshiftrw $1, %k1, %k1
+; NoVLX-NEXT: kshiftlw $1, %k1, %k1
+; NoVLX-NEXT: korw %k0, %k1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,16,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,1,16,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
+; NoVLX-NEXT: vpslld $31, %zmm3, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NoVLX-NEXT: vpextrb $12, %xmm0, %eax
+; NoVLX-NEXT: kmovw %eax, %k1
+; NoVLX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NoVLX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,16,4,5,6,7,8,9,10,11,12,13,14,15]
+; NoVLX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%load = load double, double* %__b
@@ -13174,12 +54606,41 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1910:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1911:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1912:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%1 = bitcast <4 x i64> %__b to <4 x double>
@@ -13190,12 +54651,41 @@ entry:
}
define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1913:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1914:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1915:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -13207,12 +54697,42 @@ entry:
}
define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1916:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1917:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1918:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%load = load double, double* %__b
@@ -13226,12 +54746,48 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1919:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1920:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1921:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%1 = bitcast <4 x i64> %__b to <4 x double>
@@ -13242,12 +54798,48 @@ entry:
}
define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1922:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1923:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1924:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%load = load <4 x i64>, <4 x i64>* %__b
@@ -13259,12 +54851,49 @@ entry:
}
define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1925:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1926:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1927:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
+; NoVLX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; NoVLX-NEXT: vpmovsxbd %xmm1, %zmm1
+; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
+; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
+; NoVLX-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__a to <4 x double>
%load = load double, double* %__b
@@ -13278,12 +54907,20 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%1 = bitcast <8 x i64> %__b to <8 x double>
@@ -13294,12 +54931,20 @@ entry:
}
define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -13311,12 +54956,20 @@ entry:
}
define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%load = load double, double* %__b
@@ -13330,12 +54983,22 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_sae_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_sae_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: movzbl %al, %eax
+; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_sae_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movzbl %al, %eax
+; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%1 = bitcast <8 x i64> %__b to <8 x double>
@@ -13346,12 +55009,70 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1928:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1929:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1930:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%1 = bitcast <8 x i64> %__b to <8 x double>
@@ -13362,12 +55083,70 @@ entry:
}
define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1931:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1932:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1933:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -13379,12 +55158,70 @@ entry:
}
define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1934:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1935:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1936:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $32, %rsp
+; NoVLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%load = load double, double* %__b
@@ -13398,12 +55235,19 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_sae_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_sae_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovb %k0, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_sae_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movzbl %al, %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%1 = bitcast <8 x i64> %__b to <8 x double>
@@ -13414,12 +55258,75 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1937:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1938:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1939:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%1 = bitcast <8 x i64> %__b to <8 x double>
@@ -13430,12 +55337,75 @@ entry:
}
define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1940:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1941:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1942:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%load = load <8 x i64>, <8 x i64>* %__b
@@ -13447,12 +55417,75 @@ entry:
}
define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem_b:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, %rax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem_b:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; VLX-NEXT: kmovq %k0, %rax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem_b:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: pushq %rbp
+; NoVLX-NEXT: .Lcfi1943:
+; NoVLX-NEXT: .cfi_def_cfa_offset 16
+; NoVLX-NEXT: .Lcfi1944:
+; NoVLX-NEXT: .cfi_offset %rbp, -16
+; NoVLX-NEXT: movq %rsp, %rbp
+; NoVLX-NEXT: .Lcfi1945:
+; NoVLX-NEXT: .cfi_def_cfa_register %rbp
+; NoVLX-NEXT: andq $-32, %rsp
+; NoVLX-NEXT: subq $64, %rsp
+; NoVLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; NoVLX-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k1
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
+; NoVLX-NEXT: kshiftlw $15, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r8d
+; NoVLX-NEXT: kshiftlw $14, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %r9d
+; NoVLX-NEXT: kshiftlw $13, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edx
+; NoVLX-NEXT: kshiftlw $12, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %esi
+; NoVLX-NEXT: kshiftlw $11, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %edi
+; NoVLX-NEXT: kshiftlw $10, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %eax
+; NoVLX-NEXT: kshiftlw $9, %k0, %k1
+; NoVLX-NEXT: kshiftrw $15, %k1, %k1
+; NoVLX-NEXT: kmovw %k1, %ecx
+; NoVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $0, %r8d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $1, %r9d, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $4, %edi, %xmm0, %xmm0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $15, %k0, %k0
+; NoVLX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
+; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, (%rsp)
+; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; NoVLX-NEXT: shlq $32, %rcx
+; NoVLX-NEXT: movl (%rsp), %eax
+; NoVLX-NEXT: orq %rcx, %rax
+; NoVLX-NEXT: movq %rbp, %rsp
+; NoVLX-NEXT: popq %rbp
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%load = load double, double* %__b
@@ -13466,13 +55499,20 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
-; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_sae_mask:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
+; VLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_sae_mask:
+; VLX: # BB#0: # %entry
+; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; VLX-NEXT: kmovd %k0, %eax
+; VLX-NEXT: movzbl %al, %eax
+; VLX-NEXT: vzeroupper
+; VLX-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_sae_mask:
+; NoVLX: # BB#0: # %entry
+; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: movzbl %al, %eax
+; NoVLX-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__a to <8 x double>
%1 = bitcast <8 x i64> %__b to <8 x double>
diff --git a/test/CodeGen/X86/bitcast-and-setcc-128.ll b/test/CodeGen/X86/bitcast-and-setcc-128.ll
index 092b139fca2f..1d78ee26a0b9 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-128.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-128.ll
@@ -1,48 +1,48 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX12,AVX1
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=AVX12,AVX2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefixes=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512
define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; SSE2-LABEL: v8i16:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
; SSE2-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-NEXT: pcmpgtw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: packuswb %xmm2, %xmm2
; SSE2-NEXT: pmovmskb %xmm2, %eax
-; SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v8i16:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
; SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSSE3-NEXT: pcmpgtw %xmm3, %xmm2
; SSSE3-NEXT: pand %xmm0, %xmm2
; SSSE3-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtw %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v8i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: retq
%x0 = icmp sgt <8 x i16> %a, %b
%x1 = icmp sgt <8 x i16> %c, %d
@@ -53,25 +53,25 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; SSE2-SSSE3-LABEL: v4i32:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v4i32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
@@ -87,25 +87,25 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
; SSE2-SSSE3-LABEL: v4f32:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
; SSE2-SSSE3-NEXT: cmpltps %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andps %xmm1, %xmm3
; SSE2-SSSE3-NEXT: movmskps %xmm3, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vcmpltps %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v4f32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512-NEXT: vcmpltps %xmm2, %xmm3, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
@@ -121,29 +121,29 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d)
define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; SSE2-SSSE3-LABEL: v16i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v16i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpgtb %xmm3, %xmm2, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX512-NEXT: retq
%x0 = icmp sgt <16 x i8> %a, %b
%x1 = icmp sgt <16 x i8> %c, %d
@@ -154,7 +154,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; SSE2-SSSE3-LABEL: v2i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: psllq $56, %xmm2
; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm4
; SSE2-SSSE3-NEXT: psrad $31, %xmm4
@@ -206,11 +206,11 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vpsllq $56, %xmm3, %xmm3
; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX1-NEXT: vpsrad $24, %xmm3, %xmm3
@@ -235,11 +235,11 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpsllq $56, %xmm3, %xmm3
; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX2-NEXT: vpsrad $24, %xmm3, %xmm3
@@ -264,11 +264,11 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: retq
;
; AVX512-LABEL: v2i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpsllq $56, %xmm3, %xmm3
; AVX512-NEXT: vpsraq $56, %xmm3, %xmm3
; AVX512-NEXT: vpsllq $56, %xmm2, %xmm2
@@ -292,7 +292,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; SSE2-SSSE3-LABEL: v2i16:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: psllq $48, %xmm2
; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm4
; SSE2-SSSE3-NEXT: psrad $31, %xmm4
@@ -344,11 +344,11 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vpsllq $48, %xmm3, %xmm3
; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX1-NEXT: vpsrad $16, %xmm3, %xmm3
@@ -373,11 +373,11 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpsllq $48, %xmm3, %xmm3
; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX2-NEXT: vpsrad $16, %xmm3, %xmm3
@@ -402,11 +402,11 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: retq
;
; AVX512-LABEL: v2i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpsllq $48, %xmm3, %xmm3
; AVX512-NEXT: vpsraq $48, %xmm3, %xmm3
; AVX512-NEXT: vpsllq $48, %xmm2, %xmm2
@@ -430,7 +430,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; SSE2-SSSE3-LABEL: v2i32:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: psllq $32, %xmm2
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
; SSE2-SSSE3-NEXT: psrad $31, %xmm2
@@ -474,11 +474,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
@@ -499,11 +499,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
@@ -524,11 +524,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: retq
;
; AVX512-LABEL: v2i32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX512-NEXT: vpsraq $32, %xmm3, %xmm3
; AVX512-NEXT: vpsllq $32, %xmm2, %xmm2
@@ -552,7 +552,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
; SSE2-SSSE3-LABEL: v2i64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm1
; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm0
@@ -576,20 +576,20 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v2i64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
@@ -605,25 +605,25 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) {
; SSE2-SSSE3-LABEL: v2f64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
; SSE2-SSSE3-NEXT: cmpltpd %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andpd %xmm1, %xmm3
; SSE2-SSSE3-NEXT: movmskpd %xmm3, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vcmpltpd %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandpd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v2f64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512-NEXT: vcmpltpd %xmm2, %xmm3, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
@@ -639,7 +639,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double>
define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; SSE2-SSSE3-LABEL: v4i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pslld $24, %xmm3
; SSE2-SSSE3-NEXT: psrad $24, %xmm3
; SSE2-SSSE3-NEXT: pslld $24, %xmm2
@@ -652,11 +652,11 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpslld $24, %xmm3, %xmm3
; AVX12-NEXT: vpsrad $24, %xmm3, %xmm3
; AVX12-NEXT: vpslld $24, %xmm2, %xmm2
@@ -669,11 +669,11 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v4i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpslld $24, %xmm3, %xmm3
; AVX512-NEXT: vpsrad $24, %xmm3, %xmm3
; AVX512-NEXT: vpslld $24, %xmm2, %xmm2
@@ -697,7 +697,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
; SSE2-SSSE3-LABEL: v4i16:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pslld $16, %xmm3
; SSE2-SSSE3-NEXT: psrad $16, %xmm3
; SSE2-SSSE3-NEXT: pslld $16, %xmm2
@@ -710,11 +710,11 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpslld $16, %xmm3, %xmm3
; AVX12-NEXT: vpsrad $16, %xmm3, %xmm3
; AVX12-NEXT: vpslld $16, %xmm2, %xmm2
@@ -727,11 +727,11 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v4i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpslld $16, %xmm3, %xmm3
; AVX512-NEXT: vpsrad $16, %xmm3, %xmm3
; AVX512-NEXT: vpslld $16, %xmm2, %xmm2
@@ -755,7 +755,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; SSE2-LABEL: v8i8:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
; SSE2-NEXT: psllw $8, %xmm3
; SSE2-NEXT: psraw $8, %xmm3
; SSE2-NEXT: psllw $8, %xmm2
@@ -770,11 +770,11 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: pmovmskb %xmm0, %eax
-; SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v8i8:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
; SSSE3-NEXT: psllw $8, %xmm3
; SSSE3-NEXT: psraw $8, %xmm3
; SSSE3-NEXT: psllw $8, %xmm2
@@ -788,11 +788,11 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; SSSE3-NEXT: pand %xmm2, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpsllw $8, %xmm3, %xmm3
; AVX12-NEXT: vpsraw $8, %xmm3, %xmm3
; AVX12-NEXT: vpsllw $8, %xmm2, %xmm2
@@ -806,11 +806,11 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v8i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpsllw $8, %xmm3, %xmm3
; AVX512-NEXT: vpsraw $8, %xmm3, %xmm3
; AVX512-NEXT: vpsllw $8, %xmm2, %xmm2
@@ -822,7 +822,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: retq
%x0 = icmp sgt <8 x i8> %a, %b
%x1 = icmp sgt <8 x i8> %c, %d
diff --git a/test/CodeGen/X86/bitcast-and-setcc-256.ll b/test/CodeGen/X86/bitcast-and-setcc-256.ll
index a6d6ca155302..95529686a58a 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+SSE2 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+SSSE3 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX12,AVX1
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=AVX12,AVX2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+SSE2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+SSSE3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefix=AVX512
define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; SSE2-SSSE3-LABEL: v4i64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
; SSE2-SSSE3-NEXT: pxor %xmm8, %xmm3
; SSE2-SSSE3-NEXT: pxor %xmm8, %xmm1
@@ -57,11 +57,11 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; SSE2-SSSE3-NEXT: psrad $31, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v4i64:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -74,12 +74,12 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: v4i64:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -88,12 +88,12 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskps %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v4i64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpgtq %ymm3, %ymm2, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
@@ -110,7 +110,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double> %d) {
; SSE2-SSSE3-LABEL: v4f64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm1, %xmm3
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
@@ -123,11 +123,11 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
; SSE2-SSSE3-NEXT: psrad $31, %xmm6
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm6
; SSE2-SSSE3-NEXT: movmskps %xmm6, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f64:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX12-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -136,12 +136,12 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
; AVX12-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
; AVX512-LABEL: v4f64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vcmpltpd %ymm2, %ymm3, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
@@ -158,7 +158,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; SSE2-LABEL: v16i16:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
; SSE2-NEXT: pcmpgtw %xmm3, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm3, %xmm1
@@ -181,11 +181,11 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; SSE2-NEXT: pcmpgtb %xmm4, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pmovmskb %xmm2, %eax
-; SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v16i16:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
; SSSE3-NEXT: pcmpgtw %xmm3, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm3, %xmm1
@@ -208,11 +208,11 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; SSSE3-NEXT: pcmpgtb %xmm4, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSSE3-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SSSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSSE3-NEXT: retq
;
; AVX1-LABEL: v16i16:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
@@ -225,12 +225,12 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: v16i16:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -239,16 +239,16 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v16i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpgtw %ymm3, %ymm2, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x0 = icmp sgt <16 x i16> %a, %b
@@ -260,7 +260,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE2-LABEL: v8i32:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
; SSE2-NEXT: pcmpgtd %xmm3, %xmm1
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
@@ -287,11 +287,11 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: packuswb %xmm2, %xmm2
; SSE2-NEXT: pmovmskb %xmm2, %eax
-; SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v8i32:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
; SSSE3-NEXT: pcmpgtd %xmm3, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT: pshufb %xmm3, %xmm1
@@ -310,11 +310,11 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSSE3-NEXT: pand %xmm0, %xmm4
; SSSE3-NEXT: pshufb {{.*#+}} xmm4 = xmm4[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pmovmskb %xmm4, %eax
-; SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSSE3-NEXT: retq
;
; AVX1-LABEL: v8i32:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
@@ -328,12 +328,12 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: v8i32:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -343,16 +343,16 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v8i32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x0 = icmp sgt <8 x i32> %a, %b
@@ -364,7 +364,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) {
; SSE2-LABEL: v8f32:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
; SSE2-NEXT: cmpltps %xmm1, %xmm3
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
@@ -391,11 +391,11 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: packuswb %xmm2, %xmm2
; SSE2-NEXT: pmovmskb %xmm2, %eax
-; SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v8f32:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
; SSSE3-NEXT: cmpltps %xmm1, %xmm3
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT: pshufb %xmm1, %xmm3
@@ -414,11 +414,11 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; SSSE3-NEXT: pand %xmm2, %xmm6
; SSSE3-NEXT: pshufb {{.*#+}} xmm6 = xmm6[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pmovmskb %xmm6, %eax
-; SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSSE3-NEXT: retq
;
; AVX12-LABEL: v8f32:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX12-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -428,16 +428,16 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
; AVX512-LABEL: v8f32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x0 = fcmp ogt <8 x float> %a, %b
@@ -449,7 +449,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; SSE2-SSSE3-LABEL: v32i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm1
; SSE2-SSSE3-NEXT: pcmpgtb %xmm6, %xmm4
@@ -561,14 +561,14 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v32i8:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: Lcfi0:
+; AVX1-NEXT: .Lcfi0:
; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: Lcfi1:
+; AVX1-NEXT: .Lcfi1:
; AVX1-NEXT: .cfi_offset %rbp, -16
; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: Lcfi2:
+; AVX1-NEXT: .Lcfi2:
; AVX1-NEXT: .cfi_def_cfa_register %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $32, %rsp
@@ -687,7 +687,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v32i8:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -696,7 +696,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: v32i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpgtb %ymm3, %ymm2, %k0 {%k1}
; AVX512-NEXT: kmovd %k0, %eax
diff --git a/test/CodeGen/X86/bitcast-and-setcc-512.ll b/test/CodeGen/X86/bitcast-and-setcc-512.ll
new file mode 100644
index 000000000000..2eba79b0297f
--- /dev/null
+++ b/test/CodeGen/X86/bitcast-and-setcc-512.ll
@@ -0,0 +1,1868 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW
+
+define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
+; SSE-LABEL: v8i64:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: pcmpgtq %xmm7, %xmm3
+; SSE-NEXT: pcmpgtq %xmm6, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE-NEXT: pslld $31, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE-NEXT: pshufb %xmm3, %xmm2
+; SSE-NEXT: pcmpgtq %xmm5, %xmm1
+; SSE-NEXT: pcmpgtq %xmm4, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pshufb %xmm3, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE-NEXT: psllw $15, %xmm0
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2]
+; SSE-NEXT: pslld $31, %xmm9
+; SSE-NEXT: psrad $31, %xmm9
+; SSE-NEXT: pshufb %xmm3, %xmm9
+; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm10[0,2]
+; SSE-NEXT: pslld $31, %xmm8
+; SSE-NEXT: psrad $31, %xmm8
+; SSE-NEXT: pshufb %xmm3, %xmm8
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
+; SSE-NEXT: psllw $15, %xmm8
+; SSE-NEXT: psraw $15, %xmm8
+; SSE-NEXT: pand %xmm0, %xmm8
+; SSE-NEXT: pshufb {{.*#+}} xmm8 = xmm8[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSE-NEXT: pmovmskb %xmm8, %eax
+; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v8i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT: vpcmpgtq %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm8, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm9
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm8, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm9[0]
+; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm2
+; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm3
+; AVX1-NEXT: vpacksswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpshufb %xmm8, %xmm2, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1
+; AVX1-NEXT: vpsraw $15, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v8i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpgtq %ymm7, %ymm5, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtq %ymm6, %ymm4, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-NEXT: vpacksswb %xmm4, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vpsllw $15, %xmm1, %xmm1
+; AVX2-NEXT: vpsraw $15, %xmm1, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v8i64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v8i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x0 = icmp sgt <8 x i64> %a, %b
+ %x1 = icmp sgt <8 x i64> %c, %d
+ %y = and <8 x i1> %x0, %x1
+ %res = bitcast <8 x i1> %y to i8
+ ret i8 %res
+}
+
+define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d) {
+; SSE-LABEL: v8f64:
+; SSE: # BB#0:
+; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: cmpltpd %xmm3, %xmm7
+; SSE-NEXT: cmpltpd %xmm2, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
+; SSE-NEXT: pslld $31, %xmm6
+; SSE-NEXT: psrad $31, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE-NEXT: pshufb %xmm2, %xmm6
+; SSE-NEXT: cmpltpd %xmm1, %xmm5
+; SSE-NEXT: cmpltpd %xmm0, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
+; SSE-NEXT: pslld $31, %xmm4
+; SSE-NEXT: psrad $31, %xmm4
+; SSE-NEXT: pshufb %xmm2, %xmm4
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; SSE-NEXT: psllw $15, %xmm4
+; SSE-NEXT: psraw $15, %xmm4
+; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2]
+; SSE-NEXT: pslld $31, %xmm9
+; SSE-NEXT: psrad $31, %xmm9
+; SSE-NEXT: pshufb %xmm2, %xmm9
+; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm10[0,2]
+; SSE-NEXT: pslld $31, %xmm8
+; SSE-NEXT: psrad $31, %xmm8
+; SSE-NEXT: pshufb %xmm2, %xmm8
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
+; SSE-NEXT: psllw $15, %xmm8
+; SSE-NEXT: psraw $15, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pshufb {{.*#+}} xmm8 = xmm8[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSE-NEXT: pmovmskb %xmm8, %eax
+; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX12-LABEL: v8f64:
+; AVX12: # BB#0:
+; AVX12-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
+; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX12-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX12-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX12-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
+; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX12-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX12-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX12-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX12-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX12-NEXT: vcmpltpd %ymm5, %ymm7, %ymm1
+; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX12-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
+; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX12-NEXT: vcmpltpd %ymm4, %ymm6, %ymm2
+; AVX12-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX12-NEXT: vpacksswb %xmm4, %xmm2, %xmm2
+; AVX12-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX12-NEXT: vpsllw $15, %xmm1, %xmm1
+; AVX12-NEXT: vpsraw $15, %xmm1, %xmm1
+; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX12-NEXT: vpmovmskb %xmm0, %eax
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: vzeroupper
+; AVX12-NEXT: retq
+;
+; AVX512F-LABEL: v8f64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
+; AVX512F-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v8f64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1
+; AVX512BW-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x0 = fcmp ogt <8 x double> %a, %b
+ %x1 = fcmp ogt <8 x double> %c, %d
+ %y = and <8 x i1> %x0, %x1
+ %res = bitcast <8 x i1> %y to i8
+ ret i8 %res
+}
+
+define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
+; SSE-LABEL: v32i16:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: pcmpgtw %xmm5, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE-NEXT: pshufb %xmm5, %xmm1
+; SSE-NEXT: pcmpgtw %xmm4, %xmm0
+; SSE-NEXT: pshufb %xmm5, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: pcmpgtw %xmm7, %xmm3
+; SSE-NEXT: pshufb %xmm5, %xmm3
+; SSE-NEXT: pcmpgtw %xmm6, %xmm2
+; SSE-NEXT: pshufb %xmm5, %xmm2
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: pshufb %xmm5, %xmm11
+; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: pshufb %xmm5, %xmm8
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm11[0]
+; SSE-NEXT: pand %xmm0, %xmm8
+; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: pshufb %xmm5, %xmm10
+; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: pshufb %xmm5, %xmm9
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm10[0]
+; SSE-NEXT: pand %xmm2, %xmm9
+; SSE-NEXT: pextrb $15, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $15, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; SSE-NEXT: shll $16, %ecx
+; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: orl %ecx, %eax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v32i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi0:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi1:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi2:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $32, %rsp
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT: vpcmpgtw %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm8, %xmm1, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpcmpgtw %xmm7, %xmm5, %xmm2
+; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtw %xmm6, %xmm4, %xmm3
+; AVX1-NEXT: vpacksswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: movl (%rsp), %eax
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v32i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtw %ymm7, %ymm5, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtw %ymm6, %ymm4, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: .Lcfi0:
+; AVX512F-NEXT: .cfi_def_cfa_offset 16
+; AVX512F-NEXT: .Lcfi1:
+; AVX512F-NEXT: .cfi_offset %rbp, -16
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: .Lcfi2:
+; AVX512F-NEXT: .cfi_def_cfa_register %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $32, %rsp
+; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm1
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm0
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpcmpgtw %ymm7, %ymm5, %ymm1
+; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm1
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpcmpgtw %ymm6, %ymm4, %ymm2
+; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
+; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm2
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, (%rsp)
+; AVX512F-NEXT: movl (%rsp), %eax
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpcmpgtw %zmm3, %zmm2, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x0 = icmp sgt <32 x i16> %a, %b
+ %x1 = icmp sgt <32 x i16> %c, %d
+ %y = and <32 x i1> %x0, %x1
+ %res = bitcast <32 x i1> %y to i32
+ ret i32 %res
+}
+
+define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
+; SSE-LABEL: v16i32:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: pcmpgtd %xmm7, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE-NEXT: pshufb %xmm7, %xmm3
+; SSE-NEXT: pcmpgtd %xmm6, %xmm2
+; SSE-NEXT: pshufb %xmm7, %xmm2
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT: psllw $15, %xmm2
+; SSE-NEXT: psraw $15, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE-NEXT: pshufb %xmm3, %xmm2
+; SSE-NEXT: pcmpgtd %xmm5, %xmm1
+; SSE-NEXT: pshufb %xmm7, %xmm1
+; SSE-NEXT: pcmpgtd %xmm4, %xmm0
+; SSE-NEXT: pshufb %xmm7, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: psllw $15, %xmm0
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pshufb %xmm3, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE-NEXT: psllw $7, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: pcmpgtb %xmm0, %xmm4
+; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: pshufb %xmm7, %xmm11
+; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: pshufb %xmm7, %xmm9
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; SSE-NEXT: psllw $15, %xmm9
+; SSE-NEXT: psraw $15, %xmm9
+; SSE-NEXT: pshufb %xmm3, %xmm9
+; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: pshufb %xmm7, %xmm10
+; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: pshufb %xmm7, %xmm8
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
+; SSE-NEXT: psllw $15, %xmm8
+; SSE-NEXT: psraw $15, %xmm8
+; SSE-NEXT: pshufb %xmm3, %xmm8
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
+; SSE-NEXT: psllw $7, %xmm8
+; SSE-NEXT: pand %xmm2, %xmm8
+; SSE-NEXT: pcmpgtb %xmm8, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v16i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT: vpcmpgtd %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm8, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm9
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm8, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm9[0]
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT: vpand %xmm9, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm1
+; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtd %xmm7, %xmm5, %xmm3
+; AVX1-NEXT: vpacksswb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vpcmpgtd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpacksswb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpshufb %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm9, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v16i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpcmpgtd %ymm7, %ymm5, %ymm5
+; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm7
+; AVX2-NEXT: vpacksswb %xmm7, %xmm5, %xmm5
+; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5
+; AVX2-NEXT: vpcmpgtd %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX2-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
+; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; AVX2-NEXT: vpsllw $7, %xmm3, %xmm3
+; AVX2-NEXT: vpand %xmm1, %xmm3, %xmm1
+; AVX2-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v16i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v16i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x0 = icmp sgt <16 x i32> %a, %b
+ %x1 = icmp sgt <16 x i32> %c, %d
+ %y = and <16 x i1> %x0, %x1
+ %res = bitcast <16 x i1> %y to i16
+ ret i16 %res
+}
+
+define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x float> %d) {
+; SSE-LABEL: v16f32:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: cmpltps %xmm3, %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE-NEXT: pshufb %xmm3, %xmm7
+; SSE-NEXT: cmpltps %xmm2, %xmm6
+; SSE-NEXT: pshufb %xmm3, %xmm6
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
+; SSE-NEXT: psllw $15, %xmm6
+; SSE-NEXT: psraw $15, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE-NEXT: pshufb %xmm2, %xmm6
+; SSE-NEXT: cmpltps %xmm1, %xmm5
+; SSE-NEXT: pshufb %xmm3, %xmm5
+; SSE-NEXT: cmpltps %xmm0, %xmm4
+; SSE-NEXT: pshufb %xmm3, %xmm4
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; SSE-NEXT: psllw $15, %xmm4
+; SSE-NEXT: psraw $15, %xmm4
+; SSE-NEXT: pshufb %xmm2, %xmm4
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; SSE-NEXT: psllw $7, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: pcmpgtb %xmm4, %xmm5
+; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: pshufb %xmm3, %xmm11
+; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: pshufb %xmm3, %xmm9
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; SSE-NEXT: psllw $15, %xmm9
+; SSE-NEXT: psraw $15, %xmm9
+; SSE-NEXT: pshufb %xmm2, %xmm9
+; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: pshufb %xmm3, %xmm10
+; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: pshufb %xmm3, %xmm8
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
+; SSE-NEXT: psllw $15, %xmm8
+; SSE-NEXT: psraw $15, %xmm8
+; SSE-NEXT: pshufb %xmm2, %xmm8
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
+; SSE-NEXT: psllw $7, %xmm8
+; SSE-NEXT: pand %xmm1, %xmm8
+; SSE-NEXT: pcmpgtb %xmm8, %xmm0
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX12-LABEL: v16f32:
+; AVX12: # BB#0:
+; AVX12-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
+; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX12-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX12-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX12-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
+; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX12-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX12-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX12-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX12-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX12-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX12-NEXT: vcmpltps %ymm5, %ymm7, %ymm5
+; AVX12-NEXT: vextractf128 $1, %ymm5, %xmm7
+; AVX12-NEXT: vpacksswb %xmm7, %xmm5, %xmm5
+; AVX12-NEXT: vpshufb %xmm3, %xmm5, %xmm5
+; AVX12-NEXT: vcmpltps %ymm4, %ymm6, %ymm4
+; AVX12-NEXT: vextractf128 $1, %ymm4, %xmm6
+; AVX12-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
+; AVX12-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; AVX12-NEXT: vpsllw $7, %xmm3, %xmm3
+; AVX12-NEXT: vpand %xmm1, %xmm3, %xmm1
+; AVX12-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT: vpmovmskb %xmm0, %eax
+; AVX12-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX12-NEXT: vzeroupper
+; AVX12-NEXT: retq
+;
+; AVX512F-LABEL: v16f32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
+; AVX512F-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v16f32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1
+; AVX512BW-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x0 = fcmp ogt <16 x float> %a, %b
+ %x1 = fcmp ogt <16 x float> %c, %d
+ %y = and <16 x i1> %x0, %x1
+ %res = bitcast <16 x i1> %y to i16
+ ret i16 %res
+}
+
+define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
+; SSE-LABEL: v64i8:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: pcmpgtb %xmm6, %xmm2
+; SSE-NEXT: pcmpgtb %xmm7, %xmm3
+; SSE-NEXT: pcmpgtb %xmm4, %xmm0
+; SSE-NEXT: pcmpgtb %xmm5, %xmm1
+; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: pand %xmm2, %xmm8
+; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: pand %xmm3, %xmm9
+; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm10
+; SSE-NEXT: pand %xmm0, %xmm10
+; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: pand %xmm1, %xmm11
+; SSE-NEXT: pextrb $15, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm11, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $15, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm10, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $15, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm9, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $15, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm8, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: shll $16, %eax
+; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %ecx
+; SSE-NEXT: orl %eax, %ecx
+; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %edx
+; SSE-NEXT: shll $16, %edx
+; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: orl %edx, %eax
+; SSE-NEXT: shlq $32, %rax
+; SSE-NEXT: orq %rcx, %rax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v64i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi3:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi4:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi5:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT: vpcmpgtb %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm1, %ymm8
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpcmpgtb %xmm7, %xmm5, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vandps %ymm0, %ymm8, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm6, %xmm4, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: movl (%rsp), %ecx
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: shlq $32, %rax
+; AVX1-NEXT: orq %rcx, %rax
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v64i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi0:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi1:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi2:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
+; AVX2-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vpcmpgtb %ymm7, %ymm5, %ymm0
+; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpcmpgtb %ymm6, %ymm4, %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: movl (%rsp), %ecx
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: shlq $32, %rax
+; AVX2-NEXT: orq %rcx, %rax
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: .Lcfi3:
+; AVX512F-NEXT: .cfi_def_cfa_offset 16
+; AVX512F-NEXT: .Lcfi4:
+; AVX512F-NEXT: .cfi_offset %rbp, -16
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: .Lcfi5:
+; AVX512F-NEXT: .cfi_def_cfa_register %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $64, %rsp
+; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpcmpgtb %ymm7, %ymm5, %ymm2
+; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpcmpgtb %ymm6, %ymm4, %ymm2
+; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512F-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, (%rsp)
+; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm0
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: movl (%rsp), %ecx
+; AVX512F-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX512F-NEXT: shlq $32, %rax
+; AVX512F-NEXT: orq %rcx, %rax
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpcmpgtb %zmm3, %zmm2, %k0 {%k1}
+; AVX512BW-NEXT: kmovq %k0, %rax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x0 = icmp sgt <64 x i8> %a, %b
+ %x1 = icmp sgt <64 x i8> %c, %d
+ %y = and <64 x i1> %x0, %x1
+ %res = bitcast <64 x i1> %y to i64
+ ret i64 %res
+}
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
new file mode 100644
index 000000000000..9b6401d1a76c
--- /dev/null
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
@@ -0,0 +1,3483 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512
+
+;
+; 128-bit vectors
+;
+
+define <2 x i64> @ext_i2_2i64(i2 %a0) {
+; SSE2-SSSE3-LABEL: ext_i2_2i64:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: andb $3, %dil
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $62, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movq %rcx, %xmm1
+; SSE2-SSSE3-NEXT: shlq $63, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movq %rax, %xmm0
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: ext_i2_2i64:
+; AVX12: # BB#0:
+; AVX12-NEXT: andb $3, %dil
+; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $62, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vmovq %rcx, %xmm0
+; AVX12-NEXT: shlq $63, %rax
+; AVX12-NEXT: sarq $63, %rax
+; AVX12-NEXT: vmovq %rax, %xmm1
+; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: ext_i2_2i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: andb $3, %dil
+; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX512-NEXT: kmovd %eax, %k1
+; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = bitcast i2 %a0 to <2 x i1>
+ %2 = sext <2 x i1> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define <4 x i32> @ext_i4_4i32(i4 %a0) {
+; SSE2-SSSE3-LABEL: ext_i4_4i32:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: andb $15, %dil
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $60, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $61, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $62, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shlq $63, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm0
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: ext_i4_4i32:
+; AVX12: # BB#0:
+; AVX12-NEXT: andb $15, %dil
+; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $62, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: movq %rax, %rdx
+; AVX12-NEXT: shlq $63, %rdx
+; AVX12-NEXT: sarq $63, %rdx
+; AVX12-NEXT: vmovd %edx, %xmm0
+; AVX12-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $61, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: shlq $60, %rax
+; AVX12-NEXT: sarq $63, %rax
+; AVX12-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: ext_i4_4i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: andb $15, %dil
+; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX512-NEXT: kmovd %eax, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = bitcast i4 %a0 to <4 x i1>
+ %2 = sext <4 x i1> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @ext_i8_8i16(i8 %a0) {
+; SSE2-SSSE3-LABEL: ext_i8_8i16:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shrq $7, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $57, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $58, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $59, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $60, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $61, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shlq $62, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: shlq $63, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: ext_i8_8i16:
+; AVX12: # BB#0:
+; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $62, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: movq %rax, %rdx
+; AVX12-NEXT: shlq $63, %rdx
+; AVX12-NEXT: sarq $63, %rdx
+; AVX12-NEXT: vmovd %edx, %xmm0
+; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $61, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $60, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $59, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $58, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $57, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: shrq $7, %rax
+; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: ext_i8_8i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: vpmovm2w %k0, %xmm0
+; AVX512-NEXT: retq
+ %1 = bitcast i8 %a0 to <8 x i1>
+ %2 = sext <8 x i1> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @ext_i16_16i8(i16 %a0) {
+; SSE2-SSSE3-LABEL: ext_i16_16i8:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: pushq %rbp
+; SSE2-SSSE3-NEXT: .Lcfi0:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16
+; SSE2-SSSE3-NEXT: pushq %r15
+; SSE2-SSSE3-NEXT: .Lcfi1:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24
+; SSE2-SSSE3-NEXT: pushq %r14
+; SSE2-SSSE3-NEXT: .Lcfi2:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32
+; SSE2-SSSE3-NEXT: pushq %r13
+; SSE2-SSSE3-NEXT: .Lcfi3:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40
+; SSE2-SSSE3-NEXT: pushq %r12
+; SSE2-SSSE3-NEXT: .Lcfi4:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48
+; SSE2-SSSE3-NEXT: pushq %rbx
+; SSE2-SSSE3-NEXT: .Lcfi5:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56
+; SSE2-SSSE3-NEXT: .Lcfi6:
+; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56
+; SSE2-SSSE3-NEXT: .Lcfi7:
+; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48
+; SSE2-SSSE3-NEXT: .Lcfi8:
+; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40
+; SSE2-SSSE3-NEXT: .Lcfi9:
+; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32
+; SSE2-SSSE3-NEXT: .Lcfi10:
+; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24
+; SSE2-SSSE3-NEXT: .Lcfi11:
+; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rax
+; SSE2-SSSE3-NEXT: movq %rax, %r8
+; SSE2-SSSE3-NEXT: movq %rax, %r9
+; SSE2-SSSE3-NEXT: movq %rax, %r10
+; SSE2-SSSE3-NEXT: movq %rax, %r11
+; SSE2-SSSE3-NEXT: movq %rax, %r14
+; SSE2-SSSE3-NEXT: movq %rax, %r15
+; SSE2-SSSE3-NEXT: movq %rax, %r12
+; SSE2-SSSE3-NEXT: movq %rax, %r13
+; SSE2-SSSE3-NEXT: movq %rax, %rbx
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: movq %rax, %rdx
+; SSE2-SSSE3-NEXT: movq %rax, %rsi
+; SSE2-SSSE3-NEXT: movq %rax, %rdi
+; SSE2-SSSE3-NEXT: movq %rax, %rbp
+; SSE2-SSSE3-NEXT: shrq $15, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm0
+; SSE2-SSSE3-NEXT: movq %rax, %rbp
+; SSE2-SSSE3-NEXT: movsbq %al, %rax
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm1
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm2
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm3
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-SSSE3-NEXT: shlq $61, %rbx
+; SSE2-SSSE3-NEXT: sarq $63, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: shlq $62, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-SSSE3-NEXT: shlq $63, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-SSSE3-NEXT: shlq $58, %rsi
+; SSE2-SSSE3-NEXT: sarq $63, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE2-SSSE3-NEXT: shlq $59, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm4
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-SSSE3-NEXT: shlq $57, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm2
+; SSE2-SSSE3-NEXT: shrq $7, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: popq %rbx
+; SSE2-SSSE3-NEXT: popq %r12
+; SSE2-SSSE3-NEXT: popq %r13
+; SSE2-SSSE3-NEXT: popq %r14
+; SSE2-SSSE3-NEXT: popq %r15
+; SSE2-SSSE3-NEXT: popq %rbp
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: ext_i16_16i8:
+; AVX12: # BB#0:
+; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movswq -{{[0-9]+}}(%rsp), %rax
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $62, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: movq %rax, %rdx
+; AVX12-NEXT: shlq $63, %rdx
+; AVX12-NEXT: sarq $63, %rdx
+; AVX12-NEXT: vmovd %edx, %xmm0
+; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $61, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $60, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $59, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $58, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $57, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movsbq %al, %rcx
+; AVX12-NEXT: shrq $7, %rcx
+; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $55, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $54, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $53, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $52, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $51, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $50, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movq %rax, %rcx
+; AVX12-NEXT: shlq $49, %rcx
+; AVX12-NEXT: sarq $63, %rcx
+; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: shrq $15, %rax
+; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: ext_i16_16i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: vpmovm2b %k0, %xmm0
+; AVX512-NEXT: retq
+ %1 = bitcast i16 %a0 to <16 x i1>
+ %2 = sext <16 x i1> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+;
+; 256-bit vectors
+;
+
+define <4 x i64> @ext_i4_4i64(i4 %a0) {
+; SSE2-SSSE3-LABEL: ext_i4_4i64:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: andb $15, %dil
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-SSSE3-NEXT: movd %eax, %xmm2
+; SSE2-SSSE3-NEXT: shrl %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm0
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm0
+; SSE2-SSSE3-NEXT: psrad $31, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm1
+; SSE2-SSSE3-NEXT: psrad $31, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i4_4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: andb $15, %dil
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shlq $60, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shlq $61, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shlq $62, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm1
+; AVX1-NEXT: shlq $63, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i4_4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: andb $15, %dil
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shlq $60, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vmovq %rcx, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shlq $61, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vmovq %rcx, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shlq $62, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vmovq %rcx, %xmm1
+; AVX2-NEXT: shlq $63, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i4_4i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: andb $15, %dil
+; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX512-NEXT: kmovd %eax, %k1
+; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: retq
+ %1 = bitcast i4 %a0 to <4 x i1>
+ %2 = sext <4 x i1> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define <8 x i32> @ext_i8_8i32(i8 %a0) {
+; SSE2-SSSE3-LABEL: ext_i8_8i32:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: shrl $7, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm3
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm0
+; SSE2-SSSE3-NEXT: psrad $31, %xmm0
+; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm1
+; SSE2-SSSE3-NEXT: psrad $31, %xmm1
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i8_8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shlq $58, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rax, %rdx
+; AVX1-NEXT: shlq $59, %rdx
+; AVX1-NEXT: sarq $63, %rdx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shlq $57, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $7, %rcx
+; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shlq $62, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: movq %rax, %rdx
+; AVX1-NEXT: shlq $63, %rdx
+; AVX1-NEXT: sarq $63, %rdx
+; AVX1-NEXT: vmovd %edx, %xmm1
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shlq $61, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $60, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i8_8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shlq $58, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rax, %rdx
+; AVX2-NEXT: shlq $59, %rdx
+; AVX2-NEXT: sarq $63, %rdx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shlq $57, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $7, %rcx
+; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shlq $62, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: movq %rax, %rdx
+; AVX2-NEXT: shlq $63, %rdx
+; AVX2-NEXT: sarq $63, %rdx
+; AVX2-NEXT: vmovd %edx, %xmm1
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shlq $61, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $60, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i8_8i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i8 %a0 to <8 x i1>
+ %2 = sext <8 x i1> %1 to <8 x i32>
+ ret <8 x i32> %2
+}
+
+define <16 x i16> @ext_i16_16i16(i16 %a0) {
+; SSE2-SSSE3-LABEL: ext_i16_16i16:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm0
+; SSE2-SSSE3-NEXT: psraw $15, %xmm0
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm1
+; SSE2-SSSE3-NEXT: psraw $15, %xmm1
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i16_16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi0:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: .Lcfi1:
+; AVX1-NEXT: .cfi_def_cfa_offset 24
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: .Lcfi2:
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: .Lcfi3:
+; AVX1-NEXT: .cfi_def_cfa_offset 40
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: .Lcfi4:
+; AVX1-NEXT: .cfi_def_cfa_offset 48
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: .Lcfi5:
+; AVX1-NEXT: .cfi_def_cfa_offset 56
+; AVX1-NEXT: .Lcfi6:
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .Lcfi7:
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .Lcfi8:
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .Lcfi9:
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .Lcfi10:
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: .Lcfi11:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movswq -{{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shlq $55, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: movq %rax, %r8
+; AVX1-NEXT: movq %rax, %r10
+; AVX1-NEXT: movq %rax, %r11
+; AVX1-NEXT: movq %rax, %r14
+; AVX1-NEXT: movq %rax, %r15
+; AVX1-NEXT: movq %rax, %r9
+; AVX1-NEXT: movq %rax, %r12
+; AVX1-NEXT: movq %rax, %r13
+; AVX1-NEXT: movq %rax, %rbx
+; AVX1-NEXT: movq %rax, %rdi
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: movq %rax, %rdx
+; AVX1-NEXT: movq %rax, %rsi
+; AVX1-NEXT: movsbq %al, %rbp
+; AVX1-NEXT: shlq $54, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: shlq $53, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: shlq $52, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0
+; AVX1-NEXT: shlq $51, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: shlq $50, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0
+; AVX1-NEXT: shlq $49, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0
+; AVX1-NEXT: shrq $15, %r9
+; AVX1-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: shlq $63, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vmovd %r13d, %xmm1
+; AVX1-NEXT: shlq $62, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $61, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $60, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $59, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $58, %rdx
+; AVX1-NEXT: sarq $63, %rdx
+; AVX1-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $57, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1
+; AVX1-NEXT: shrq $7, %rbp
+; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i16_16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi0:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: .Lcfi1:
+; AVX2-NEXT: .cfi_def_cfa_offset 24
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: .Lcfi2:
+; AVX2-NEXT: .cfi_def_cfa_offset 32
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: .Lcfi3:
+; AVX2-NEXT: .cfi_def_cfa_offset 40
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: .Lcfi4:
+; AVX2-NEXT: .cfi_def_cfa_offset 48
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: .Lcfi5:
+; AVX2-NEXT: .cfi_def_cfa_offset 56
+; AVX2-NEXT: .Lcfi6:
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .Lcfi7:
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .Lcfi8:
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .Lcfi9:
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .Lcfi10:
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: .Lcfi11:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movswq -{{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shlq $55, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: movq %rax, %r8
+; AVX2-NEXT: movq %rax, %r10
+; AVX2-NEXT: movq %rax, %r11
+; AVX2-NEXT: movq %rax, %r14
+; AVX2-NEXT: movq %rax, %r15
+; AVX2-NEXT: movq %rax, %r9
+; AVX2-NEXT: movq %rax, %r12
+; AVX2-NEXT: movq %rax, %r13
+; AVX2-NEXT: movq %rax, %rbx
+; AVX2-NEXT: movq %rax, %rdi
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: movq %rax, %rdx
+; AVX2-NEXT: movq %rax, %rsi
+; AVX2-NEXT: movsbq %al, %rbp
+; AVX2-NEXT: shlq $54, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: shlq $53, %r8
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0
+; AVX2-NEXT: shlq $52, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0
+; AVX2-NEXT: shlq $51, %r11
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0
+; AVX2-NEXT: shlq $50, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0
+; AVX2-NEXT: shlq $49, %r15
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0
+; AVX2-NEXT: shrq $15, %r9
+; AVX2-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0
+; AVX2-NEXT: shlq $63, %r13
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vmovd %r13d, %xmm1
+; AVX2-NEXT: shlq $62, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $61, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $60, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1
+; AVX2-NEXT: shlq $59, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $58, %rdx
+; AVX2-NEXT: sarq $63, %rdx
+; AVX2-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $57, %rsi
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1
+; AVX2-NEXT: shrq $7, %rbp
+; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i16_16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: vpmovm2w %k0, %ymm0
+; AVX512-NEXT: retq
+ %1 = bitcast i16 %a0 to <16 x i1>
+ %2 = sext <16 x i1> %1 to <16 x i16>
+ ret <16 x i16> %2
+}
+
+define <32 x i8> @ext_i32_32i8(i32 %a0) {
+; SSE2-SSSE3-LABEL: ext_i32_32i8:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: pushq %rbp
+; SSE2-SSSE3-NEXT: .Lcfi12:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16
+; SSE2-SSSE3-NEXT: pushq %r15
+; SSE2-SSSE3-NEXT: .Lcfi13:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24
+; SSE2-SSSE3-NEXT: pushq %r14
+; SSE2-SSSE3-NEXT: .Lcfi14:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32
+; SSE2-SSSE3-NEXT: pushq %r13
+; SSE2-SSSE3-NEXT: .Lcfi15:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40
+; SSE2-SSSE3-NEXT: pushq %r12
+; SSE2-SSSE3-NEXT: .Lcfi16:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48
+; SSE2-SSSE3-NEXT: pushq %rbx
+; SSE2-SSSE3-NEXT: .Lcfi17:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56
+; SSE2-SSSE3-NEXT: .Lcfi18:
+; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56
+; SSE2-SSSE3-NEXT: .Lcfi19:
+; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48
+; SSE2-SSSE3-NEXT: .Lcfi20:
+; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40
+; SSE2-SSSE3-NEXT: .Lcfi21:
+; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32
+; SSE2-SSSE3-NEXT: .Lcfi22:
+; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24
+; SSE2-SSSE3-NEXT: .Lcfi23:
+; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: shrl $16, %edi
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rbx
+; SSE2-SSSE3-NEXT: movq %rbx, %r8
+; SSE2-SSSE3-NEXT: movq %rbx, %r9
+; SSE2-SSSE3-NEXT: movq %rbx, %r10
+; SSE2-SSSE3-NEXT: movq %rbx, %r11
+; SSE2-SSSE3-NEXT: movq %rbx, %r14
+; SSE2-SSSE3-NEXT: movq %rbx, %r15
+; SSE2-SSSE3-NEXT: movq %rbx, %r12
+; SSE2-SSSE3-NEXT: movq %rbx, %r13
+; SSE2-SSSE3-NEXT: movq %rbx, %rdi
+; SSE2-SSSE3-NEXT: movq %rbx, %rcx
+; SSE2-SSSE3-NEXT: movq %rbx, %rdx
+; SSE2-SSSE3-NEXT: movq %rbx, %rbp
+; SSE2-SSSE3-NEXT: movq %rbx, %rsi
+; SSE2-SSSE3-NEXT: movq %rbx, %rax
+; SSE2-SSSE3-NEXT: shrq $15, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm0
+; SSE2-SSSE3-NEXT: movq %rbx, %rax
+; SSE2-SSSE3-NEXT: movsbq %bl, %rbx
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm15
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm8
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm3
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm9
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm6
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm10
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm1
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm11
+; SSE2-SSSE3-NEXT: shlq $61, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm5
+; SSE2-SSSE3-NEXT: shlq $62, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm12
+; SSE2-SSSE3-NEXT: shlq $63, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm0
+; SSE2-SSSE3-NEXT: shlq $58, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm13
+; SSE2-SSSE3-NEXT: shlq $59, %rsi
+; SSE2-SSSE3-NEXT: sarq $63, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm7
+; SSE2-SSSE3-NEXT: shlq $57, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: shrq $7, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm14
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
+; SSE2-SSSE3-NEXT: movq %rsi, %r8
+; SSE2-SSSE3-NEXT: movq %rsi, %r9
+; SSE2-SSSE3-NEXT: movq %rsi, %r10
+; SSE2-SSSE3-NEXT: movq %rsi, %r11
+; SSE2-SSSE3-NEXT: movq %rsi, %r14
+; SSE2-SSSE3-NEXT: movq %rsi, %r15
+; SSE2-SSSE3-NEXT: movq %rsi, %r12
+; SSE2-SSSE3-NEXT: movq %rsi, %r13
+; SSE2-SSSE3-NEXT: movq %rsi, %rbx
+; SSE2-SSSE3-NEXT: movq %rsi, %rax
+; SSE2-SSSE3-NEXT: movq %rsi, %rcx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdi
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: shrq $15, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm2
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm3
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm4
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm5
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm4
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm6
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-SSSE3-NEXT: shlq $61, %rbx
+; SSE2-SSSE3-NEXT: sarq $63, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-SSSE3-NEXT: shlq $62, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm2
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE2-SSSE3-NEXT: shlq $63, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; SSE2-SSSE3-NEXT: shlq $58, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-SSSE3-NEXT: shlq $59, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-SSSE3-NEXT: shlq $57, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm4
+; SSE2-SSSE3-NEXT: shrq $7, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-SSSE3-NEXT: popq %rbx
+; SSE2-SSSE3-NEXT: popq %r12
+; SSE2-SSSE3-NEXT: popq %r13
+; SSE2-SSSE3-NEXT: popq %r14
+; SSE2-SSSE3-NEXT: popq %r15
+; SSE2-SSSE3-NEXT: popq %rbp
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i32_32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi12:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi13:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi14:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: .Lcfi15:
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .Lcfi16:
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .Lcfi17:
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .Lcfi18:
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .Lcfi19:
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: movl %edi, (%rsp)
+; AVX1-NEXT: movslq (%rsp), %rdx
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $47, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: movq %rdx, %r11
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: movq %rdx, %r15
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shlq $46, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX1-NEXT: shlq $45, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: shlq $44, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: shlq $43, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $42, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: shlq $41, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: shlq $40, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: shlq $39, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: shlq $38, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: movsbq %dl, %rax
+; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: shlq $37, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: shlq $36, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: shlq $35, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: shlq $34, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r15
+; AVX1-NEXT: shlq $33, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX1-NEXT: shrq $31, %rax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shlq $63, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vmovd %r8d, %xmm1
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movswq %dx, %rdx
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; AVX1-NEXT: shlq $62, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $61, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $60, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $59, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $58, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $57, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; AVX1-NEXT: shrq $7, %rcx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $55, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $54, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $53, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $52, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $51, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $50, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: shlq $49, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
+; AVX1-NEXT: shrq $15, %rdx
+; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: leaq -40(%rbp), %rsp
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i32_32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi12:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi13:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi14:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
+; AVX2-NEXT: .Lcfi15:
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .Lcfi16:
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .Lcfi17:
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .Lcfi18:
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .Lcfi19:
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: movl %edi, (%rsp)
+; AVX2-NEXT: movslq (%rsp), %rdx
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: shlq $47, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: movq %rdx, %r13
+; AVX2-NEXT: movq %rdx, %rsi
+; AVX2-NEXT: movq %rdx, %r10
+; AVX2-NEXT: movq %rdx, %r11
+; AVX2-NEXT: movq %rdx, %r9
+; AVX2-NEXT: movq %rdx, %rbx
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: movq %rdx, %r15
+; AVX2-NEXT: movq %rdx, %r12
+; AVX2-NEXT: movq %rdx, %rax
+; AVX2-NEXT: shlq $46, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX2-NEXT: shlq $45, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: shlq $44, %r8
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: shlq $43, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: shlq $42, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: shlq $41, %r13
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r13
+; AVX2-NEXT: shlq $40, %rsi
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rsi
+; AVX2-NEXT: shlq $39, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r10
+; AVX2-NEXT: shlq $38, %r11
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
+; AVX2-NEXT: movsbq %dl, %rax
+; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: shlq $37, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r9
+; AVX2-NEXT: shlq $36, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rbx
+; AVX2-NEXT: shlq $35, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: shlq $34, %r15
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r15
+; AVX2-NEXT: shlq $33, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r12
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX2-NEXT: shrq $31, %rax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rax
+; AVX2-NEXT: shlq $63, %r8
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vmovd %r8d, %xmm1
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: movswq %dx, %rdx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; AVX2-NEXT: shlq $62, %r11
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $61, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $60, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
+; AVX2-NEXT: shlq $59, %r13
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $58, %rsi
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
+; AVX2-NEXT: shlq $57, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; AVX2-NEXT: shrq $7, %rcx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $55, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $54, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $53, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $52, %r15
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $51, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $50, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: shlq $49, %r8
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
+; AVX2-NEXT: shrq $15, %rdx
+; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: leaq -40(%rbp), %rsp
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i32_32i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: vpmovm2b %k0, %ymm0
+; AVX512-NEXT: retq
+ %1 = bitcast i32 %a0 to <32 x i1>
+ %2 = sext <32 x i1> %1 to <32 x i8>
+ ret <32 x i8> %2
+}
+
+;
+; 512-bit vectors
+;
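+; Note: the tests below bitcast the scalar argument to an <N x i1> mask and
+; sign-extend it to a 512-bit vector type; the prefixed check lines record the
+; expected assembly for each target feature configuration.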
+
+define <8 x i64> @ext_i8_8i64(i8 %a0) {
+; SSE2-SSSE3-LABEL: ext_i8_8i64:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: shrl $7, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm0
+; SSE2-SSSE3-NEXT: psrad $31, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm1
+; SSE2-SSSE3-NEXT: psrad $31, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm2
+; SSE2-SSSE3-NEXT: psrad $31, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm3
+; SSE2-SSSE3-NEXT: psrad $31, %xmm3
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i8_8i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: movl %eax, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $5, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $6, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $7, %eax
+; AVX1-NEXT: movzwl %ax, %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i8_8i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: movl %eax, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $3, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $5, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $6, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $7, %eax
+; AVX2-NEXT: movzwl %ax, %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
+; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i8_8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i8 %a0 to <8 x i1>
+ %2 = sext <8 x i1> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define <16 x i32> @ext_i16_16i32(i16 %a0) {
+; SSE2-SSSE3-LABEL: ext_i16_16i32:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm0
+; SSE2-SSSE3-NEXT: psrad $31, %xmm0
+; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm1
+; SSE2-SSSE3-NEXT: psrad $31, %xmm1
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm2
+; SSE2-SSSE3-NEXT: psrad $31, %xmm2
+; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm3
+; SSE2-SSSE3-NEXT: psrad $31, %xmm3
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i16_16i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: movl %eax, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $5, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $6, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $7, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $8, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $9, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $10, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $11, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $12, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $13, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $14, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $15, %eax
+; AVX1-NEXT: movzwl %ax, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i16_16i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: movl %eax, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $3, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $5, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $6, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $7, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $8, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $9, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $10, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $11, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $12, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $13, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $14, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $15, %eax
+; AVX2-NEXT: movzwl %ax, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
+; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i16_16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i16 %a0 to <16 x i1>
+ %2 = sext <16 x i1> %1 to <16 x i32>
+ ret <16 x i32> %2
+}
+
+define <32 x i16> @ext_i32_32i16(i32 %a0) {
+; SSE2-SSSE3-LABEL: ext_i32_32i16:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movl %edi, %eax
+; SSE2-SSSE3-NEXT: shrl $16, %eax
+; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm0
+; SSE2-SSSE3-NEXT: psraw $15, %xmm0
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm1
+; SSE2-SSSE3-NEXT: psraw $15, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm2
+; SSE2-SSSE3-NEXT: psraw $15, %xmm2
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm3
+; SSE2-SSSE3-NEXT: psraw $15, %xmm3
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i32_32i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi20:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi21:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi22:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $128, %rsp
+; AVX1-NEXT: .Lcfi23:
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .Lcfi24:
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .Lcfi25:
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .Lcfi26:
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .Lcfi27:
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, %r13d
+; AVX1-NEXT: movl %edi, %r12d
+; AVX1-NEXT: movl %edi, %r15d
+; AVX1-NEXT: movl %edi, %r14d
+; AVX1-NEXT: movl %edi, %ebx
+; AVX1-NEXT: movl %edi, %r11d
+; AVX1-NEXT: movl %edi, %r10d
+; AVX1-NEXT: movl %edi, %r9d
+; AVX1-NEXT: movl %edi, %r8d
+; AVX1-NEXT: movl %edi, %esi
+; AVX1-NEXT: movl %edi, %edx
+; AVX1-NEXT: movl %edi, %ecx
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: andl $1, %edi
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: shrl %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $3, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $4, %esi
+; AVX1-NEXT: andl $1, %esi
+; AVX1-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
+; AVX1-NEXT: shrl $5, %r8d
+; AVX1-NEXT: andl $1, %r8d
+; AVX1-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $6, %r9d
+; AVX1-NEXT: andl $1, %r9d
+; AVX1-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $7, %r10d
+; AVX1-NEXT: andl $1, %r10d
+; AVX1-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $8, %r11d
+; AVX1-NEXT: andl $1, %r11d
+; AVX1-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $9, %ebx
+; AVX1-NEXT: andl $1, %ebx
+; AVX1-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $10, %r14d
+; AVX1-NEXT: andl $1, %r14d
+; AVX1-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $11, %r15d
+; AVX1-NEXT: andl $1, %r15d
+; AVX1-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $12, %r12d
+; AVX1-NEXT: andl $1, %r12d
+; AVX1-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $13, %r13d
+; AVX1-NEXT: andl $1, %r13d
+; AVX1-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $14, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $15, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $16, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $17, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $18, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $19, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $20, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $21, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $22, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $23, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $25, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $26, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $27, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $28, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $29, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $30, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $31, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2
+; AVX1-NEXT: vpsraw $15, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2
+; AVX1-NEXT: vpsraw $15, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1
+; AVX1-NEXT: vpsraw $15, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: leaq -40(%rbp), %rsp
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i32_32i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi20:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi21:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi22:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $128, %rsp
+; AVX2-NEXT: .Lcfi23:
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .Lcfi24:
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .Lcfi25:
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .Lcfi26:
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .Lcfi27:
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, %r13d
+; AVX2-NEXT: movl %edi, %r12d
+; AVX2-NEXT: movl %edi, %r15d
+; AVX2-NEXT: movl %edi, %r14d
+; AVX2-NEXT: movl %edi, %ebx
+; AVX2-NEXT: movl %edi, %r11d
+; AVX2-NEXT: movl %edi, %r10d
+; AVX2-NEXT: movl %edi, %r9d
+; AVX2-NEXT: movl %edi, %r8d
+; AVX2-NEXT: movl %edi, %esi
+; AVX2-NEXT: movl %edi, %edx
+; AVX2-NEXT: movl %edi, %ecx
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: andl $1, %edi
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: shrl %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $3, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $4, %esi
+; AVX2-NEXT: andl $1, %esi
+; AVX2-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
+; AVX2-NEXT: shrl $5, %r8d
+; AVX2-NEXT: andl $1, %r8d
+; AVX2-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $6, %r9d
+; AVX2-NEXT: andl $1, %r9d
+; AVX2-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $7, %r10d
+; AVX2-NEXT: andl $1, %r10d
+; AVX2-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $8, %r11d
+; AVX2-NEXT: andl $1, %r11d
+; AVX2-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $9, %ebx
+; AVX2-NEXT: andl $1, %ebx
+; AVX2-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $10, %r14d
+; AVX2-NEXT: andl $1, %r14d
+; AVX2-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $11, %r15d
+; AVX2-NEXT: andl $1, %r15d
+; AVX2-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $12, %r12d
+; AVX2-NEXT: andl $1, %r12d
+; AVX2-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $13, %r13d
+; AVX2-NEXT: andl $1, %r13d
+; AVX2-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $14, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $15, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $16, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $17, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $18, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $19, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $20, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $21, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $22, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $23, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $24, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $25, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $26, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $27, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $28, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $29, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $30, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $31, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0
+; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vpsllw $15, %ymm1, %ymm1
+; AVX2-NEXT: vpsraw $15, %ymm1, %ymm1
+; AVX2-NEXT: leaq -40(%rbp), %rsp
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i32_32i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: vpmovm2w %k0, %zmm0
+; AVX512-NEXT: retq
+ %1 = bitcast i32 %a0 to <32 x i1>
+ %2 = sext <32 x i1> %1 to <32 x i16>
+ ret <32 x i16> %2
+}
+
+define <64 x i8> @ext_i64_64i8(i64 %a0) {
+; SSE2-SSSE3-LABEL: ext_i64_64i8:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: pushq %rbp
+; SSE2-SSSE3-NEXT: .Lcfi24:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16
+; SSE2-SSSE3-NEXT: pushq %r15
+; SSE2-SSSE3-NEXT: .Lcfi25:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24
+; SSE2-SSSE3-NEXT: pushq %r14
+; SSE2-SSSE3-NEXT: .Lcfi26:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32
+; SSE2-SSSE3-NEXT: pushq %r13
+; SSE2-SSSE3-NEXT: .Lcfi27:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40
+; SSE2-SSSE3-NEXT: pushq %r12
+; SSE2-SSSE3-NEXT: .Lcfi28:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48
+; SSE2-SSSE3-NEXT: pushq %rbx
+; SSE2-SSSE3-NEXT: .Lcfi29:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56
+; SSE2-SSSE3-NEXT: .Lcfi30:
+; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56
+; SSE2-SSSE3-NEXT: .Lcfi31:
+; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48
+; SSE2-SSSE3-NEXT: .Lcfi32:
+; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40
+; SSE2-SSSE3-NEXT: .Lcfi33:
+; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32
+; SSE2-SSSE3-NEXT: .Lcfi34:
+; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24
+; SSE2-SSSE3-NEXT: .Lcfi35:
+; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movq %rdi, %rax
+; SSE2-SSSE3-NEXT: shrq $32, %rax
+; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movq %rdi, %rax
+; SSE2-SSSE3-NEXT: shrq $48, %rax
+; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: shrl $16, %edi
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rbx
+; SSE2-SSSE3-NEXT: movq %rbx, %r8
+; SSE2-SSSE3-NEXT: movq %rbx, %r9
+; SSE2-SSSE3-NEXT: movq %rbx, %r10
+; SSE2-SSSE3-NEXT: movq %rbx, %r11
+; SSE2-SSSE3-NEXT: movq %rbx, %r14
+; SSE2-SSSE3-NEXT: movq %rbx, %r15
+; SSE2-SSSE3-NEXT: movq %rbx, %r12
+; SSE2-SSSE3-NEXT: movq %rbx, %r13
+; SSE2-SSSE3-NEXT: movq %rbx, %rdi
+; SSE2-SSSE3-NEXT: movq %rbx, %rcx
+; SSE2-SSSE3-NEXT: movq %rbx, %rdx
+; SSE2-SSSE3-NEXT: movq %rbx, %rsi
+; SSE2-SSSE3-NEXT: movq %rbx, %rbp
+; SSE2-SSSE3-NEXT: movq %rbx, %rax
+; SSE2-SSSE3-NEXT: shrq $15, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm0
+; SSE2-SSSE3-NEXT: movq %rbx, %rax
+; SSE2-SSSE3-NEXT: movsbq %bl, %rbx
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm15
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm8
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm2
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm9
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm6
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm10
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm11
+; SSE2-SSSE3-NEXT: shlq $61, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm5
+; SSE2-SSSE3-NEXT: shlq $62, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm12
+; SSE2-SSSE3-NEXT: shlq $63, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm0
+; SSE2-SSSE3-NEXT: shlq $58, %rsi
+; SSE2-SSSE3-NEXT: sarq $63, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm13
+; SSE2-SSSE3-NEXT: shlq $59, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm7
+; SSE2-SSSE3-NEXT: shlq $57, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm3
+; SSE2-SSSE3-NEXT: shrq $7, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm14
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
+; SSE2-SSSE3-NEXT: movq %rsi, %r8
+; SSE2-SSSE3-NEXT: movq %rsi, %r9
+; SSE2-SSSE3-NEXT: movq %rsi, %r10
+; SSE2-SSSE3-NEXT: movq %rsi, %r11
+; SSE2-SSSE3-NEXT: movq %rsi, %r14
+; SSE2-SSSE3-NEXT: movq %rsi, %r15
+; SSE2-SSSE3-NEXT: movq %rsi, %r12
+; SSE2-SSSE3-NEXT: movq %rsi, %r13
+; SSE2-SSSE3-NEXT: movq %rsi, %rbx
+; SSE2-SSSE3-NEXT: movq %rsi, %rax
+; SSE2-SSSE3-NEXT: movq %rsi, %rcx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdi
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: shrq $15, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm1
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm13
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3],xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm1
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm8
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm15
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm9
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm10
+; SSE2-SSSE3-NEXT: shlq $61, %rbx
+; SSE2-SSSE3-NEXT: sarq $63, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm7
+; SSE2-SSSE3-NEXT: shlq $62, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm11
+; SSE2-SSSE3-NEXT: shlq $63, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shlq $58, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm12
+; SSE2-SSSE3-NEXT: shlq $59, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm5
+; SSE2-SSSE3-NEXT: shlq $57, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm1
+; SSE2-SSSE3-NEXT: shrq $7, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm14
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
+; SSE2-SSSE3-NEXT: movq %rsi, %r8
+; SSE2-SSSE3-NEXT: movq %rsi, %r9
+; SSE2-SSSE3-NEXT: movq %rsi, %r10
+; SSE2-SSSE3-NEXT: movq %rsi, %r11
+; SSE2-SSSE3-NEXT: movq %rsi, %r14
+; SSE2-SSSE3-NEXT: movq %rsi, %r15
+; SSE2-SSSE3-NEXT: movq %rsi, %r12
+; SSE2-SSSE3-NEXT: movq %rsi, %r13
+; SSE2-SSSE3-NEXT: movq %rsi, %rbx
+; SSE2-SSSE3-NEXT: movq %rsi, %rax
+; SSE2-SSSE3-NEXT: movq %rsi, %rcx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdi
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: shrq $15, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm6
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm3
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm8
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm13
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm9
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm1
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm10
+; SSE2-SSSE3-NEXT: shlq $61, %rbx
+; SSE2-SSSE3-NEXT: sarq $63, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm15
+; SSE2-SSSE3-NEXT: shlq $62, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm11
+; SSE2-SSSE3-NEXT: shlq $63, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: shlq $58, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm12
+; SSE2-SSSE3-NEXT: shlq $59, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm5
+; SSE2-SSSE3-NEXT: shlq $57, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm6
+; SSE2-SSSE3-NEXT: shrq $7, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm14
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
+; SSE2-SSSE3-NEXT: movq %rsi, %r8
+; SSE2-SSSE3-NEXT: movq %rsi, %r9
+; SSE2-SSSE3-NEXT: movq %rsi, %r10
+; SSE2-SSSE3-NEXT: movq %rsi, %r11
+; SSE2-SSSE3-NEXT: movq %rsi, %r14
+; SSE2-SSSE3-NEXT: movq %rsi, %r15
+; SSE2-SSSE3-NEXT: movq %rsi, %r12
+; SSE2-SSSE3-NEXT: movq %rsi, %r13
+; SSE2-SSSE3-NEXT: movq %rsi, %rbx
+; SSE2-SSSE3-NEXT: movq %rsi, %rax
+; SSE2-SSSE3-NEXT: movq %rsi, %rcx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdi
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: shrq $15, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm7
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1],xmm3[2],xmm11[2],xmm3[3],xmm11[3],xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3],xmm6[4],xmm14[4],xmm6[5],xmm14[5],xmm6[6],xmm14[6],xmm6[7],xmm14[7]
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm4
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm6
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm5
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm7
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm6
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3],xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm8
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; SSE2-SSSE3-NEXT: shlq $61, %rbx
+; SSE2-SSSE3-NEXT: sarq $63, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm6
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
+; SSE2-SSSE3-NEXT: shlq $62, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm7
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-SSSE3-NEXT: shlq $63, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; SSE2-SSSE3-NEXT: shlq $58, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; SSE2-SSSE3-NEXT: shlq $59, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm7
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; SSE2-SSSE3-NEXT: shlq $57, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm5
+; SSE2-SSSE3-NEXT: shrq $7, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm6
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE2-SSSE3-NEXT: popq %rbx
+; SSE2-SSSE3-NEXT: popq %r12
+; SSE2-SSSE3-NEXT: popq %r13
+; SSE2-SSSE3-NEXT: popq %r14
+; SSE2-SSSE3-NEXT: popq %r15
+; SSE2-SSSE3-NEXT: popq %rbp
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i64_64i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi28:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi29:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi30:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $128, %rsp
+; AVX1-NEXT: .Lcfi31:
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .Lcfi32:
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .Lcfi33:
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .Lcfi34:
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .Lcfi35:
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $32, %rdi
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $47, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: movq %rdx, %r11
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: movq %rdx, %r15
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shlq $46, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX1-NEXT: shlq $45, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: shlq $44, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: shlq $43, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $42, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: shlq $41, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: shlq $40, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: shlq $39, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: shlq $38, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: movsbq %dl, %rax
+; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: shlq $37, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: shlq $36, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: shlq $35, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: shlq $34, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r15
+; AVX1-NEXT: shlq $33, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX1-NEXT: shrq $31, %rax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shlq $63, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vmovd %r8d, %xmm1
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movswq %dx, %rdx
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; AVX1-NEXT: shlq $62, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $61, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $60, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $59, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $58, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $57, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; AVX1-NEXT: shrq $7, %rcx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $55, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $54, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $53, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $52, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $51, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $50, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: shlq $49, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
+; AVX1-NEXT: shrq $15, %rdx
+; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $47, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: movq %rdx, %r11
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: movq %rdx, %r15
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shlq $46, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: shlq $45, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpinsrb $2, %r13d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: shlq $44, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $43, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpinsrb $4, %r9d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: shlq $42, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrb $5, %r12d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: shlq $41, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrb $6, %edi, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: shlq $40, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrb $7, %ebx, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: shlq $39, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrb $8, %r8d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: shlq $38, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpinsrb $9, %r10d, %xmm2, %xmm2
+; AVX1-NEXT: movsbq %dl, %rax
+; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX1-NEXT: shlq $37, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: shlq $36, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpinsrb $11, %esi, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: shlq $35, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpinsrb $12, %r11d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r11
+; AVX1-NEXT: shlq $34, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpinsrb $13, %r14d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: shlq $33, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vpinsrb $14, %r15d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r15
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX1-NEXT: shrq $31, %rax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shlq $63, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm3
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: movswq %dx, %rdx
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: shlq $62, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpinsrb $1, %r13d, %xmm3, %xmm1
+; AVX1-NEXT: shlq $61, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpinsrb $2, %r9d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $60, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrb $3, %r12d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $59, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $58, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrb $5, %ebx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $57, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrb $6, %r8d, %xmm1, %xmm1
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; AVX1-NEXT: shrq $7, %rdi
+; AVX1-NEXT: vpinsrb $7, %edi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $55, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpinsrb $8, %r10d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $54, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpinsrb $9, %esi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $53, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpinsrb $10, %r11d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $52, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpinsrb $11, %r14d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $51, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vpinsrb $12, %r15d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $50, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: shlq $49, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shrq $15, %rdx
+; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: leaq -40(%rbp), %rsp
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i64_64i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi28:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi29:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi30:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $128, %rsp
+; AVX2-NEXT: .Lcfi31:
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .Lcfi32:
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .Lcfi33:
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .Lcfi34:
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .Lcfi35:
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $32, %rdi
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: shlq $47, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: movq %rdx, %r13
+; AVX2-NEXT: movq %rdx, %rsi
+; AVX2-NEXT: movq %rdx, %r10
+; AVX2-NEXT: movq %rdx, %r11
+; AVX2-NEXT: movq %rdx, %r9
+; AVX2-NEXT: movq %rdx, %rbx
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: movq %rdx, %r15
+; AVX2-NEXT: movq %rdx, %r12
+; AVX2-NEXT: movq %rdx, %rax
+; AVX2-NEXT: shlq $46, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX2-NEXT: shlq $45, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: shlq $44, %r8
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: shlq $43, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: shlq $42, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: shlq $41, %r13
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r13
+; AVX2-NEXT: shlq $40, %rsi
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rsi
+; AVX2-NEXT: shlq $39, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r10
+; AVX2-NEXT: shlq $38, %r11
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
+; AVX2-NEXT: movsbq %dl, %rax
+; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: shlq $37, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r9
+; AVX2-NEXT: shlq $36, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rbx
+; AVX2-NEXT: shlq $35, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: shlq $34, %r15
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r15
+; AVX2-NEXT: shlq $33, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %r12
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX2-NEXT: shrq $31, %rax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movq %rdx, %rax
+; AVX2-NEXT: shlq $63, %r8
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vmovd %r8d, %xmm1
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: movswq %dx, %rdx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; AVX2-NEXT: shlq $62, %r11
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $61, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $60, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
+; AVX2-NEXT: shlq $59, %r13
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $58, %rsi
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
+; AVX2-NEXT: shlq $57, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; AVX2-NEXT: shrq $7, %rcx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $55, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $54, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $53, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $52, %r15
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $51, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $50, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: shlq $49, %r8
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
+; AVX2-NEXT: shrq $15, %rdx
+; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: shlq $47, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: movq %rdx, %r13
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: movq %rdx, %r9
+; AVX2-NEXT: movq %rdx, %r12
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: movq %rdx, %rbx
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: movq %rdx, %r10
+; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: movq %rdx, %rsi
+; AVX2-NEXT: movq %rdx, %r11
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: movq %rdx, %r15
+; AVX2-NEXT: movq %rdx, %rax
+; AVX2-NEXT: shlq $46, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: shlq $45, %r13
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vpinsrb $2, %r13d, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %r13
+; AVX2-NEXT: shlq $44, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: shlq $43, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vpinsrb $4, %r9d, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %r9
+; AVX2-NEXT: shlq $42, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpinsrb $5, %r12d, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %r12
+; AVX2-NEXT: shlq $41, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpinsrb $6, %edi, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: shlq $40, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpinsrb $7, %ebx, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %rbx
+; AVX2-NEXT: shlq $39, %r8
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vpinsrb $8, %r8d, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: shlq $38, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vpinsrb $9, %r10d, %xmm2, %xmm2
+; AVX2-NEXT: movsbq %dl, %rax
+; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX2-NEXT: shlq $37, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %r10
+; AVX2-NEXT: shlq $36, %rsi
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vpinsrb $11, %esi, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %rsi
+; AVX2-NEXT: shlq $35, %r11
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vpinsrb $12, %r11d, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %r11
+; AVX2-NEXT: shlq $34, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vpinsrb $13, %r14d, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %r14
+; AVX2-NEXT: shlq $33, %r15
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vpinsrb $14, %r15d, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %r15
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX2-NEXT: shrq $31, %rax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdx, %rax
+; AVX2-NEXT: shlq $63, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vmovd %ecx, %xmm3
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: movswq %dx, %rdx
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: shlq $62, %r13
+; AVX2-NEXT: sarq $63, %r13
+; AVX2-NEXT: vpinsrb $1, %r13d, %xmm3, %xmm1
+; AVX2-NEXT: shlq $61, %r9
+; AVX2-NEXT: sarq $63, %r9
+; AVX2-NEXT: vpinsrb $2, %r9d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $60, %r12
+; AVX2-NEXT: sarq $63, %r12
+; AVX2-NEXT: vpinsrb $3, %r12d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $59, %rdi
+; AVX2-NEXT: sarq $63, %rdi
+; AVX2-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
+; AVX2-NEXT: shlq $58, %rbx
+; AVX2-NEXT: sarq $63, %rbx
+; AVX2-NEXT: vpinsrb $5, %ebx, %xmm1, %xmm1
+; AVX2-NEXT: shlq $57, %r8
+; AVX2-NEXT: sarq $63, %r8
+; AVX2-NEXT: vpinsrb $6, %r8d, %xmm1, %xmm1
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; AVX2-NEXT: shrq $7, %rdi
+; AVX2-NEXT: vpinsrb $7, %edi, %xmm1, %xmm1
+; AVX2-NEXT: shlq $55, %r10
+; AVX2-NEXT: sarq $63, %r10
+; AVX2-NEXT: vpinsrb $8, %r10d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $54, %rsi
+; AVX2-NEXT: sarq $63, %rsi
+; AVX2-NEXT: vpinsrb $9, %esi, %xmm1, %xmm1
+; AVX2-NEXT: shlq $53, %r11
+; AVX2-NEXT: sarq $63, %r11
+; AVX2-NEXT: vpinsrb $10, %r11d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $52, %r14
+; AVX2-NEXT: sarq $63, %r14
+; AVX2-NEXT: vpinsrb $11, %r14d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $51, %r15
+; AVX2-NEXT: sarq $63, %r15
+; AVX2-NEXT: vpinsrb $12, %r15d, %xmm1, %xmm1
+; AVX2-NEXT: shlq $50, %rax
+; AVX2-NEXT: sarq $63, %rax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: shlq $49, %rcx
+; AVX2-NEXT: sarq $63, %rcx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shrq $15, %rdx
+; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-NEXT: leaq -40(%rbp), %rsp
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i64_64i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovq %rdi, %k0
+; AVX512-NEXT: vpmovm2b %k0, %zmm0
+; AVX512-NEXT: retq
+ %1 = bitcast i64 %a0 to <64 x i1>
+ %2 = sext <64 x i1> %1 to <64 x i8>
+ ret <64 x i8> %2
+}
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
new file mode 100644
index 000000000000..aa9e60df1404
--- /dev/null
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
@@ -0,0 +1,3279 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512
+
+;
+; 128-bit vectors
+;
+
+define <2 x i64> @ext_i2_2i64(i2 %a0) {
+; SSE2-SSSE3-LABEL: ext_i2_2i64:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: andb $3, %dil
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
+; SSE2-SSSE3-NEXT: shrl %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %xmm1
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: ext_i2_2i64:
+; AVX12: # BB#0:
+; AVX12-NEXT: andb $3, %dil
+; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vmovq %rcx, %xmm0
+; AVX12-NEXT: shrl %eax
+; AVX12-NEXT: andl $1, %eax
+; AVX12-NEXT: vmovq %rax, %xmm1
+; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: ext_i2_2i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: andb $3, %dil
+; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX512-NEXT: kmovd %eax, %k1
+; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = bitcast i2 %a0 to <2 x i1>
+ %2 = zext <2 x i1> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define <4 x i32> @ext_i4_4i32(i4 %a0) {
+; SSE2-SSSE3-LABEL: ext_i4_4i32:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: andb $15, %dil
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-SSSE3-NEXT: movd %eax, %xmm0
+; SSE2-SSSE3-NEXT: shrl %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm2
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i4_4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: andb $15, %dil
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $3, %eax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i4_4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: andb $15, %dil
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $3, %eax
+; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i4_4i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: andb $15, %dil
+; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX512-NEXT: kmovd %eax, %k1
+; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = bitcast i4 %a0 to <4 x i1>
+ %2 = zext <4 x i1> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @ext_i8_8i16(i8 %a0) {
+; SSE2-SSSE3-LABEL: ext_i8_8i16:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: shrl $7, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm3
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: ext_i8_8i16:
+; AVX12: # BB#0:
+; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: movl %eax, %edx
+; AVX12-NEXT: andl $1, %edx
+; AVX12-NEXT: vmovd %edx, %xmm0
+; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $2, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $3, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $4, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $5, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $6, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: shrl $7, %eax
+; AVX12-NEXT: movzwl %ax, %eax
+; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: ext_i8_8i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k5
+; AVX512-NEXT: kshiftlw $8, %k5, %k0
+; AVX512-NEXT: kshiftrw $15, %k0, %k0
+; AVX512-NEXT: kshiftlw $9, %k5, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kshiftlw $10, %k5, %k2
+; AVX512-NEXT: kshiftrw $15, %k2, %k2
+; AVX512-NEXT: kshiftlw $11, %k5, %k3
+; AVX512-NEXT: kshiftrw $15, %k3, %k3
+; AVX512-NEXT: kshiftlw $12, %k5, %k4
+; AVX512-NEXT: kshiftrw $15, %k4, %k4
+; AVX512-NEXT: kshiftlw $13, %k5, %k6
+; AVX512-NEXT: kshiftrw $15, %k6, %k6
+; AVX512-NEXT: kshiftlw $15, %k5, %k7
+; AVX512-NEXT: kshiftrw $15, %k7, %k7
+; AVX512-NEXT: kshiftlw $14, %k5, %k5
+; AVX512-NEXT: kshiftrw $15, %k5, %k5
+; AVX512-NEXT: kmovd %k5, %eax
+; AVX512-NEXT: andl $1, %eax
+; AVX512-NEXT: kmovd %k7, %ecx
+; AVX512-NEXT: andl $1, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm0
+; AVX512-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT: kmovd %k6, %eax
+; AVX512-NEXT: andl $1, %eax
+; AVX512-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT: kmovd %k4, %eax
+; AVX512-NEXT: andl $1, %eax
+; AVX512-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX512-NEXT: kmovd %k3, %eax
+; AVX512-NEXT: andl $1, %eax
+; AVX512-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX512-NEXT: kmovd %k2, %eax
+; AVX512-NEXT: andl $1, %eax
+; AVX512-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX512-NEXT: kmovd %k1, %eax
+; AVX512-NEXT: andl $1, %eax
+; AVX512-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: andl $1, %eax
+; AVX512-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %1 = bitcast i8 %a0 to <8 x i1>
+ %2 = zext <8 x i1> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @ext_i16_16i8(i16 %a0) {
+; SSE2-SSSE3-LABEL: ext_i16_16i8:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: ext_i16_16i8:
+; AVX12: # BB#0:
+; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: movl %eax, %edx
+; AVX12-NEXT: andl $1, %edx
+; AVX12-NEXT: vmovd %edx, %xmm0
+; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $2, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $3, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $4, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $5, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $6, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $7, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $8, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $9, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $10, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $11, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $12, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $13, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $14, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: shrl $15, %eax
+; AVX12-NEXT: movzwl %ax, %eax
+; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: ext_i16_16i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: .Lcfi0:
+; AVX512-NEXT: .cfi_def_cfa_offset 16
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: .Lcfi1:
+; AVX512-NEXT: .cfi_def_cfa_offset 24
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: .Lcfi2:
+; AVX512-NEXT: .cfi_def_cfa_offset 32
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: .Lcfi3:
+; AVX512-NEXT: .cfi_def_cfa_offset 40
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: .Lcfi4:
+; AVX512-NEXT: .cfi_def_cfa_offset 48
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: .Lcfi5:
+; AVX512-NEXT: .cfi_def_cfa_offset 56
+; AVX512-NEXT: .Lcfi6:
+; AVX512-NEXT: .cfi_offset %rbx, -56
+; AVX512-NEXT: .Lcfi7:
+; AVX512-NEXT: .cfi_offset %r12, -48
+; AVX512-NEXT: .Lcfi8:
+; AVX512-NEXT: .cfi_offset %r13, -40
+; AVX512-NEXT: .Lcfi9:
+; AVX512-NEXT: .cfi_offset %r14, -32
+; AVX512-NEXT: .Lcfi10:
+; AVX512-NEXT: .cfi_offset %r15, -24
+; AVX512-NEXT: .Lcfi11:
+; AVX512-NEXT: .cfi_offset %rbp, -16
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: kshiftlw $14, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %r8d
+; AVX512-NEXT: kshiftlw $15, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %r9d
+; AVX512-NEXT: kshiftlw $13, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %r10d
+; AVX512-NEXT: kshiftlw $12, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %r11d
+; AVX512-NEXT: kshiftlw $11, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %r14d
+; AVX512-NEXT: kshiftlw $10, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %r15d
+; AVX512-NEXT: kshiftlw $9, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %r12d
+; AVX512-NEXT: kshiftlw $8, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %r13d
+; AVX512-NEXT: kshiftlw $7, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %esi
+; AVX512-NEXT: kshiftlw $6, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %ebx
+; AVX512-NEXT: kshiftlw $5, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %ebp
+; AVX512-NEXT: kshiftlw $4, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %edi
+; AVX512-NEXT: kshiftlw $3, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %eax
+; AVX512-NEXT: kshiftlw $2, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %ecx
+; AVX512-NEXT: kshiftlw $1, %k0, %k1
+; AVX512-NEXT: kshiftrw $15, %k1, %k1
+; AVX512-NEXT: kmovd %k1, %edx
+; AVX512-NEXT: kshiftrw $15, %k0, %k0
+; AVX512-NEXT: vmovd %r9d, %xmm0
+; AVX512-NEXT: kmovd %k0, %r9d
+; AVX512-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $2, %r10d, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $10, %ebp, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $11, %edi, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $14, %edx, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrb $15, %r9d, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %1 = bitcast i16 %a0 to <16 x i1>
+ %2 = zext <16 x i1> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+;
+; 256-bit vectors
+;
+
+define <4 x i64> @ext_i4_4i64(i4 %a0) {
+; SSE2-SSSE3-LABEL: ext_i4_4i64:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: andb $15, %dil
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-SSSE3-NEXT: movd %eax, %xmm2
+; SSE2-SSSE3-NEXT: shrl %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm0
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i4_4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: andb $15, %dil
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovq %rcx, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovq %rcx, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovq %rcx, %xmm1
+; AVX1-NEXT: shrl %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i4_4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: andb $15, %dil
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $3, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovq %rcx, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovq %rcx, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovq %rcx, %xmm1
+; AVX2-NEXT: shrl %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i4_4i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: andb $15, %dil
+; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX512-NEXT: kmovd %eax, %k1
+; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: retq
+ %1 = bitcast i4 %a0 to <4 x i1>
+ %2 = zext <4 x i1> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define <8 x i32> @ext_i8_8i32(i8 %a0) {
+; SSE2-SSSE3-LABEL: ext_i8_8i32:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: shrl $7, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm3
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i8_8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $5, %ecx
+; AVX1-NEXT: movl %eax, %edx
+; AVX1-NEXT: shrl $4, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $6, %ecx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $7, %ecx
+; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shrl $3, %eax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i8_8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $5, %ecx
+; AVX2-NEXT: movl %eax, %edx
+; AVX2-NEXT: shrl $4, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $6, %ecx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $7, %ecx
+; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shrl $3, %eax
+; AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i8_8i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i8 %a0 to <8 x i1>
+ %2 = zext <8 x i1> %1 to <8 x i32>
+ ret <8 x i32> %2
+}
+
+define <16 x i16> @ext_i16_16i16(i16 %a0) {
+; SSE2-SSSE3-LABEL: ext_i16_16i16:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i16_16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $9, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: movl %eax, %edx
+; AVX1-NEXT: shrl $8, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $10, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $11, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $12, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $13, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $14, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $15, %ecx
+; AVX1-NEXT: movzwl %cx, %ecx
+; AVX1-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: movl %eax, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vmovd %edx, %xmm1
+; AVX1-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $5, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $6, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shrl $7, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i16_16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $9, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: movl %eax, %edx
+; AVX2-NEXT: shrl $8, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $10, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $11, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $12, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $13, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $14, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $15, %ecx
+; AVX2-NEXT: movzwl %cx, %ecx
+; AVX2-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: movl %eax, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vmovd %edx, %xmm1
+; AVX2-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $3, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $5, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $6, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shrl $7, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i16_16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i16 %a0 to <16 x i1>
+ %2 = zext <16 x i1> %1 to <16 x i16>
+ ret <16 x i16> %2
+}
+
+define <32 x i8> @ext_i32_32i8(i32 %a0) {
+; SSE2-SSSE3-LABEL: ext_i32_32i8:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: shrl $16, %edi
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i32_32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi0:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi1:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi2:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $32, %rsp
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $17, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movl %edi, %ecx
+; AVX1-NEXT: shrl $16, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $18, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $19, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $20, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $21, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $22, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $23, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $25, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $26, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $27, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $28, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $29, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $30, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $31, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movl %edi, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm1
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $3, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $5, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $6, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $7, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $8, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $9, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $10, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $11, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $12, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $13, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $14, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX1-NEXT: shrl $15, %edi
+; AVX1-NEXT: andl $1, %edi
+; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i32_32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi0:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi1:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi2:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $32, %rsp
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $17, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movl %edi, %ecx
+; AVX2-NEXT: shrl $16, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $18, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $19, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $20, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $21, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $22, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $23, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $24, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $25, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $26, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $27, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $28, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $29, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $30, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $31, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movl %edi, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm1
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $3, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $4, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $5, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $6, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $7, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $8, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $9, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $10, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $11, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $12, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $13, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $14, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX2-NEXT: shrl $15, %edi
+; AVX2-NEXT: andl $1, %edi
+; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i32_32i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i32 %a0 to <32 x i1>
+ %2 = zext <32 x i1> %1 to <32 x i8>
+ ret <32 x i8> %2
+}
+
+;
+; 512-bit vectors
+;
+
+define <8 x i64> @ext_i8_8i64(i8 %a0) {
+; SSE2-SSSE3-LABEL: ext_i8_8i64:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: shrl $7, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i8_8i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: movl %eax, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $5, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $6, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $7, %eax
+; AVX1-NEXT: movzwl %ax, %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i8_8i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: movl %eax, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $3, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $5, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $6, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $7, %eax
+; AVX2-NEXT: movzwl %ax, %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
+; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i8_8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i8 %a0 to <8 x i1>
+ %2 = zext <8 x i1> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define <16 x i32> @ext_i16_16i32(i16 %a0) {
+; SSE2-SSSE3-LABEL: ext_i16_16i32:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i16_16i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: movl %eax, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $5, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $6, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $7, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $8, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $9, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $10, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $11, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $12, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $13, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $14, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $15, %eax
+; AVX1-NEXT: movzwl %ax, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i16_16i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: movl %eax, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $3, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $5, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $6, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $7, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $8, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $9, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $10, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $11, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $12, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $13, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $14, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $15, %eax
+; AVX2-NEXT: movzwl %ax, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
+; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i16_16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i16 %a0 to <16 x i1>
+ %2 = zext <16 x i1> %1 to <16 x i32>
+ ret <16 x i32> %2
+}
+
+define <32 x i16> @ext_i32_32i16(i32 %a0) {
+; SSE2-SSSE3-LABEL: ext_i32_32i16:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movl %edi, %eax
+; SSE2-SSSE3-NEXT: shrl $16, %eax
+; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i32_32i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi3:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi4:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi5:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $128, %rsp
+; AVX1-NEXT: .Lcfi6:
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .Lcfi7:
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .Lcfi8:
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .Lcfi9:
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .Lcfi10:
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, %r13d
+; AVX1-NEXT: movl %edi, %r12d
+; AVX1-NEXT: movl %edi, %r15d
+; AVX1-NEXT: movl %edi, %r14d
+; AVX1-NEXT: movl %edi, %ebx
+; AVX1-NEXT: movl %edi, %r11d
+; AVX1-NEXT: movl %edi, %r10d
+; AVX1-NEXT: movl %edi, %r9d
+; AVX1-NEXT: movl %edi, %r8d
+; AVX1-NEXT: movl %edi, %esi
+; AVX1-NEXT: movl %edi, %edx
+; AVX1-NEXT: movl %edi, %ecx
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: andl $1, %edi
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: shrl %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $3, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $4, %esi
+; AVX1-NEXT: andl $1, %esi
+; AVX1-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
+; AVX1-NEXT: shrl $5, %r8d
+; AVX1-NEXT: andl $1, %r8d
+; AVX1-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $6, %r9d
+; AVX1-NEXT: andl $1, %r9d
+; AVX1-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $7, %r10d
+; AVX1-NEXT: andl $1, %r10d
+; AVX1-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $8, %r11d
+; AVX1-NEXT: andl $1, %r11d
+; AVX1-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $9, %ebx
+; AVX1-NEXT: andl $1, %ebx
+; AVX1-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $10, %r14d
+; AVX1-NEXT: andl $1, %r14d
+; AVX1-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $11, %r15d
+; AVX1-NEXT: andl $1, %r15d
+; AVX1-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $12, %r12d
+; AVX1-NEXT: andl $1, %r12d
+; AVX1-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $13, %r13d
+; AVX1-NEXT: andl $1, %r13d
+; AVX1-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $14, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $15, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $16, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $17, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $18, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $19, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $20, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $21, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $22, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $23, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $25, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $26, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $27, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $28, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $29, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $30, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $31, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: leaq -40(%rbp), %rsp
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i32_32i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi3:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi4:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi5:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $128, %rsp
+; AVX2-NEXT: .Lcfi6:
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .Lcfi7:
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .Lcfi8:
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .Lcfi9:
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .Lcfi10:
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, %r13d
+; AVX2-NEXT: movl %edi, %r12d
+; AVX2-NEXT: movl %edi, %r15d
+; AVX2-NEXT: movl %edi, %r14d
+; AVX2-NEXT: movl %edi, %ebx
+; AVX2-NEXT: movl %edi, %r11d
+; AVX2-NEXT: movl %edi, %r10d
+; AVX2-NEXT: movl %edi, %r9d
+; AVX2-NEXT: movl %edi, %r8d
+; AVX2-NEXT: movl %edi, %esi
+; AVX2-NEXT: movl %edi, %edx
+; AVX2-NEXT: movl %edi, %ecx
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: andl $1, %edi
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: shrl %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $3, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $4, %esi
+; AVX2-NEXT: andl $1, %esi
+; AVX2-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
+; AVX2-NEXT: shrl $5, %r8d
+; AVX2-NEXT: andl $1, %r8d
+; AVX2-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $6, %r9d
+; AVX2-NEXT: andl $1, %r9d
+; AVX2-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $7, %r10d
+; AVX2-NEXT: andl $1, %r10d
+; AVX2-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $8, %r11d
+; AVX2-NEXT: andl $1, %r11d
+; AVX2-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $9, %ebx
+; AVX2-NEXT: andl $1, %ebx
+; AVX2-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $10, %r14d
+; AVX2-NEXT: andl $1, %r14d
+; AVX2-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $11, %r15d
+; AVX2-NEXT: andl $1, %r15d
+; AVX2-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $12, %r12d
+; AVX2-NEXT: andl $1, %r12d
+; AVX2-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $13, %r13d
+; AVX2-NEXT: andl $1, %r13d
+; AVX2-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $14, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $15, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $16, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $17, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $18, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $19, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $20, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $21, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $22, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $23, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $24, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $25, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $26, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $27, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $28, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $29, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $30, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $31, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: leaq -40(%rbp), %rsp
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i32_32i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i32 %a0 to <32 x i1>
+ %2 = zext <32 x i1> %1 to <32 x i16>
+ ret <32 x i16> %2
+}
+
+define <64 x i8> @ext_i64_64i8(i64 %a0) {
+; SSE2-SSSE3-LABEL: ext_i64_64i8:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movq %rdi, %rax
+; SSE2-SSSE3-NEXT: shrq $32, %rax
+; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movq %rdi, %rax
+; SSE2-SSSE3-NEXT: shrq $48, %rax
+; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: shrl $16, %edi
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm6
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm6
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm6
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm6
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm7
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i64_64i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi11:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi12:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi13:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $17, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movl %edi, %ecx
+; AVX1-NEXT: shrl $16, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $18, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $19, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $20, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $21, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $22, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $23, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $25, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $26, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $27, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $28, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $29, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $30, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $31, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movl %edi, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm1
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $3, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $5, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $6, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $7, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $8, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $9, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $10, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $11, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $12, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $13, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $14, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $15, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $49, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movq %rdi, %rcx
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm1
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $50, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $51, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $52, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $53, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $54, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $55, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $56, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $57, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $58, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $59, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $60, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $61, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $62, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $63, %rax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $33, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movq %rdi, %rcx
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $34, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $35, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $36, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $37, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $38, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $39, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $40, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $41, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $42, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $43, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $44, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $45, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: shrq $46, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX1-NEXT: shrq $47, %rdi
+; AVX1-NEXT: andl $1, %edi
+; AVX1-NEXT: vpinsrb $15, %edi, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i64_64i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi11:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi12:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi13:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $17, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movl %edi, %ecx
+; AVX2-NEXT: shrl $16, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $18, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $19, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $20, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $21, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $22, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $23, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $24, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $25, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $26, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $27, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $28, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $29, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $30, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $31, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movl %edi, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm1
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $3, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $4, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $5, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $6, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $7, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $8, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $9, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $10, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $11, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $12, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $13, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $14, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $15, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $49, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movq %rdi, %rcx
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm1
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $50, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $51, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $52, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $53, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $54, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $55, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $56, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $57, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $58, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $59, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $60, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $61, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $62, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $63, %rax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $33, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movq %rdi, %rcx
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $34, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $35, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $36, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $37, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $38, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $39, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $40, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $41, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $42, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $43, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $44, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $45, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: shrq $46, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX2-NEXT: shrq $47, %rdi
+; AVX2-NEXT: andl $1, %edi
+; AVX2-NEXT: vpinsrb $15, %edi, %xmm2, %xmm2
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i64_64i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovq %rdi, %k1
+; AVX512-NEXT: vmovdqu8 {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512-NEXT: retq
+ %1 = bitcast i64 %a0 to <64 x i1>
+ %2 = zext <64 x i1> %1 to <64 x i8>
+ ret <64 x i8> %2
+}
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
new file mode 100644
index 000000000000..a190e0575522
--- /dev/null
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
@@ -0,0 +1,685 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512
+
+define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
+; SSE2-SSSE3-LABEL: bitcast_i2_2i1:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
+; SSE2-SSSE3-NEXT: shrl %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %xmm1
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: bitcast_i2_2i1:
+; AVX12: # BB#0:
+; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vmovq %rcx, %xmm0
+; AVX12-NEXT: shrl %eax
+; AVX12-NEXT: andl $1, %eax
+; AVX12-NEXT: vmovq %rax, %xmm1
+; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: bitcast_i2_2i1:
+; AVX512: # BB#0:
+; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX512-NEXT: kmovd %eax, %k1
+; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = bitcast i2 %a0 to <2 x i1>
+ ret <2 x i1> %1
+}
+
+define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
+; SSE2-SSSE3-LABEL: bitcast_i4_4i1:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-SSSE3-NEXT: movd %eax, %xmm0
+; SSE2-SSSE3-NEXT: shrl %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm2
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_i4_4i1:
+; AVX1: # BB#0:
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $3, %eax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i4_4i1:
+; AVX2: # BB#0:
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $3, %eax
+; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: bitcast_i4_4i1:
+; AVX512: # BB#0:
+; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX512-NEXT: kmovd %eax, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = bitcast i4 %a0 to <4 x i1>
+ ret <4 x i1> %1
+}
+
+define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
+; SSE2-SSSE3-LABEL: bitcast_i8_8i1:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: shrl $7, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm3
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: bitcast_i8_8i1:
+; AVX12: # BB#0:
+; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: movl %eax, %edx
+; AVX12-NEXT: andl $1, %edx
+; AVX12-NEXT: vmovd %edx, %xmm0
+; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $2, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $3, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $4, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $5, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $6, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: shrl $7, %eax
+; AVX12-NEXT: movzwl %ax, %eax
+; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: bitcast_i8_8i1:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: vpmovm2w %k0, %xmm0
+; AVX512-NEXT: retq
+ %1 = bitcast i8 %a0 to <8 x i1>
+ ret <8 x i1> %1
+}
+
+define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
+; SSE2-SSSE3-LABEL: bitcast_i16_16i1:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: bitcast_i16_16i1:
+; AVX12: # BB#0:
+; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: movl %eax, %edx
+; AVX12-NEXT: andl $1, %edx
+; AVX12-NEXT: vmovd %edx, %xmm0
+; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $2, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $3, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $4, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $5, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $6, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $7, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $8, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $9, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $10, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $11, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $12, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $13, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $14, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX12-NEXT: shrl $15, %eax
+; AVX12-NEXT: movzwl %ax, %eax
+; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: bitcast_i16_16i1:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: vpmovm2b %k0, %xmm0
+; AVX512-NEXT: retq
+ %1 = bitcast i16 %a0 to <16 x i1>
+ ret <16 x i1> %1
+}
+
+define <32 x i1> @bitcast_i32_32i1(i32 %a0) {
+; SSE2-SSSE3-LABEL: bitcast_i32_32i1:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movl %esi, (%rdi)
+; SSE2-SSSE3-NEXT: movq %rdi, %rax
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_i32_32i1:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi0:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi1:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi2:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $32, %rsp
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $17, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movl %edi, %ecx
+; AVX1-NEXT: shrl $16, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $18, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $19, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $20, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $21, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $22, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $23, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $25, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $26, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $27, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $28, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $29, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $30, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $31, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movl %edi, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm1
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $3, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $5, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $6, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $7, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $8, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $9, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $10, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $11, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $12, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $13, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: shrl $14, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX1-NEXT: shrl $15, %edi
+; AVX1-NEXT: andl $1, %edi
+; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i32_32i1:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi0:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi1:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi2:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $32, %rsp
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $17, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movl %edi, %ecx
+; AVX2-NEXT: shrl $16, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $18, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $19, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $20, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $21, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $22, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $23, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $24, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $25, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $26, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $27, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $28, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $29, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $30, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $31, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movl %edi, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm1
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $3, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $4, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $5, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $6, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $7, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $8, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $9, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $10, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $11, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $12, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $13, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: shrl $14, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX2-NEXT: shrl $15, %edi
+; AVX2-NEXT: andl $1, %edi
+; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: bitcast_i32_32i1:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: vpmovm2b %k0, %ymm0
+; AVX512-NEXT: retq
+ %1 = bitcast i32 %a0 to <32 x i1>
+ ret <32 x i1> %1
+}
+
+define <64 x i1> @bitcast_i64_64i1(i64 %a0) {
+; SSE2-SSSE3-LABEL: bitcast_i64_64i1:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movq %rsi, (%rdi)
+; SSE2-SSSE3-NEXT: movq %rdi, %rax
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: bitcast_i64_64i1:
+; AVX12: # BB#0:
+; AVX12-NEXT: movq %rsi, (%rdi)
+; AVX12-NEXT: movq %rdi, %rax
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: bitcast_i64_64i1:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovq %rdi, %k0
+; AVX512-NEXT: vpmovm2b %k0, %zmm0
+; AVX512-NEXT: retq
+ %1 = bitcast i64 %a0 to <64 x i1>
+ ret <64 x i1> %1
+}
diff --git a/test/CodeGen/X86/bitcast-setcc-128.ll b/test/CodeGen/X86/bitcast-setcc-128.ll
index 9bf7b41a4f26..5616276da08d 100644
--- a/test/CodeGen/X86/bitcast-setcc-128.ll
+++ b/test/CodeGen/X86/bitcast-setcc-128.ll
@@ -1,41 +1,41 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK,SSE2-SSSE3,SSE2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 < %s | FileCheck %s --check-prefixes=CHECK,SSE2-SSSE3,SSSE3
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK,AVX12,AVX1
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=CHECK,AVX12,AVX2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefixes=CHECK,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512
define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: v8i16:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
; SSE2-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: pmovmskb %xmm0, %eax
-; SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v8i16:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
; SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v8i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: retq
%x = icmp sgt <8 x i16> %a, %b
%res = bitcast <8 x i1> %x to i8
@@ -44,21 +44,21 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-SSSE3-LABEL: v4i32:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v4i32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -71,21 +71,21 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) {
define i4 @v4f32(<4 x float> %a, <4 x float> %b) {
; SSE2-SSSE3-LABEL: v4f32:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v4f32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -98,24 +98,24 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b) {
define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-SSSE3-LABEL: v16i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v16i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX512-NEXT: retq
%x = icmp sgt <16 x i8> %a, %b
%res = bitcast <16 x i1> %x to i16
@@ -124,7 +124,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; SSE2-SSSE3-LABEL: v2i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: psllq $56, %xmm0
; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSE2-SSSE3-NEXT: psrad $31, %xmm2
@@ -151,11 +151,11 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vpsllq $56, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $24, %xmm1, %xmm1
@@ -168,11 +168,11 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpsllq $56, %xmm1, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vpsrad $24, %xmm1, %xmm1
@@ -185,11 +185,11 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: retq
;
; AVX512-LABEL: v2i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpsllq $56, %xmm1, %xmm1
; AVX512-NEXT: vpsraq $56, %xmm1, %xmm1
; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0
@@ -206,7 +206,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; SSE2-SSSE3-LABEL: v2i16:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: psllq $48, %xmm0
; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSE2-SSSE3-NEXT: psrad $31, %xmm2
@@ -233,11 +233,11 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
@@ -250,11 +250,11 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vpsrad $16, %xmm1, %xmm1
@@ -267,11 +267,11 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: retq
;
; AVX512-LABEL: v2i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX512-NEXT: vpsraq $48, %xmm1, %xmm1
; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0
@@ -288,7 +288,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; SSE2-SSSE3-LABEL: v2i32:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: psllq $32, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-SSSE3-NEXT: psrad $31, %xmm0
@@ -311,11 +311,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -326,11 +326,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -341,11 +341,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: retq
;
; AVX512-LABEL: v2i32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512-NEXT: vpsraq $32, %xmm1, %xmm1
; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0
@@ -362,7 +362,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-SSSE3-LABEL: v2i64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1
; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0
@@ -375,18 +375,18 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v2i64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -399,21 +399,21 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
define i2 @v2f64(<2 x double> %a, <2 x double> %b) {
; SSE2-SSSE3-LABEL: v2f64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v2f64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -426,29 +426,29 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b) {
define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
; SSE2-SSSE3-LABEL: v4i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pslld $24, %xmm1
; SSE2-SSSE3-NEXT: psrad $24, %xmm1
; SSE2-SSSE3-NEXT: pslld $24, %xmm0
; SSE2-SSSE3-NEXT: psrad $24, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpslld $24, %xmm1, %xmm1
; AVX12-NEXT: vpsrad $24, %xmm1, %xmm1
; AVX12-NEXT: vpslld $24, %xmm0, %xmm0
; AVX12-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v4i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpslld $24, %xmm1, %xmm1
; AVX512-NEXT: vpsrad $24, %xmm1, %xmm1
; AVX512-NEXT: vpslld $24, %xmm0, %xmm0
@@ -465,29 +465,29 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
; SSE2-SSSE3-LABEL: v4i16:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pslld $16, %xmm1
; SSE2-SSSE3-NEXT: psrad $16, %xmm1
; SSE2-SSSE3-NEXT: pslld $16, %xmm0
; SSE2-SSSE3-NEXT: psrad $16, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpslld $16, %xmm1, %xmm1
; AVX12-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX12-NEXT: vpslld $16, %xmm0, %xmm0
; AVX12-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v4i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpslld $16, %xmm1, %xmm1
; AVX512-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX512-NEXT: vpslld $16, %xmm0, %xmm0
@@ -504,7 +504,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; SSE2-LABEL: v8i8:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
; SSE2-NEXT: psllw $8, %xmm1
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: psllw $8, %xmm0
@@ -513,11 +513,11 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: pmovmskb %xmm0, %eax
-; SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v8i8:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
; SSSE3-NEXT: psllw $8, %xmm1
; SSSE3-NEXT: psraw $8, %xmm1
; SSSE3-NEXT: psllw $8, %xmm0
@@ -525,11 +525,11 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSSE3-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
; AVX12-NEXT: vpsllw $8, %xmm1, %xmm1
; AVX12-NEXT: vpsraw $8, %xmm1, %xmm1
; AVX12-NEXT: vpsllw $8, %xmm0, %xmm0
@@ -537,18 +537,18 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX12-NEXT: retq
;
; AVX512-LABEL: v8i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpsllw $8, %xmm1, %xmm1
; AVX512-NEXT: vpsraw $8, %xmm1, %xmm1
; AVX512-NEXT: vpsllw $8, %xmm0, %xmm0
; AVX512-NEXT: vpsraw $8, %xmm0, %xmm0
; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: retq
%x = icmp sgt <8 x i8> %a, %b
%res = bitcast <8 x i1> %x to i8
diff --git a/test/CodeGen/X86/bitcast-setcc-256.ll b/test/CodeGen/X86/bitcast-setcc-256.ll
index b2c619c48d4d..86475c42e79e 100644
--- a/test/CodeGen/X86/bitcast-setcc-256.ll
+++ b/test/CodeGen/X86/bitcast-setcc-256.ll
@@ -1,23 +1,47 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefix=AVX512
define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
+; SSE2-SSSE3-LABEL: v16i16:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: pcmpgtw %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: pcmpgtw %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: packsswb %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
+; SSE2-SSSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: v16i16:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v16i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = icmp sgt <16 x i16> %a, %b
@@ -26,19 +50,53 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
}
define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
+; SSE2-LABEL: v8i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE2-NEXT: packsswb %xmm1, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: v8i32:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm0
+; SSSE3-NEXT: packsswb %xmm1, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pmovmskb %xmm0, %eax
+; SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: v8i32:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v8i32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = icmp sgt <8 x i32> %a, %b
@@ -47,19 +105,51 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
}
define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
+; SSE2-LABEL: v8f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: cmpltps %xmm1, %xmm3
+; SSE2-NEXT: cmpltps %xmm0, %xmm2
+; SSE2-NEXT: packsswb %xmm3, %xmm2
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT: packuswb %xmm2, %xmm2
+; SSE2-NEXT: pmovmskb %xmm2, %eax
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: v8f32:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: cmpltps %xmm1, %xmm3
+; SSSE3-NEXT: cmpltps %xmm0, %xmm2
+; SSSE3-NEXT: packsswb %xmm3, %xmm2
+; SSSE3-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pmovmskb %xmm2, %eax
+; SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v8f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: v8f32:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v8f32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = fcmp ogt <8 x float> %a, %b
@@ -68,15 +158,241 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
}
define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
+; SSE2-SSSE3-LABEL: v32i8:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-SSSE3-NEXT: shll $16, %ecx
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: orl %ecx, %eax
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi0:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi1:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi2:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $32, %rsp
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: movl (%rsp), %eax
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: v32i8:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v32i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: vzeroupper
@@ -87,16 +403,56 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
}
define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
+; SSE2-SSSE3-LABEL: v4i64:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm5
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm6, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm1, %xmm3
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: packsswb %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vmovmskps %xmm0, %eax
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: v4i64:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v4i64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -109,16 +465,35 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
}
define i4 @v4f64(<4 x double> %a, <4 x double> %b) {
+; SSE2-SSSE3-LABEL: v4f64:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: cmpltpd %xmm1, %xmm3
+; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: packsswb %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
+; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v4f64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovmskps %xmm0, %eax
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: v4f64:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
-; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v4f64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
diff --git a/test/CodeGen/X86/bitcast-setcc-512.ll b/test/CodeGen/X86/bitcast-setcc-512.ll
new file mode 100644
index 000000000000..4a5ef99a8653
--- /dev/null
+++ b/test/CodeGen/X86/bitcast-setcc-512.ll
@@ -0,0 +1,1377 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW
+
+define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
+; SSE-LABEL: v32i16:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtw %xmm7, %xmm3
+; SSE-NEXT: pextrb $14, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pcmpgtw %xmm6, %xmm2
+; SSE-NEXT: pextrb $14, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pcmpgtw %xmm5, %xmm1
+; SSE-NEXT: pextrb $14, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pcmpgtw %xmm4, %xmm0
+; SSE-NEXT: pextrb $14, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; SSE-NEXT: shll $16, %ecx
+; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: orl %ecx, %eax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v32i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi0:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi1:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi2:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $32, %rsp
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpextrb $14, %xmm4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm4, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm1, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: movl (%rsp), %eax
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v32i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: .Lcfi0:
+; AVX512F-NEXT: .cfi_def_cfa_offset 16
+; AVX512F-NEXT: .Lcfi1:
+; AVX512F-NEXT: .cfi_offset %rbp, -16
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: .Lcfi2:
+; AVX512F-NEXT: .cfi_def_cfa_register %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $32, %rsp
+; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm1
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm0
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, (%rsp)
+; AVX512F-NEXT: movl (%rsp), %eax
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x = icmp sgt <32 x i16> %a, %b
+ %res = bitcast <32 x i1> %x to i32
+ ret i32 %res
+}
+
+define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
+; SSE-LABEL: v16i32:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm7, %xmm3
+; SSE-NEXT: pcmpgtd %xmm6, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: pcmpgtd %xmm5, %xmm1
+; SSE-NEXT: pcmpgtd %xmm4, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm2, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v16i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v16i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v16i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v16i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x = icmp sgt <16 x i32> %a, %b
+ %res = bitcast <16 x i1> %x to i16
+ ret i16 %res
+}
+
+define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
+; SSE-LABEL: v16f32:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm3, %xmm7
+; SSE-NEXT: cmpltps %xmm2, %xmm6
+; SSE-NEXT: packsswb %xmm7, %xmm6
+; SSE-NEXT: cmpltps %xmm1, %xmm5
+; SSE-NEXT: cmpltps %xmm0, %xmm4
+; SSE-NEXT: packsswb %xmm5, %xmm4
+; SSE-NEXT: packsswb %xmm6, %xmm4
+; SSE-NEXT: pmovmskb %xmm4, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v16f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v16f32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v16f32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v16f32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x = fcmp ogt <16 x float> %a, %b
+ %res = bitcast <16 x i1> %x to i16
+ ret i16 %res
+}
+
+define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
+; SSE-LABEL: v64i8:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtb %xmm5, %xmm1
+; SSE-NEXT: pextrb $15, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm1, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pcmpgtb %xmm4, %xmm0
+; SSE-NEXT: pextrb $15, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm0, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pcmpgtb %xmm7, %xmm3
+; SSE-NEXT: pextrb $15, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm3, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pcmpgtb %xmm6, %xmm2
+; SSE-NEXT: pextrb $15, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $14, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $13, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $12, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $11, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $10, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $9, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $8, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $7, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $6, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $5, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $4, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $3, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $2, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $1, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: pextrb $0, %xmm2, %eax
+; SSE-NEXT: andb $1, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: shll $16, %eax
+; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %ecx
+; SSE-NEXT: orl %eax, %ecx
+; SSE-NEXT: movl -{{[0-9]+}}(%rsp), %edx
+; SSE-NEXT: shll $16, %edx
+; SSE-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: orl %edx, %eax
+; SSE-NEXT: shlq $32, %rax
+; SSE-NEXT: orq %rcx, %rax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v64i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi3:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi4:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi5:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpcmpgtb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpextrb $15, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rsp)
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm0
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: movl (%rsp), %ecx
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: shlq $32, %rax
+; AVX1-NEXT: orq %rcx, %rax
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v64i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi0:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi1:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi2:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
+; AVX2-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-NEXT: movl (%rsp), %ecx
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: shlq $32, %rax
+; AVX2-NEXT: orq %rcx, %rax
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: .Lcfi3:
+; AVX512F-NEXT: .cfi_def_cfa_offset 16
+; AVX512F-NEXT: .Lcfi4:
+; AVX512F-NEXT: .cfi_offset %rbp, -16
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: .Lcfi5:
+; AVX512F-NEXT: .cfi_def_cfa_register %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $64, %rsp
+; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512F-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, (%rsp)
+; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512F-NEXT: movl (%rsp), %ecx
+; AVX512F-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX512F-NEXT: shlq $32, %rax
+; AVX512F-NEXT: orq %rcx, %rax
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: kmovq %k0, %rax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x = icmp sgt <64 x i8> %a, %b
+ %res = bitcast <64 x i1> %x to i64
+ ret i64 %res
+}
+
+define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
+; SSE-LABEL: v8i64:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtq %xmm7, %xmm3
+; SSE-NEXT: pcmpgtq %xmm6, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: pcmpgtq %xmm5, %xmm1
+; SSE-NEXT: pcmpgtq %xmm4, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm2, %xmm0
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v8i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v8i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v8i64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v8i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x = icmp sgt <8 x i64> %a, %b
+ %res = bitcast <8 x i1> %x to i8
+ ret i8 %res
+}
+
+define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
+; SSE-LABEL: v8f64:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm3, %xmm7
+; SSE-NEXT: cmpltpd %xmm2, %xmm6
+; SSE-NEXT: packsswb %xmm7, %xmm6
+; SSE-NEXT: cmpltpd %xmm1, %xmm5
+; SSE-NEXT: cmpltpd %xmm0, %xmm4
+; SSE-NEXT: packsswb %xmm5, %xmm4
+; SSE-NEXT: packsswb %xmm6, %xmm4
+; SSE-NEXT: pshufb {{.*#+}} xmm4 = xmm4[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSE-NEXT: pmovmskb %xmm4, %eax
+; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v8f64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v8f64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v8f64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v8f64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %x = fcmp ogt <8 x double> %a, %b
+ %res = bitcast <8 x i1> %x to i8
+ ret i8 %res
+}
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index c7de65d84507..b3f6534d14b3 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -354,6 +354,7 @@ define void @unnatural_cfg2() {
; single-source GCC.
; CHECK-LABEL: unnatural_cfg2
; CHECK: %entry
+; CHECK: %loop.header
; CHECK: %loop.body1
; CHECK: %loop.body2
; CHECK: %loop.body4
@@ -361,7 +362,6 @@ define void @unnatural_cfg2() {
; CHECK: %loop.inner2.begin
; CHECK: %loop.body3
; CHECK: %loop.inner1.begin
-; CHECK: %loop.header
; CHECK: %bail
entry:
@@ -1491,6 +1491,102 @@ ret: ; preds = %endif, %then
ret void
}
+define i32 @not_rotate_if_extra_branch(i32 %count) {
+; Test checks that there is no loop rotation
+; if it introduces an extra branch.
+; Specifically, in this case the best exit is .header,
+; but it has a fallthrough to the .middle block, and the last
+; block in the loop chain, .slow, does not have a fallthrough to .header.
+; CHECK-LABEL: not_rotate_if_extra_branch
+; CHECK: %.entry
+; CHECK: %.header
+; CHECK: %.middle
+; CHECK: %.backedge
+; CHECK: %.slow
+; CHECK: %.bailout
+; CHECK: %.stop
+.entry:
+ %sum.0 = shl nsw i32 %count, 1
+ br label %.header
+
+.header:
+ %i = phi i32 [ %i.1, %.backedge ], [ 0, %.entry ]
+ %sum = phi i32 [ %sum.1, %.backedge ], [ %sum.0, %.entry ]
+ %is_exc = icmp sgt i32 %i, 9000000
+ br i1 %is_exc, label %.bailout, label %.middle, !prof !13
+
+.bailout:
+ %sum.2 = add nsw i32 %count, 1
+ br label %.stop
+
+.middle:
+ %pr.1 = and i32 %i, 1023
+ %pr.2 = icmp eq i32 %pr.1, 0
+ br i1 %pr.2, label %.slow, label %.backedge, !prof !14
+
+.slow:
+ tail call void @effect(i32 %sum)
+ br label %.backedge
+
+.backedge:
+ %sum.1 = add nsw i32 %i, %sum
+ %i.1 = add nsw i32 %i, 1
+ %end = icmp slt i32 %i.1, %count
+ br i1 %end, label %.header, label %.stop, !prof !15
+
+.stop:
+ %sum.phi = phi i32 [ %sum.1, %.backedge ], [ %sum.2, %.bailout ]
+ ret i32 %sum.phi
+}
+
+define i32 @not_rotate_if_extra_branch_regression(i32 %count, i32 %init) {
+; This is a regression test for the patch that avoids loop
+; rotation if it introduces an extra branch.
+; CHECK-LABEL: not_rotate_if_extra_branch_regression
+; CHECK: %.entry
+; CHECK: %.first_backedge
+; CHECK: %.slow
+; CHECK: %.second_header
+.entry:
+ %sum.0 = shl nsw i32 %count, 1
+ br label %.first_header
+
+.first_header:
+ %i = phi i32 [ %i.1, %.first_backedge ], [ 0, %.entry ]
+ %is_bo1 = icmp sgt i32 %i, 9000000
+ br i1 %is_bo1, label %.bailout, label %.first_backedge, !prof !14
+
+.first_backedge:
+ %i.1 = add nsw i32 %i, 1
+ %end = icmp slt i32 %i.1, %count
+ br i1 %end, label %.first_header, label %.second_header, !prof !13
+
+.second_header:
+ %j = phi i32 [ %j.1, %.second_backedge ], [ %init, %.first_backedge ]
+ %end.2 = icmp sgt i32 %j, %count
+ br i1 %end.2, label %.stop, label %.second_middle, !prof !14
+
+.second_middle:
+ %is_slow = icmp sgt i32 %j, 9000000
+ br i1 %is_slow, label %.slow, label %.second_backedge, !prof !14
+
+.slow:
+ tail call void @effect(i32 %j)
+ br label %.second_backedge
+
+.second_backedge:
+ %j.1 = add nsw i32 %j, 1
+ %end.3 = icmp slt i32 %j, 10000000
+ br i1 %end.3, label %.second_header, label %.stop, !prof !13
+
+.stop:
+ %res = add nsw i32 %j, %i.1
+ ret i32 %res
+
+.bailout:
+ ret i32 0
+}
+
declare void @effect(i32)
!5 = !{!"branch_weights", i32 84, i32 16}
@@ -1501,3 +1597,6 @@ declare void @effect(i32)
!10 = !{!"branch_weights", i32 90, i32 10}
!11 = !{!"branch_weights", i32 1, i32 1}
!12 = !{!"branch_weights", i32 5, i32 3}
+!13 = !{!"branch_weights", i32 1, i32 1}
+!14 = !{!"branch_weights", i32 1, i32 1023}
+!15 = !{!"branch_weights", i32 4095, i32 1}
diff --git a/test/CodeGen/X86/bool-simplify.ll b/test/CodeGen/X86/bool-simplify.ll
index a0a1c3646624..7f7f9791d903 100644
--- a/test/CodeGen/X86/bool-simplify.ll
+++ b/test/CodeGen/X86/bool-simplify.ll
@@ -1,45 +1,62 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse4.1,-avx,+rdrnd,+rdseed | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1,-avx,+rdrnd,+rdseed | FileCheck %s
define i32 @foo(<2 x i64> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: foo:
+; CHECK: # BB#0:
+; CHECK-NEXT: ptest %xmm0, %xmm0
+; CHECK-NEXT: cmovnel %esi, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
%t1 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %c, <2 x i64> %c)
%t2 = icmp ne i32 %t1, 0
%t3 = select i1 %t2, i32 %a, i32 %b
ret i32 %t3
-; CHECK: foo
-; CHECK: ptest
-; CHECK-NOT: testl
-; CHECK: cmov
-; CHECK: ret
}
define i32 @bar(<2 x i64> %c) {
+; CHECK-LABEL: bar:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: ptest %xmm0, %xmm0
+; CHECK-NEXT: jne .LBB1_2
+; CHECK-NEXT: # BB#1: # %if-true-block
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB1_2: # %endif-block
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: retq
entry:
%0 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %c, <2 x i64> %c)
%1 = icmp ne i32 %0, 0
br i1 %1, label %if-true-block, label %endif-block
-if-true-block: ; preds = %entry
+if-true-block:
ret i32 0
-endif-block: ; preds = %entry,
+endif-block:
ret i32 1
-; CHECK: bar
-; CHECK: ptest
-; CHECK-NOT: testl
-; CHECK: jne
-; CHECK: ret
}
define i32 @bax(<2 x i64> %c) {
+; CHECK-LABEL: bax:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: ptest %xmm0, %xmm0
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
%t1 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %c, <2 x i64> %c)
%t2 = icmp eq i32 %t1, 1
%t3 = zext i1 %t2 to i32
ret i32 %t3
-; CHECK: bax
-; CHECK: ptest
-; CHECK-NOT: cmpl
-; CHECK: ret
}
-define i16 @rnd16(i16 %arg) nounwind uwtable {
+define i16 @rnd16(i16 %arg) nounwind {
+; CHECK-LABEL: rnd16:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rdrandw %cx
+; CHECK-NEXT: cmovbw %di, %ax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: retq
%1 = tail call { i16, i32 } @llvm.x86.rdrand.16() nounwind
%2 = extractvalue { i16, i32 } %1, 0
%3 = extractvalue { i16, i32 } %1, 1
@@ -47,14 +64,16 @@ define i16 @rnd16(i16 %arg) nounwind uwtable {
%5 = select i1 %4, i16 0, i16 %arg
%6 = add i16 %5, %2
ret i16 %6
-; CHECK: rnd16
-; CHECK: rdrand
-; CHECK: cmov
-; CHECK-NOT: cmov
-; CHECK: ret
}
-define i32 @rnd32(i32 %arg) nounwind uwtable {
+define i32 @rnd32(i32 %arg) nounwind {
+; CHECK-LABEL: rnd32:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rdrandl %ecx
+; CHECK-NEXT: cmovbl %edi, %eax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: retq
%1 = tail call { i32, i32 } @llvm.x86.rdrand.32() nounwind
%2 = extractvalue { i32, i32 } %1, 0
%3 = extractvalue { i32, i32 } %1, 1
@@ -62,14 +81,16 @@ define i32 @rnd32(i32 %arg) nounwind uwtable {
%5 = select i1 %4, i32 0, i32 %arg
%6 = add i32 %5, %2
ret i32 %6
-; CHECK: rnd32
-; CHECK: rdrand
-; CHECK: cmov
-; CHECK-NOT: cmov
-; CHECK: ret
}
-define i64 @rnd64(i64 %arg) nounwind uwtable {
+define i64 @rnd64(i64 %arg) nounwind {
+; CHECK-LABEL: rnd64:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rdrandq %rcx
+; CHECK-NEXT: cmovbq %rdi, %rax
+; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: retq
%1 = tail call { i64, i32 } @llvm.x86.rdrand.64() nounwind
%2 = extractvalue { i64, i32 } %1, 0
%3 = extractvalue { i64, i32 } %1, 1
@@ -77,14 +98,17 @@ define i64 @rnd64(i64 %arg) nounwind uwtable {
%5 = select i1 %4, i64 0, i64 %arg
%6 = add i64 %5, %2
ret i64 %6
-; CHECK: rnd64
-; CHECK: rdrand
-; CHECK: cmov
-; CHECK-NOT: cmov
-; CHECK: ret
}
-define i16 @seed16(i16 %arg) nounwind uwtable {
+define i16 @seed16(i16 %arg) nounwind {
+; CHECK-LABEL: seed16:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rdseedw %cx
+; CHECK-NEXT: cmovbw %di, %ax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: retq
%1 = tail call { i16, i32 } @llvm.x86.rdseed.16() nounwind
%2 = extractvalue { i16, i32 } %1, 0
%3 = extractvalue { i16, i32 } %1, 1
@@ -92,14 +116,16 @@ define i16 @seed16(i16 %arg) nounwind uwtable {
%5 = select i1 %4, i16 0, i16 %arg
%6 = add i16 %5, %2
ret i16 %6
-; CHECK: seed16
-; CHECK: rdseed
-; CHECK: cmov
-; CHECK-NOT: cmov
-; CHECK: ret
}
-define i32 @seed32(i32 %arg) nounwind uwtable {
+define i32 @seed32(i32 %arg) nounwind {
+; CHECK-LABEL: seed32:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rdseedl %ecx
+; CHECK-NEXT: cmovbl %edi, %eax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: retq
%1 = tail call { i32, i32 } @llvm.x86.rdseed.32() nounwind
%2 = extractvalue { i32, i32 } %1, 0
%3 = extractvalue { i32, i32 } %1, 1
@@ -107,14 +133,16 @@ define i32 @seed32(i32 %arg) nounwind uwtable {
%5 = select i1 %4, i32 0, i32 %arg
%6 = add i32 %5, %2
ret i32 %6
-; CHECK: seed32
-; CHECK: rdseed
-; CHECK: cmov
-; CHECK-NOT: cmov
-; CHECK: ret
}
-define i64 @seed64(i64 %arg) nounwind uwtable {
+define i64 @seed64(i64 %arg) nounwind {
+; CHECK-LABEL: seed64:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rdseedq %rcx
+; CHECK-NEXT: cmovbq %rdi, %rax
+; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: retq
%1 = tail call { i64, i32 } @llvm.x86.rdseed.64() nounwind
%2 = extractvalue { i64, i32 } %1, 0
%3 = extractvalue { i64, i32 } %1, 1
@@ -122,11 +150,6 @@ define i64 @seed64(i64 %arg) nounwind uwtable {
%5 = select i1 %4, i64 0, i64 %arg
%6 = add i64 %5, %2
ret i64 %6
-; CHECK: seed64
-; CHECK: rdseed
-; CHECK: cmov
-; CHECK-NOT: cmov
-; CHECK: ret
}
declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
index bbe31c5c2ac5..14bdb3853b03 100644
--- a/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ b/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -1,13 +1,12 @@
-; NOTE: Assertions have been simpilfied MANUALLY after running utils/update_llc_test_checks.py
-; Assertions for constant pools have been added MANUALLY.
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s -check-prefix=ALL -check-prefix=ALL32 -check-prefix=NO-AVX512BW -check-prefix=AVX2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s -check-prefix=ALL -check-prefix=ALL32 -check-prefix=NO-AVX512BW -check-prefix=AVX512
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f,+avx512bw | FileCheck %s -check-prefix=ALL -check-prefix=ALL32 -check-prefix=AVX512BW -check-prefix=AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s -check-prefix=ALL -check-prefix=ALL64 -check-prefix=NO-AVX512BW -check-prefix=AVX2 -check-prefix=AVX2-64
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s -check-prefix=ALL -check-prefix=ALL64 -check-prefix=NO-AVX512BW -check-prefix=AVX512 -check-prefix=AVX512F-64
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw | FileCheck %s -check-prefix=ALL -check-prefix=ALL64 -check-prefix=AVX512BW -check-prefix=AVX512 -check-prefix=AVX512BW-64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s -check-prefix=ALL32 -check-prefix=NO-AVX512BW -check-prefix=AVX2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s -check-prefix=ALL32 -check-prefix=NO-AVX512BW -check-prefix=AVX512
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f,+avx512bw | FileCheck %s -check-prefix=ALL32 -check-prefix=AVX512 -check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-64
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s -check-prefix=ALL64 -check-prefix=NO-AVX512BW-64 -check-prefix=AVX2-64
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s -check-prefix=ALL64 -check-prefix=NO-AVX512BW-64 -check-prefix=AVX512F-64
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw | FileCheck %s -check-prefix=ALL64 -check-prefix=AVX512F-64 -check-prefix=AVX512BW-64
;===-----------------------------------------------------------------------------===
; This test checks the ability to recognize a cross element pattern of
@@ -17,20 +16,31 @@
; <i32 0, i32 1, i32 0, i32 1> => broadcast of the constant vector <i32 0, i32 1>
;===-----------------------------------------------------------------------------===
-; ALL: LCPI0
-; ALL-NEXT: .short 256 # 0x100
-
define <16 x i8> @f16xi8_i16(<16 x i8> %a) {
+; AVX-LABEL: f16xi8_i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f16xi8_i16:
; ALL32: # BB#0:
-; ALL32-NEXT: vpbroadcastw {{\.LCPI.*}}, %xmm1
+; ALL32-NEXT: vpbroadcastw {{.*#+}} xmm1 = [256,256,256,256,256,256,256,256]
; ALL32-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f16xi8_i16:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f16xi8_i16:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastw {{.*}}(%rip), %xmm1
+; ALL64-NEXT: vpbroadcastw {{.*#+}} xmm1 = [256,256,256,256,256,256,256,256]
; ALL64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL64-NEXT: retq
@@ -40,45 +50,48 @@ define <16 x i8> @f16xi8_i16(<16 x i8> %a) {
}
-; ALL: .LCPI1
-; ALL-NEXT: .long 50462976 # 0x3020100
-
-; AVX: .LCPI1
-; AVX-NEXT .long 50462976 # float 3.82047143E-37
-
define <16 x i8> @f16xi8_i32(<16 x i8> %a) {
+; AVX-LABEL: f16xi8_i32:
+; AVX: # BB#0:
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f16xi8_i32:
; ALL32: # BB#0:
-; ALL32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm1
+; ALL32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [50462976,50462976,50462976,50462976]
; ALL32-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f16xi8_i32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f16xi8_i32:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; ALL64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [50462976,50462976,50462976,50462976]
; ALL64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL64-NEXT: retq
-;
-; AVX-LABEL: f16xi8_i32:
-; AVX: # BB#0:
-; AVX-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm1
-; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
%res1 = add <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>, %a
%res2 = and <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>, %res1
ret <16 x i8> %res2
}
-; ALL64: .LCPI2
-; ALL64-NEXT: .quad 506097522914230528 # 0x706050403020100
-
-; AVX: .LCPI2
-; AVX-NEXT: .quad 506097522914230528 # double 7.9499288951273625E-275
-
define <16 x i8> @f16xi8_i64(<16 x i8> %a) {
+; AVX-LABEL: f16xi8_i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f16xi8_i64:
; ALL32: # BB#0:
; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
@@ -86,38 +99,56 @@ define <16 x i8> @f16xi8_i64(<16 x i8> %a) {
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f16xi8_i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f16xi8_i64:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastq {{.*}}(%rip), %xmm1
+; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [506097522914230528,506097522914230528]
; ALL64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL64-NEXT: retq
-;
-; AVX-LABEL: f16xi8_i64:
-; AVX: # BB#0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
-; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
%res1 = add <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, %a
%res2 = and <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, %res1
ret <16 x i8> %res2
}
-; ALL: .LCPI3
-; ALL-NEXT: .short 256 # 0x100
-
define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
+; AVX-LABEL: f32xi8_i16:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f32xi8_i16:
; ALL32: # BB#0:
-; ALL32-NEXT: vpbroadcastw {{\.LCPI.*}}, %ymm1
+; ALL32-NEXT: vpbroadcastw {{.*#+}} ymm1 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; ALL32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f32xi8_i16:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f32xi8_i16:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastw {{.*}}(%rip), %ymm1
+; ALL64-NEXT: vpbroadcastw {{.*#+}} ymm1 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; ALL64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL64-NEXT: retq
@@ -127,155 +158,273 @@ define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
}
-; ALL: .LCPI4
-; ALL-NEXT: .long 50462976 # 0x3020100
-
-; AVX: .LCPI4
-; AVX-NEXT: .long 50462976 # float 3.82047143E-37
-
define <32 x i8> @f32xi8_i32(<32 x i8> %a) {
+; AVX-LABEL: f32xi8_i32:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f32xi8_i32:
; ALL32: # BB#0:
-; ALL32-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm1
+; ALL32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; ALL32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f32xi8_i32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f32xi8_i32:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; ALL64-NEXT: vpbroadcastd {{.*#+}} ymm1 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; ALL64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL64-NEXT: retq
-;
-; AVX-LABEL: f32xi8_i32:
-; AVX: # BB#0:
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm2
-; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
%res1 = add <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>, %a
%res2 = and <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>, %res1
ret <32 x i8> %res2
}
-; ALL64: .LCPI5
-; ALL64-NEXT: .quad 506097522914230528 # 0x706050403020100
-
-; AVX: .LCPI5
-; AVX-NEXT: .quad 506097522914230528 # double 7.9499288951273625E-275
-
define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
+; AVX-LABEL: f32xi8_i64:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f32xi8_i64:
; ALL32: # BB#0:
-; ALL32-NEXT: vpbroadcastq {{\.LCPI.*}}, %ymm1
+; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
; ALL32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f32xi8_i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f32xi8_i64:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; ALL64-NEXT: vpbroadcastq {{.*#+}} ymm1 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528]
; ALL64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL64-NEXT: retq
-;
-; AVX-LABEL: f32xi8_i64:
-; AVX: # BB#0:
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
-; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
%res1 = add <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, %a
%res2 = and <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, %res1
ret <32 x i8> %res2
}
-; ALL: .LCPI6
-; ALL-NEXT: .byte 0 # 0x0
-; ALL-NEXT: .byte 1 # 0x1
-; ALL-NEXT: .byte 2 # 0x2
-; ALL-NEXT: .byte 3 # 0x3
-; ALL-NEXT: .byte 4 # 0x4
-; ALL-NEXT: .byte 5 # 0x5
-; ALL-NEXT: .byte 6 # 0x6
-; ALL-NEXT: .byte 7 # 0x7
-; ALL-NEXT: .byte 8 # 0x8
-; ALL-NEXT: .byte 9 # 0x9
-; ALL-NEXT: .byte 10 # 0xa
-; ALL-NEXT: .byte 11 # 0xb
-; ALL-NEXT: .byte 12 # 0xc
-; ALL-NEXT: .byte 13 # 0xd
-; ALL-NEXT: .byte 14 # 0xe
-; ALL-NEXT: .byte 15 # 0xf
-; ALL-NOT: .byte
-
define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
-; ALL-LABEL: f32xi8_i128:
-; ALL: # BB#0:
-; ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; ALL-NEXT: vpaddb %ymm1, %ymm0, %ymm0
-; ALL-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX-LABEL: f32xi8_i128:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f32xi8_i128:
+; ALL32: # BB#0:
+; ALL32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f32xi8_i128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
+; ALL64-LABEL: f32xi8_i128:
+; ALL64: # BB#0:
+; ALL64-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: retq
%res1 = add <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %a
%res2 = and <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %res1
ret <32 x i8> %res2
}
-; ALL: .LCPI7
-; ALL-NEXT: .short 256 # 0x100
-
define <64 x i8> @f64xi8_i16(<64 x i8> %a) {
+; AVX-LABEL: f64xi8_i16:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: retl
+;
; NO-AVX512BW-LABEL: f64xi8_i16:
; NO-AVX512BW: # BB#0:
-; NO-AVX512BW-NEXT: vpbroadcastw {{\.LCPI.*}}, %ymm2
+; NO-AVX512BW-NEXT: vpbroadcastw {{.*#+}} ymm2 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f64xi8_i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpbroadcastw {{\.LCPI.*}}, %zmm1
+; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm1 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retl
+;
+; AVX-64-LABEL: f64xi8_i16:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; NO-AVX512BW-64-LABEL: f64xi8_i16:
+; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64-NEXT: vpbroadcastw {{.*#+}} ymm2 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: retq
+;
+; AVX512BW-64-LABEL: f64xi8_i16:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vpbroadcastw {{.*#+}} zmm1 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
+; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: retq
%res1 = add <64 x i8> <i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1>, %a
%res2 = and <64 x i8> <i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1>, %res1
ret <64 x i8> %res2
}
-; ALL: .LCPI8
-; ALL-NEXT: .long 50462976 # 0x3020100
-
-; AVX: .LCPI8
-; AVX-NEXT: .long 50462976 # float 3.82047143E-37
-
define <64 x i8> @f64i8_i32(<64 x i8> %a) {
+; AVX-LABEL: f64i8_i32:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: retl
+;
; NO-AVX512BW-LABEL: f64i8_i32:
; NO-AVX512BW: # BB#0:
-; NO-AVX512BW-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm2
+; NO-AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm2 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f64i8_i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpbroadcastd {{\.LCPI.*}}, %zmm1
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} zmm1 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retl
;
-; AVX-LABEL: f64i8_i32:
+; AVX-64-LABEL: f64i8_i32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; NO-AVX512BW-64-LABEL: f64i8_i32:
+; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: retq
+;
+; AVX512BW-64-LABEL: f64i8_i32:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vpbroadcastd {{.*#+}} zmm1 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
+; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: retq
+ %res1 = add <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>, %a
+ %res2 = and <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>, %res1
+ ret <64 x i8> %res2
+}
+
+
+define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
+; AVX-LABEL: f64xi8_i64:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm3
+; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -283,43 +432,69 @@ define <64 x i8> @f64i8_i32(<64 x i8> %a) {
; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddb %xmm3, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
- %res1 = add <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>, %a
- %res2 = and <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>, %res1
- ret <64 x i8> %res2
-}
-
-
-; ALL64: .LCPI9
-; ALL64-NEXT: .quad 506097522914230528 # 0x706050403020100
-
-; ALL32: .LCPI9
-; ALL32-NEXT: .quad 506097522914230528 # double 7.9499288951273625E-275
-
-; AVX: .LCPI9
-; AVX-NEXT: .quad 506097522914230528 # double 7.9499288951273625E-275
-
-define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
+; AVX-NEXT: retl
+;
; NO-AVX512BW-LABEL: f64xi8_i64:
; NO-AVX512BW: # BB#0:
-; NO-AVX512BW-NEXT: vpbroadcastq {{.*}}, %ymm2
+; NO-AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f64xi8_i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpbroadcastq {{.*}}, %zmm1
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} zmm1 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retl
;
-; AVX-LABEL: f64xi8_i64:
+; AVX-64-LABEL: f64xi8_i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; NO-AVX512BW-64-LABEL: f64xi8_i64:
+; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528]
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: retq
+;
+; AVX512BW-64-LABEL: f64xi8_i64:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vpbroadcastq {{.*#+}} zmm1 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528]
+; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: retq
+ %res1 = add <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, %a
+ %res2 = and <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, %res1
+ ret <64 x i8> %res2
+}
+
+
+define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
+; AVX-LABEL: f64xi8_i128:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -327,143 +502,184 @@ define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddb %xmm3, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
- %res1 = add <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, %a
- %res2 = and <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, %res1
- ret <64 x i8> %res2
-}
-
-
-; ALL: .LCPI10
-; ALL-NEXT: .byte 0 # 0x0
-; ALL-NEXT: .byte 1 # 0x1
-; ALL-NEXT: .byte 2 # 0x2
-; ALL-NEXT: .byte 3 # 0x3
-; ALL-NEXT: .byte 4 # 0x4
-; ALL-NEXT: .byte 5 # 0x5
-; ALL-NEXT: .byte 6 # 0x6
-; ALL-NEXT: .byte 7 # 0x7
-; ALL-NEXT: .byte 8 # 0x8
-; ALL-NEXT: .byte 9 # 0x9
-; ALL-NEXT: .byte 10 # 0xa
-; ALL-NEXT: .byte 11 # 0xb
-; ALL-NEXT: .byte 12 # 0xc
-; ALL-NEXT: .byte 13 # 0xd
-; ALL-NEXT: .byte 14 # 0xe
-; ALL-NEXT: .byte 15 # 0xf
-; ALL-NOT: .byte
-
-define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
+; AVX-NEXT: retl
+;
; NO-AVX512BW-LABEL: f64xi8_i128:
; NO-AVX512BW: # BB#0:
-; NO-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; NO-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NO-AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f64xi8_i128:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retl
+;
+; AVX-64-LABEL: f64xi8_i128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; NO-AVX512BW-64-LABEL: f64xi8_i128:
+; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NO-AVX512BW-64-NEXT: # ymm2 = mem[0,1,0,1]
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: retq
+;
+; AVX512BW-64-LABEL: f64xi8_i128:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: retq
%res1 = add <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %a
%res2 = and <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %res1
ret <64 x i8> %res2
}
-; AVX512BW: .LCPI11
-; AVX512BW-NEXT: .byte 0 # 0x0
-; AVX512BW-NEXT: .byte 1 # 0x1
-; AVX512BW-NEXT: .byte 2 # 0x2
-; AVX512BW-NEXT: .byte 3 # 0x3
-; AVX512BW-NEXT: .byte 4 # 0x4
-; AVX512BW-NEXT: .byte 5 # 0x5
-; AVX512BW-NEXT: .byte 6 # 0x6
-; AVX512BW-NEXT: .byte 7 # 0x7
-; AVX512BW-NEXT: .byte 8 # 0x8
-; AVX512BW-NEXT: .byte 9 # 0x9
-; AVX512BW-NEXT: .byte 10 # 0xa
-; AVX512BW-NEXT: .byte 11 # 0xb
-; AVX512BW-NEXT: .byte 12 # 0xc
-; AVX512BW-NEXT: .byte 13 # 0xd
-; AVX512BW-NEXT: .byte 14 # 0xe
-; AVX512BW-NEXT: .byte 15 # 0xf
-; AVX512BW-NEXT: .byte 16 # 0x10
-; AVX512BW-NEXT: .byte 17 # 0x11
-; AVX512BW-NEXT: .byte 18 # 0x12
-; AVX512BW-NEXT: .byte 19 # 0x13
-; AVX512BW-NEXT: .byte 20 # 0x14
-; AVX512BW-NEXT: .byte 21 # 0x15
-; AVX512BW-NEXT: .byte 22 # 0x16
-; AVX512BW-NEXT: .byte 23 # 0x17
-; AVX512BW-NEXT: .byte 24 # 0x18
-; AVX512BW-NEXT: .byte 25 # 0x19
-; AVX512BW-NEXT: .byte 26 # 0x1a
-; AVX512BW-NEXT: .byte 27 # 0x1b
-; AVX512BW-NEXT: .byte 28 # 0x1c
-; AVX512BW-NEXT: .byte 29 # 0x1d
-; AVX512BW-NEXT: .byte 30 # 0x1e
-; AVX512BW-NEXT: .byte 31 # 0x1f
-; AVX512BW-NOT: .byte
-
define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
+; AVX-LABEL: f64xi8_i256:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-NEXT: vpaddb %xmm4, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddb %xmm4, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: retl
+;
+; NO-AVX512BW-LABEL: f64xi8_i256:
+; NO-AVX512BW: # BB#0:
+; NO-AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: retl
+;
; AVX512BW-LABEL: f64xi8_i256:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retl
+;
+; AVX-64-LABEL: f64xi8_i256:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-64-NEXT: vpaddb %xmm4, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddb %xmm4, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; NO-AVX512BW-64-LABEL: f64xi8_i256:
+; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: retq
+;
+; AVX512BW-64-LABEL: f64xi8_i256:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; AVX512BW-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: retq
%res1 = add <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, %a
%res2 = and <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, %res1
ret <64 x i8> %res2
}
-; ALL: .LCPI12
-; ALL-NEXT: .long 65536 # 0x10000
-
-; AVX: .LCPI12
-; AVX-NEXT: .long 65536 # float 9.18354962E-41
-
define <8 x i16> @f8xi16_i32(<8 x i16> %a) {
+; AVX-LABEL: f8xi16_i32:
+; AVX: # BB#0:
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f8xi16_i32:
; ALL32: # BB#0:
-; ALL32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm1
+; ALL32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65536,65536,65536,65536]
; ALL32-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f8xi16_i32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f8xi16_i32:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; ALL64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65536,65536,65536,65536]
; ALL64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL64-NEXT: retq
-;
-; AVX-LABEL: f8xi16_i32:
-; AVX: # BB#0:
-; AVX-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm1
-; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
%res1 = add <8 x i16> <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>, %a
%res2 = and <8 x i16> <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>, %res1
ret <8 x i16> %res2
}
-; ALL64: .LCPI13
-; ALL64-NEXT: .quad 844433520132096 # 0x3000200010000
-
-; ALL32: .LCPI13
-; ALL32-NEXT: .quad 844433520132096 # double 4.1720559249406128E-309
-
-; AVX: .LCPI13
-; AVX-NEXT: .quad 844433520132096 # double 4.1720559249406128E-309
-
define <8 x i16> @f8xi16_i64(<8 x i16> %a) {
+; AVX-LABEL: f8xi16_i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f8xi16_i64:
; ALL32: # BB#0:
; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
@@ -471,67 +687,66 @@ define <8 x i16> @f8xi16_i64(<8 x i16> %a) {
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f8xi16_i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f8xi16_i64:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastq {{.*}}(%rip), %xmm1
+; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [844433520132096,844433520132096]
; ALL64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL64-NEXT: retq
-;
-; AVX-LABEL: f8xi16_i64:
-; AVX: # BB#0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
-; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
%res1 = add <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3>, %a
%res2 = and <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3>, %res1
ret <8 x i16> %res2
}
-; ALL: .LCPI14
-; ALL-NEXT: .long 65536 # 0x10000
-
-; AVX: .LCPI14
-; AVX-NEXT: .long 65536 # float 9.18354962E-41
-
define <16 x i16> @f16xi16_i32(<16 x i16> %a) {
-; ALL-LABEL: f16xi16_i32:
-; ALL: # BB#0:
-; ALL-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm1
-; ALL-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; ALL-NEXT: vpand %ymm1, %ymm0, %ymm0
-;
; AVX-LABEL: f16xi16_i32:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm2
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f16xi16_i32:
+; ALL32: # BB#0:
+; ALL32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65536,65536,65536,65536,65536,65536,65536,65536]
+; ALL32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f16xi16_i32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
+; ALL64-LABEL: f16xi16_i32:
+; ALL64: # BB#0:
+; ALL64-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65536,65536,65536,65536,65536,65536,65536,65536]
+; ALL64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: retq
%res1 = add <16 x i16> <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>, %a
%res2 = and <16 x i16> <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>, %res1
ret <16 x i16> %res2
}
-; ALL64: .LCPI15
-; ALL64-NEXT: .quad 844433520132096 # 0x3000200010000
-
-; ALL32: .LCPI15
-; ALL32-NEXT: .quad 844433520132096 # double 4.1720559249406128E-309
-
-; AVX: .LCPI15
-; AVX-NEXT: .quad 844433520132096 # double 4.1720559249406128E-309
-
define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
-; ALL-LABEL: f16xi16_i64:
-; ALL: # BB#0:
-; ALL-NEXT: vpbroadcastq {{.*}}, %ymm1
-; ALL-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; ALL-NEXT: vpand %ymm1, %ymm0, %ymm0
-;
; AVX-LABEL: f16xi16_i64:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -540,60 +755,154 @@ define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f16xi16_i64:
+; ALL32: # BB#0:
+; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
+; ALL32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f16xi16_i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
+; ALL64-LABEL: f16xi16_i64:
+; ALL64: # BB#0:
+; ALL64-NEXT: vpbroadcastq {{.*#+}} ymm1 = [844433520132096,844433520132096,844433520132096,844433520132096]
+; ALL64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: retq
%res1 = add <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3>, %a
%res2 = and <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3>, %res1
ret <16 x i16> %res2
}
-; ALL: .LCPI16
-; ALL-NEXT: .short 0 # 0x0
-; ALL-NEXT: .short 1 # 0x1
-; ALL-NEXT: .short 2 # 0x2
-; ALL-NEXT: .short 3 # 0x3
-; ALL-NEXT: .short 4 # 0x4
-; ALL-NEXT: .short 5 # 0x5
-; ALL-NEXT: .short 6 # 0x6
-; ALL-NEXT: .short 7 # 0x7
-; ALL-NOT: .short
-
define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
-; ALL-LABEL: f16xi16_i128:
-; ALL: # BB#0:
-; ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; ALL-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; ALL-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX-LABEL: f16xi16_i128:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
+; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f16xi16_i128:
+; ALL32: # BB#0:
+; ALL32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f16xi16_i128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
+; ALL64-LABEL: f16xi16_i128:
+; ALL64: # BB#0:
+; ALL64-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: retq
%res1 = add <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %a
%res2 = and <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %res1
ret <16 x i16> %res2
}
-; ALL: .LCPI17
-; ALL-NEXT: .long 65536 # 0x10000
-
-; AVX: .LCPI17
-; AVX-NEXT: .long 65536 # float 9.18354962E-41
-
define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
+; AVX-LABEL: f32xi16_i32:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm3 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddw %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: retl
+;
; NO-AVX512BW-LABEL: f32xi16_i32:
; NO-AVX512BW: # BB#0:
-; NO-AVX512BW-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm2
+; NO-AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm2 = [65536,65536,65536,65536,65536,65536,65536,65536]
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f32xi16_i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpbroadcastd {{\.LCPI.*}}, %zmm1
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} zmm1 = [65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536]
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retl
;
-; AVX-LABEL: f32xi16_i32:
+; AVX-64-LABEL: f32xi16_i32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm3 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; NO-AVX512BW-64-LABEL: f32xi16_i32:
+; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [65536,65536,65536,65536,65536,65536,65536,65536]
+; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: retq
+;
+; AVX512BW-64-LABEL: f32xi16_i32:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vpbroadcastd {{.*#+}} zmm1 = [65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536]
+; AVX512BW-64-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: retq
+ %res1 = add <32 x i16> <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>, %a
+ %res2 = and <32 x i16> <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>, %res1
+ ret <32 x i16> %res2
+}
+
+
+define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
+; AVX-LABEL: f32xi16_i64:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm3
+; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddw %xmm3, %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -601,43 +910,69 @@ define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddw %xmm3, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
- %res1 = add <32 x i16> <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>, %a
- %res2 = and <32 x i16> <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>, %res1
- ret <32 x i16> %res2
-}
-
-
-; ALL64: .LCPI18
-; ALL64-NEXT: .quad 844433520132096 # 0x3000200010000
-
-; ALL32: .LCPI18
-; ALL32-NEXT: .quad 844433520132096 # double 4.1720559249406128E-309
-
-; AVX: .LCPI18
-; AVX-NEXT: .quad 844433520132096 # double 4.1720559249406128E-309
-
-define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
+; AVX-NEXT: retl
+;
; NO-AVX512BW-LABEL: f32xi16_i64:
; NO-AVX512BW: # BB#0:
-; NO-AVX512BW-NEXT: vpbroadcastq {{.*}}, %ymm2
+; NO-AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f32xi16_i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpbroadcastq {{.*}}, %zmm1
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retl
;
-; AVX-LABEL: f32xi16_i64:
+; AVX-64-LABEL: f32xi16_i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; NO-AVX512BW-64-LABEL: f32xi16_i64:
+; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [844433520132096,844433520132096,844433520132096,844433520132096]
+; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: retq
+;
+; AVX512BW-64-LABEL: f32xi16_i64:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vpbroadcastq {{.*#+}} zmm1 = [844433520132096,844433520132096,844433520132096,844433520132096,844433520132096,844433520132096,844433520132096,844433520132096]
+; AVX512BW-64-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: retq
+ %res1 = add <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3>, %a
+ %res2 = and <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3>, %res1
+ ret <32 x i16> %res2
+}
+
+
+define <32 x i16> @f32xi16_i128(<32 x i16> %a) {
+; AVX-LABEL: f32xi16_i128:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7]
; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddw %xmm3, %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -645,87 +980,151 @@ define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddw %xmm3, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
- %res1 = add <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3>, %a
- %res2 = and <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3, i16 0, i16 1, i16 2, i16 3>, %res1
- ret <32 x i16> %res2
-}
-
-
-; ALL: .LCPI19
-; ALL-NEXT: .short 0 # 0x0
-; ALL-NEXT: .short 1 # 0x1
-; ALL-NEXT: .short 2 # 0x2
-; ALL-NEXT: .short 3 # 0x3
-; ALL-NEXT: .short 4 # 0x4
-; ALL-NEXT: .short 5 # 0x5
-; ALL-NEXT: .short 6 # 0x6
-; ALL-NEXT: .short 7 # 0x7
-; ALL-NOT: .short
-
-define <32 x i16> @f32xi16_i128(<32 x i16> %a) {
+; AVX-NEXT: retl
+;
; NO-AVX512BW-LABEL: f32xi16_i128:
; NO-AVX512BW: # BB#0:
-; NO-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; NO-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; NO-AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm0, %ymm0
; NO-AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f32xi16_i128:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retl
+;
+; AVX-64-LABEL: f32xi16_i128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; NO-AVX512BW-64-LABEL: f32xi16_i128:
+; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; NO-AVX512BW-64-NEXT: # ymm2 = mem[0,1,0,1]
+; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: retq
+;
+; AVX512BW-64-LABEL: f32xi16_i128:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512BW-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-64-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: retq
%res1 = add <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %a
%res2 = and <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %res1
ret <32 x i16> %res2
}
-; AVX512BW: .LCPI20
-; AVX512BW-NEXT: .short 0 # 0x0
-; AVX512BW-NEXT: .short 1 # 0x1
-; AVX512BW-NEXT: .short 2 # 0x2
-; AVX512BW-NEXT: .short 3 # 0x3
-; AVX512BW-NEXT: .short 4 # 0x4
-; AVX512BW-NEXT: .short 5 # 0x5
-; AVX512BW-NEXT: .short 6 # 0x6
-; AVX512BW-NEXT: .short 7 # 0x7
-; AVX512BW-NEXT: .short 8 # 0x8
-; AVX512BW-NEXT: .short 9 # 0x9
-; AVX512BW-NEXT: .short 10 # 0xa
-; AVX512BW-NEXT: .short 11 # 0xb
-; AVX512BW-NEXT: .short 12 # 0xc
-; AVX512BW-NEXT: .short 13 # 0xd
-; AVX512BW-NEXT: .short 14 # 0xe
-; AVX512BW-NEXT: .short 15 # 0xf
-; AVX512BW-NOT: .short
-
define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
+; AVX-LABEL: f32xi16_i256:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [8,9,10,11,12,13,14,15]
+; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7]
+; AVX-NEXT: vpaddw %xmm4, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddw %xmm4, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: retl
+;
+; NO-AVX512BW-LABEL: f32xi16_i256:
+; NO-AVX512BW: # BB#0:
+; NO-AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-NEXT: retl
+;
; AVX512BW-LABEL: f32xi16_i256:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retl
+;
+; AVX-64-LABEL: f32xi16_i256:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [8,9,10,11,12,13,14,15]
+; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vpaddw %xmm4, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddw %xmm4, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; NO-AVX512BW-64-LABEL: f32xi16_i256:
+; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; NO-AVX512BW-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; NO-AVX512BW-64-NEXT: retq
+;
+; AVX512BW-64-LABEL: f32xi16_i256:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-64-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-64-NEXT: retq
%res1 = add <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, %a
%res2 = and <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, %res1
ret <32 x i16> %res2
}
-; ALL64: .LCPI21
-; ALL64-NEXT: .quad 4294967296 # 0x100000000
-
-; ALL32: .LCPI21
-; ALL32-NEXT: .quad 4294967296 # double 2.1219957909652723E-314
-
-; AVX: .LCPI21
-; AVX-NEXT: .quad 4294967296 # double 2.1219957909652723E-314
define <4 x i32> @f4xi32_i64(<4 x i32> %a) {
+; AVX-LABEL: f4xi32_i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f4xi32_i64:
; ALL32: # BB#0:
; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
@@ -733,40 +1132,26 @@ define <4 x i32> @f4xi32_i64(<4 x i32> %a) {
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f4xi32_i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f4xi32_i64:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastq {{.*}}(%rip), %xmm1
+; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4294967296,4294967296]
; ALL64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL64-NEXT: retq
-;
-; AVX-LABEL: f4xi32_i64:
-; AVX: # BB#0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
-; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
%res1 = add <4 x i32> <i32 0, i32 1, i32 0, i32 1>, %a
%res2 = and <4 x i32> <i32 0, i32 1, i32 0, i32 1>, %res1
ret <4 x i32> %res2
}
-; ALL64: .LCPI22
-; ALL64-NEXT: .quad 4294967296 # 0x100000000
-
-; ALL32: .LCPI22
-; ALL32-NEXT: .quad 4294967296 # double 2.1219957909652723E-314
-
-; AVX: .LCPI22
-; AVX-NEXT: .quad 4294967296 # double 2.1219957909652723E-314
-
define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
-; ALL-LABEL: f8xi32_i64:
-; ALL: # BB#0:
-; ALL-NEXT: vpbroadcastq {{.*}}, %ymm1
-; ALL-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; ALL-NEXT: vpand %ymm1, %ymm0, %ymm0
-;
; AVX-LABEL: f8xi32_i64:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -775,59 +1160,154 @@ define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f8xi32_i64:
+; ALL32: # BB#0:
+; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
+; ALL32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f8xi32_i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-64-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
+; ALL64-LABEL: f8xi32_i64:
+; ALL64: # BB#0:
+; ALL64-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967296,4294967296,4294967296,4294967296]
+; ALL64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: retq
%res1 = add <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>, %a
%res2 = and <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>, %res1
ret <8 x i32> %res2
}
-; ALL: .LCPI23
-; ALL-NEXT: .long 0 # 0x0
-; ALL-NEXT: .long 1 # 0x1
-; ALL-NEXT: .long 2 # 0x2
-; ALL-NEXT: .long 3 # 0x3
-; ALL-NOT: .long
-
define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
-; ALL-LABEL: f8xi32_i128:
-; ALL: # BB#0:
-; ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; ALL-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; ALL-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX-LABEL: f8xi32_i128:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3]
+; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f8xi32_i128:
+; ALL32: # BB#0:
+; ALL32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,0,1,2,3]
+; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f8xi32_i128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3]
+; AVX-64-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
+; ALL64-LABEL: f8xi32_i128:
+; ALL64: # BB#0:
+; ALL64-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,0,1,2,3]
+; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: retq
%res1 = add <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>, %a
%res2 = and <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>, %res1
ret <8 x i32> %res2
}
-; ALL64: .LCPI24
-; ALL64-NEXT: .quad 4294967296 # 0x100000000
-
-; ALL32: .LCPI24
-; ALL32-NEXT: .quad 4294967296 # double 2.1219957909652723E-314
-
-; AVX: .LCPI24
-; AVX-NEXT: .quad 4294967296 # double 2.1219957909652723E-314
-
define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
+; AVX-LABEL: f16xi32_i64:
+; AVX: # BB#0:
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpaddd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1]
+; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: retl
+;
; AVX2-LABEL: f16xi32_i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastq {{.*}}, %ymm2
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retl
;
; AVX512-LABEL: f16xi32_i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastq {{.*}}, %zmm1
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm1 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retl
;
-; AVX-LABEL: f16xi32_i64:
+; AVX-64-LABEL: f16xi32_i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-64-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddd %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; AVX2-64-LABEL: f16xi32_i64:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4294967296,4294967296,4294967296,4294967296]
+; AVX2-64-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512F-64-LABEL: f16xi32_i64:
+; AVX512F-64: # BB#0:
+; AVX512F-64-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4294967296,4294967296,4294967296,4294967296,4294967296,4294967296,4294967296,4294967296]
+; AVX512F-64-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512F-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512F-64-NEXT: retq
+ %res1 = add <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>, %a
+ %res2 = and <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>, %res1
+ ret <16 x i32> %res2
+}
+
+
+define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
+; AVX-LABEL: f16xi32_i128:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -835,51 +1315,103 @@ define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
; AVX-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1]
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3]
; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
- %res1 = add <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>, %a
- %res2 = and <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>, %res1
- ret <16 x i32> %res2
-}
-
-
-; ALL: .LCPI25
-; ALL-NEXT: .long 0 # 0x0
-; ALL-NEXT: .long 1 # 0x1
-; ALL-NEXT: .long 2 # 0x2
-; ALL-NEXT: .long 3 # 0x3
-; ALL-NOT: .long
-
-define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
+; AVX-NEXT: retl
+;
; AVX2-LABEL: f16xi32_i128:
; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3]
+; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retl
;
; AVX512-LABEL: f16xi32_i128:
; AVX512: # BB#0:
-; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retl
+;
+; AVX-64-LABEL: f16xi32_i128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
+; AVX-64-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddd %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; AVX2-64-LABEL: f16xi32_i128:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3]
+; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX2-64-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512F-64-LABEL: f16xi32_i128:
+; AVX512F-64: # BB#0:
+; AVX512F-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-64-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512F-64-NEXT: vpandd %zmm1, %zmm0, %zmm0
+; AVX512F-64-NEXT: retq
%res1 = add <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>, %a
%res2 = and <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>, %res1
ret <16 x i32> %res2
}
-; ALL64: .LCPI26
-; ALL64-NEXT: .quad 0 # 0x0
-; ALL64-NEXT: .quad 1 # 0x1
-; ALL64-NOT: .quad
-
define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
+; AVX-LABEL: f4xi64_i128:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,1,0,0,0,1,0]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f4xi64_i128:
+; ALL32: # BB#0:
+; ALL32-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,1,0,0,0,1,0]
+; ALL32-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f4xi64_i128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-64-NEXT: movl $1, %eax
+; AVX-64-NEXT: vmovq %rax, %xmm2
+; AVX-64-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vpaddq %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f4xi64_i128:
; ALL64: # BB#0:
-; ALL64-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; ALL64-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,0,1]
+; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
; ALL64-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL64-NEXT: retq
@@ -889,15 +1421,62 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
}
-; ALL64: .LCPI27
-; ALL64-NEXT: .quad 0 # 0x0
-; ALL64-NEXT: .quad 1 # 0x1
-; ALL64-NOT: .quad
-
define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
+; AVX-LABEL: f8xi64_i128:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,1,0,0,0,1,0]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX-NEXT: vpaddq %xmm3, %xmm4, %xmm4
+; AVX-NEXT: vpaddq %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX-NEXT: vpaddq %xmm3, %xmm4, %xmm3
+; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: retl
+;
+; AVX2-LABEL: f8xi64_i128:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,1,0,0,0,1,0]
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retl
+;
+; AVX512-LABEL: f8xi64_i128:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0]
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retl
+;
+; AVX-64-LABEL: f8xi64_i128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: movl $1, %eax
+; AVX-64-NEXT: vmovq %rax, %xmm3
+; AVX-64-NEXT: vpslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddq %xmm3, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [0,1,0,1]
+; AVX-64-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
; AVX2-64-LABEL: f8xi64_i128:
; AVX2-64: # BB#0:
-; AVX2-64-NEXT: vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX2-64-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,0,1]
+; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-64-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-64-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -906,57 +1485,99 @@ define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
;
; AVX512F-64-LABEL: f8xi64_i128:
; AVX512F-64: # BB#0:
-; AVX512F-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,0,1,0,1,0,1]
+; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: retq
-;
-; AVX512BW-64-LABEL: f8xi64_i128:
-; AVX512BW-64: # BB#0:
-; AVX512BW-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-64-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-64-NEXT: retq
%res1 = add <8 x i64> <i64 0, i64 1, i64 0, i64 1, i64 0, i64 1, i64 0, i64 1>, %a
%res2 = and <8 x i64> <i64 0, i64 1, i64 0, i64 1, i64 0, i64 1, i64 0, i64 1>, %res1
ret <8 x i64> %res2
}
-; ALL64: .LCPI28
-; ALL64-NEXT: .quad 0 # 0x0
-; ALL64-NEXT: .quad 1 # 0x1
-; ALL64-NEXT: .quad 2 # 0x2
-; ALL64-NEXT: .quad 3 # 0x3
-; ALL64-NOT: .quad
-
define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
+; AVX-LABEL: f8xi64_i256:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,1,0,2,0,3,0]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX-NEXT: vpaddq %xmm3, %xmm4, %xmm4
+; AVX-NEXT: vpaddq %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX-NEXT: vpaddq %xmm3, %xmm4, %xmm3
+; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: retl
+;
+; AVX2-LABEL: f8xi64_i256:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,1,0,2,0,3,0]
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retl
+;
+; AVX512-LABEL: f8xi64_i256:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,0,1,0,2,0,3,0,0,0,1,0,2,0,3,0]
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retl
+;
+; AVX-64-LABEL: f8xi64_i256:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3]
+; AVX-64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: movl $1, %eax
+; AVX-64-NEXT: vmovq %rax, %xmm4
+; AVX-64-NEXT: vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vpaddq %xmm4, %xmm1, %xmm1
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX-64-NEXT: vpaddq %xmm4, %xmm0, %xmm0
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [0,1,2,3]
+; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: retq
+;
+; AVX2-64-LABEL: f8xi64_i256:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3]
+; AVX2-64-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: retq
+;
; AVX512F-64-LABEL: f8xi64_i256:
; AVX512F-64: # BB#0:
-; AVX512F-64-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-64-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,0,1,2,3]
+; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: retq
-;
-; AVX512BW-64-LABEL: f8xi64_i256:
-; AVX512BW-64: # BB#0:
-; AVX512BW-64-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-64-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-64-NEXT: retq
%res1 = add <8 x i64> <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>, %a
%res2 = and <8 x i64> <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>, %res1
ret <8 x i64> %res2
}
-; ALL: .LCPI29
-; ALL-NEXT: .quad 4575657222482165760
-
-; AVX: .LCPI29
-; AVX-NEXT: .quad 4575657222482165760 # double 0.0078125018626451492
-
define <4 x float> @f4xf32_f64(<4 x float> %a) {
+; AVX-LABEL: f4xf32_f64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vdivps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f4xf32_f64:
; ALL32: # BB#0:
; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
@@ -964,221 +1585,367 @@ define <4 x float> @f4xf32_f64(<4 x float> %a) {
; ALL32-NEXT: vdivps %xmm0, %xmm1, %xmm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f4xf32_f64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-64-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: vdivps %xmm0, %xmm1, %xmm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f4xf32_f64:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastq {{.*}}(%rip), %xmm1
+; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760]
; ALL64-NEXT: vaddps %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vdivps %xmm0, %xmm1, %xmm0
; ALL64-NEXT: retq
-;
-; AVX-LABEL: f4xf32_f64:
-; AVX: # BB#0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
-; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vdivps %xmm0, %xmm1, %xmm0
%res1 = fadd <4 x float> <float 2.0, float 1.0, float 2.0, float 1.0>, %a
%res2 = fdiv <4 x float> <float 2.0, float 1.0, float 2.0, float 1.0>, %res1
ret <4 x float> %res2
}
-; ALL64: .LCPI30
-; ALL64-NEXT: .quad 4575657222482165760 # 0x3f80000040000000
-
-; ALL32: .LCPI30
-; ALL32-NEXT: .quad 4575657222482165760 # double 0.0078125018626451492
-
-; AVX: .LCPI30
-; AVX-NEXT: .quad 4575657222482165760 # double 0.0078125018626451492
-
define <8 x float> @f8xf32_f64(<8 x float> %a) {
-; ALL-LABEL: f8xf32_f64:
-; ALL: # BB#0:
-; ALL-NEXT: vbroadcastsd {{.*}}, %ymm1
-; ALL-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; ALL-NEXT: vdivps %ymm0, %ymm1, %ymm0
-;
; AVX-LABEL: f8xf32_f64:
; AVX: # BB#0:
-; AVX-NEXT: vbroadcastsd {{\.LCPI.*}}, %ymm1
+; AVX-NEXT: vbroadcastsd {{.*#+}} ymm1 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f8xf32_f64:
+; ALL32: # BB#0:
+; ALL32-NEXT: vbroadcastsd {{.*#+}} ymm1 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
+; ALL32-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f8xf32_f64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm1 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
+; AVX-64-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-64-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; AVX-64-NEXT: retq
+;
+; ALL64-LABEL: f8xf32_f64:
+; ALL64: # BB#0:
+; ALL64-NEXT: vbroadcastsd {{.*#+}} ymm1 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
+; ALL64-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; ALL64-NEXT: retq
%res1 = fadd <8 x float> <float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0>, %a
%res2 = fdiv <8 x float> <float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0>, %res1
ret <8 x float> %res2
}
-; ALL: .LCPI31
-; ALL-NEXT: .long 1082130432 # float 4
-; ALL-NEXT: .long 1065353216 # float 1
-; ALL-NEXT: .long 1073741824 # float 2
-; ALL-NEXT: .long 1077936128 # float 3
-; ALL-NOT: .long
-
define <8 x float> @f8xf32_f128(<8 x float> %a) {
-; ALL-LABEL: f8xf32_f128:
-; ALL: # BB#0:
-; ALL-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; ALL-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; ALL-NEXT: vdivps %ymm0, %ymm1, %ymm0
-;
; AVX-LABEL: f8xf32_f128:
; AVX: # BB#0:
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f8xf32_f128:
+; ALL32: # BB#0:
+; ALL32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL32-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f8xf32_f128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-64-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX-64-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-64-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; AVX-64-NEXT: retq
+;
+; ALL64-LABEL: f8xf32_f128:
+; ALL64: # BB#0:
+; ALL64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL64-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; ALL64-NEXT: retq
%res1 = fadd <8 x float> <float 4.0, float 1.0, float 2.0, float 3.0, float 4.0, float 1.0, float 2.0, float 3.0>, %a
%res2 = fdiv <8 x float> <float 4.0, float 1.0, float 2.0, float 3.0, float 4.0, float 1.0, float 2.0, float 3.0>, %res1
ret <8 x float> %res2
}
-; ALL64: .LCPI32
-; ALL64-NEXT: .quad 4575657222482165760 # 0x3f80000040000000
-
-; ALL32: .LCPI32
-; ALL32-NEXT: .quad 4575657222482165760 # double 0.0078125018626451492
-
-; AVX: .LCPI32
-; AVX-NEXT: .quad 4575657222482165760 # double 0.0078125018626451492
-
define <16 x float> @f16xf32_f64(<16 x float> %a) {
+; AVX-LABEL: f16xf32_f64:
+; AVX: # BB#0:
+; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
+; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX-NEXT: retl
+;
; AVX2-LABEL: f16xf32_f64:
; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcastsd {{.*}}, %ymm2
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vdivps %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: retl
;
; AVX512-LABEL: f16xf32_f64:
; AVX512: # BB#0:
-; AVX512-NEXT: vbroadcastsd {{.*}}, %zmm1
+; AVX512-NEXT: vbroadcastsd {{.*#+}} zmm1 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: retl
;
-; AVX-LABEL: f16xf32_f64:
-; AVX: # BB#0:
-; AVX-NEXT: vbroadcastsd {{\.LCPI.*}}, %ymm2
-; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
-; AVX-NEXT: vdivps %ymm0, %ymm2, %ymm0
-; AVX-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX-64-LABEL: f16xf32_f64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
+; AVX-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX-64-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX-64-NEXT: retq
+;
+; AVX2-64-LABEL: f16xf32_f64:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
+; AVX2-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX2-64-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512F-64-LABEL: f16xf32_f64:
+; AVX512F-64: # BB#0:
+; AVX512F-64-NEXT: vbroadcastsd {{.*#+}} zmm1 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
+; AVX512F-64-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; AVX512F-64-NEXT: vdivps %zmm0, %zmm1, %zmm0
+; AVX512F-64-NEXT: retq
%res1 = fadd <16 x float> <float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0>, %a
%res2 = fdiv <16 x float> <float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0, float 2.0, float 1.0>, %res1
ret <16 x float> %res2
}
-; ALL: .LCPI33
-; ALL-NEXT: .long 1082130432 # float 4
-; ALL-NEXT: .long 1065353216 # float 1
-; ALL-NEXT: .long 1073741824 # float 2
-; ALL-NEXT: .long 1077936128 # float 3
-; ALL-NOT: .long
-
define <16 x float> @f16xf32_f128(<16 x float> %a) {
+; AVX-LABEL: f16xf32_f128:
+; AVX: # BB#0:
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX-NEXT: retl
+;
; AVX2-LABEL: f16xf32_f128:
; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vdivps %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: retl
;
; AVX512-LABEL: f16xf32_f128:
; AVX512: # BB#0:
-; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: retl
;
-; AVX-LABEL: f16xf32_f128:
-; AVX: # BB#0:
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
-; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
-; AVX-NEXT: vdivps %ymm0, %ymm2, %ymm0
-; AVX-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX-64-LABEL: f16xf32_f128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-64-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX-64-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX-64-NEXT: retq
+;
+; AVX2-64-LABEL: f16xf32_f128:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX2-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX2-64-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512F-64-LABEL: f16xf32_f128:
+; AVX512F-64: # BB#0:
+; AVX512F-64-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-64-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; AVX512F-64-NEXT: vdivps %zmm0, %zmm1, %zmm0
+; AVX512F-64-NEXT: retq
%res1 = fadd <16 x float> <float 4.0, float 1.0, float 2.0, float 3.0, float 4.0, float 1.0, float 2.0, float 3.0, float 4.0, float 1.0, float 2.0, float 3.0, float 4.0, float 1.0, float 2.0, float 3.0>, %a
%res2 = fdiv <16 x float> <float 4.0, float 1.0, float 2.0, float 3.0, float 4.0, float 1.0, float 2.0, float 3.0, float 4.0, float 1.0, float 2.0, float 3.0, float 4.0, float 1.0, float 2.0, float 3.0>, %res1
ret <16 x float> %res2
}
-; AVX512: .LCPI34
-; AVX512-NEXT: .long 1090519040 # float 8
-; AVX512-NEXT: .long 1065353216 # float 1
-; AVX512-NEXT: .long 1073741824 # float 2
-; AVX512-NEXT: .long 1077936128 # float 3
-; AVX512-NEXT: .long 1082130432 # float 4
-; AVX512-NEXT: .long 1084227584 # float 5
-; AVX512-NEXT: .long 1086324736 # float 6
-; AVX512-NEXT: .long 1088421888 # float 7
-; AVX512-NOT: .long
-
define <16 x float> @f16xf32_f256(<16 x float> %a) {
+; AVX-LABEL: f16xf32_f256:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX-NEXT: retl
+;
+; AVX2-LABEL: f16xf32_f256:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: retl
+;
; AVX512-LABEL: f16xf32_f256:
; AVX512: # BB#0:
-; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: retl
+;
+; AVX-64-LABEL: f16xf32_f256:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX-64-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX-64-NEXT: retq
+;
+; AVX2-64-LABEL: f16xf32_f256:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX2-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX2-64-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512F-64-LABEL: f16xf32_f256:
+; AVX512F-64: # BB#0:
+; AVX512F-64-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-64-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; AVX512F-64-NEXT: vdivps %zmm0, %zmm1, %zmm0
+; AVX512F-64-NEXT: retq
%res1 = fadd <16 x float> <float 8.0, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0>, %a
%res2 = fdiv <16 x float> <float 8.0, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0>, %res1
ret <16 x float> %res2
}
-; ALL: .LCPI35
-; ALL-NEXT: .quad 4611686018427387904 # double 2
-; ALL-NEXT: .quad 4607182418800017408 # double 1
-; ALL-NOT: .quad
-
define <4 x double> @f4xf64_f128(<4 x double> %a) {
-; ALL-LABEL: f4xf64_f128:
-; ALL: # BB#0:
-; ALL-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; ALL-NEXT: vaddpd %ymm1, %ymm0, %ymm0
-; ALL-NEXT: vdivpd %ymm0, %ymm1, %ymm0
-;
; AVX-LABEL: f4xf64_f128:
; AVX: # BB#0:
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vdivpd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: retl
+;
+; ALL32-LABEL: f4xf64_f128:
+; ALL32: # BB#0:
+; ALL32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; ALL32-NEXT: vdivpd %ymm0, %ymm1, %ymm0
+; ALL32-NEXT: retl
+;
+; AVX-64-LABEL: f4xf64_f128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX-64-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX-64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-64-NEXT: vdivpd %ymm0, %ymm1, %ymm0
+; AVX-64-NEXT: retq
+;
+; ALL64-LABEL: f4xf64_f128:
+; ALL64: # BB#0:
+; ALL64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
+; ALL64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; ALL64-NEXT: vdivpd %ymm0, %ymm1, %ymm0
+; ALL64-NEXT: retq
%res1 = fadd <4 x double> <double 2.0, double 1.0, double 2.0, double 1.0>, %a
%res2 = fdiv <4 x double> <double 2.0, double 1.0, double 2.0, double 1.0>, %res1
ret <4 x double> %res2
}
-; ALL: .LCPI36
-; ALL-NEXT: .quad 4611686018427387904 # double 2
-; ALL-NEXT: .quad 4607182418800017408 # double 1
-; ALL-NOT: .quad
-
define <8 x double> @f8xf64_f128(<8 x double> %a) {
+; AVX-LABEL: f8xf64_f128:
+; AVX: # BB#0:
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vdivpd %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vdivpd %ymm1, %ymm2, %ymm1
+; AVX-NEXT: retl
+;
; AVX2-LABEL: f8xf64_f128:
; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vdivpd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vdivpd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: retl
;
; AVX512-LABEL: f8xf64_f128:
; AVX512: # BB#0:
-; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivpd %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: retl
;
-; AVX-LABEL: f8xf64_f128:
-; AVX: # BB#0:
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
-; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vaddpd %ymm2, %ymm0, %ymm0
-; AVX-NEXT: vdivpd %ymm0, %ymm2, %ymm0
-; AVX-NEXT: vdivpd %ymm1, %ymm2, %ymm1
+; AVX-64-LABEL: f8xf64_f128:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX-64-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vdivpd %ymm0, %ymm2, %ymm0
+; AVX-64-NEXT: vdivpd %ymm1, %ymm2, %ymm1
+; AVX-64-NEXT: retq
+;
+; AVX2-64-LABEL: f8xf64_f128:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX2-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vdivpd %ymm0, %ymm2, %ymm0
+; AVX2-64-NEXT: vdivpd %ymm1, %ymm2, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512F-64-LABEL: f8xf64_f128:
+; AVX512F-64: # BB#0:
+; AVX512F-64-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-64-NEXT: vaddpd %zmm1, %zmm0, %zmm0
+; AVX512F-64-NEXT: vdivpd %zmm0, %zmm1, %zmm0
+; AVX512F-64-NEXT: retq
%res1 = fadd <8 x double> <double 2.0, double 1.0, double 2.0, double 1.0, double 2.0, double 1.0, double 2.0, double 1.0>, %a
%res2 = fdiv <8 x double> <double 2.0, double 1.0, double 2.0, double 1.0, double 2.0, double 1.0, double 2.0, double 1.0>, %res1
ret <8 x double> %res2
@@ -1193,11 +1960,57 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
; AVX512-NOT: .quad
define <8 x double> @f8xf64_f256(<8 x double> %a) {
+; AVX-LABEL: f8xf64_f256:
+; AVX: # BB#0:
+; AVX-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vdivpd %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vdivpd %ymm1, %ymm2, %ymm1
+; AVX-NEXT: retl
+;
+; AVX2-LABEL: f8xf64_f256:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vdivpd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vdivpd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: retl
+;
; AVX512-LABEL: f8xf64_f256:
; AVX512: # BB#0:
-; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivpd %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: retl
+;
+; AVX-64-LABEL: f8xf64_f256:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX-64-NEXT: vdivpd %ymm0, %ymm2, %ymm0
+; AVX-64-NEXT: vdivpd %ymm1, %ymm2, %ymm1
+; AVX-64-NEXT: retq
+;
+; AVX2-64-LABEL: f8xf64_f256:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX2-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vdivpd %ymm0, %ymm2, %ymm0
+; AVX2-64-NEXT: vdivpd %ymm1, %ymm2, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512F-64-LABEL: f8xf64_f256:
+; AVX512F-64: # BB#0:
+; AVX512F-64-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-64-NEXT: vaddpd %zmm1, %zmm0, %zmm0
+; AVX512F-64-NEXT: vdivpd %zmm0, %zmm1, %zmm0
+; AVX512F-64-NEXT: retq
%res1 = fadd <8 x double> <double 4.0, double 1.0, double 2.0, double 3.0, double 4.0, double 1.0, double 2.0, double 3.0>, %a
%res2 = fdiv <8 x double> <double 4.0, double 1.0, double 2.0, double 3.0, double 4.0, double 1.0, double 2.0, double 3.0>, %res1
ret <8 x double> %res2
@@ -1205,32 +2018,34 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
-; ALL: .LCPI38
-; ALL-NEXT: .long 4290379776 # 0xffba0000
-
-; AVX: .LCPI38
-; AVX-NEXT: .long 4290379776 # float NaN
-
define <8 x i16> @f8xi16_i32_NaN(<8 x i16> %a) {
+; AVX-LABEL: f8xi16_i32_NaN:
+; AVX: # BB#0:
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retl
+;
; ALL32-LABEL: f8xi16_i32_NaN:
; ALL32: # BB#0:
-; ALL32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm1
+; ALL32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4290379776,4290379776,4290379776,4290379776]
; ALL32-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
+; AVX-64-LABEL: f8xi16_i32_NaN:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
+; AVX-64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+;
; ALL64-LABEL: f8xi16_i32_NaN:
; ALL64: # BB#0:
-; ALL64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; ALL64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4290379776,4290379776,4290379776,4290379776]
; ALL64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL64-NEXT: retq
-;
-; AVX-LABEL: f8xi16_i32_NaN:
-; AVX: # BB#0:
-; AVX-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm1
-; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
%res1 = add <8 x i16> <i16 0, i16 -70, i16 0, i16 -70, i16 0, i16 -70, i16 0, i16 -70>, %a
%res2 = and <8 x i16> <i16 0, i16 -70, i16 0, i16 -70, i16 0, i16 -70, i16 0, i16 -70>, %res1
ret <8 x i16> %res2
diff --git a/test/CodeGen/X86/bswap-wide-int.ll b/test/CodeGen/X86/bswap-wide-int.ll
index db48eb80de4b..858dbf5fd85f 100644
--- a/test/CodeGen/X86/bswap-wide-int.ll
+++ b/test/CodeGen/X86/bswap-wide-int.ll
@@ -71,8 +71,8 @@ define i128 @bswap_i128(i128 %a0) nounwind {
; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-MOVBE-NEXT: movbel %esi, 12(%eax)
; X86-MOVBE-NEXT: movbel %edi, 8(%eax)
-; X86-MOVBE-NEXT: movbel %ecx, 4(%eax)
-; X86-MOVBE-NEXT: movbel %edx, (%eax)
+; X86-MOVBE-NEXT: movbel %edx, 4(%eax)
+; X86-MOVBE-NEXT: movbel %ecx, (%eax)
; X86-MOVBE-NEXT: popl %esi
; X86-MOVBE-NEXT: popl %edi
; X86-MOVBE-NEXT: retl $4
diff --git a/test/CodeGen/X86/build-vector-128.ll b/test/CodeGen/X86/build-vector-128.ll
index c73d7654045e..531c6de5f90c 100644
--- a/test/CodeGen/X86/build-vector-128.ll
+++ b/test/CodeGen/X86/build-vector-128.ll
@@ -72,12 +72,10 @@ define <4 x float> @test_buildvector_v4f32(float %a0, float %a1, float %a2, floa
}
define <2 x i64> @test_buildvector_v2i64(i64 %a0, i64 %a1) {
-; SSE2-32-LABEL: test_buildvector_v2i64:
-; SSE2-32: # BB#0:
-; SSE2-32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSE2-32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-32-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-32-NEXT: retl
+; SSE-32-LABEL: test_buildvector_v2i64:
+; SSE-32: # BB#0:
+; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
+; SSE-32-NEXT: retl
;
; SSE-64-LABEL: test_buildvector_v2i64:
; SSE-64: # BB#0:
@@ -86,20 +84,9 @@ define <2 x i64> @test_buildvector_v2i64(i64 %a0, i64 %a1) {
; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-64-NEXT: retq
;
-; SSE41-32-LABEL: test_buildvector_v2i64:
-; SSE41-32: # BB#0:
-; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE41-32-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0
-; SSE41-32-NEXT: pinsrd $2, {{[0-9]+}}(%esp), %xmm0
-; SSE41-32-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0
-; SSE41-32-NEXT: retl
-;
; AVX-32-LABEL: test_buildvector_v2i64:
; AVX-32: # BB#0:
-; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v2i64:
diff --git a/test/CodeGen/X86/build-vector-256.ll b/test/CodeGen/X86/build-vector-256.ll
index 1ced1fc3a382..942b7779abe6 100644
--- a/test/CodeGen/X86/build-vector-256.ll
+++ b/test/CodeGen/X86/build-vector-256.ll
@@ -51,18 +51,10 @@ define <8 x float> @test_buildvector_v8f32(float %a0, float %a1, float %a2, floa
}
define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
-; AVX1-32-LABEL: test_buildvector_v4i64:
-; AVX1-32: # BB#0:
-; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX1-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX1-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX1-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX1-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX1-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-32-NEXT: retl
+; AVX-32-LABEL: test_buildvector_v4i64:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0
+; AVX-32-NEXT: retl
;
; AVX1-64-LABEL: test_buildvector_v4i64:
; AVX1-64: # BB#0:
@@ -75,19 +67,6 @@ define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
; AVX1-64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-64-NEXT: retq
;
-; AVX2-32-LABEL: test_buildvector_v4i64:
-; AVX2-32: # BB#0:
-; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX2-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX2-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX2-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX2-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX2-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX2-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-32-NEXT: retl
-;
; AVX2-64-LABEL: test_buildvector_v4i64:
; AVX2-64: # BB#0:
; AVX2-64-NEXT: vmovq %rcx, %xmm0
diff --git a/test/CodeGen/X86/build-vector-512.ll b/test/CodeGen/X86/build-vector-512.ll
index 21737cca93a1..fbfbf2d53c63 100644
--- a/test/CodeGen/X86/build-vector-512.ll
+++ b/test/CodeGen/X86/build-vector-512.ll
@@ -79,25 +79,7 @@ define <16 x float> @test_buildvector_v16f32(float %a0, float %a1, float %a2, fl
define <8 x i64> @test_buildvector_v8i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7) {
; AVX-32-LABEL: test_buildvector_v8i64:
; AVX-32: # BB#0:
-; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
-; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm2, %xmm2
-; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm2, %xmm2
-; AVX-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v8i64:
diff --git a/test/CodeGen/X86/cast-vsel.ll b/test/CodeGen/X86/cast-vsel.ll
index 83ab2fac2f16..260535985e2d 100644
--- a/test/CodeGen/X86/cast-vsel.ll
+++ b/test/CodeGen/X86/cast-vsel.ll
@@ -148,7 +148,7 @@ define <4 x double> @fpext(<4 x double> %a, <4 x double> %b, <4 x float> %c, <4
; SSE2-NEXT: andnps %xmm5, %xmm0
; SSE2-NEXT: orps %xmm4, %xmm0
; SSE2-NEXT: cvtps2pd %xmm0, %xmm2
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: cvtps2pd %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
diff --git a/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/test/CodeGen/X86/clear_upper_vector_element_bits.ll
index a6bc5aa321fa..e2a4368b255a 100644
--- a/test/CodeGen/X86/clear_upper_vector_element_bits.ll
+++ b/test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -1063,87 +1063,89 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
;
; AVX1-LABEL: _clearupper32xi8b:
; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: pushq %r12
; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: vpextrq $1, %xmm0, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %r14
+; AVX1-NEXT: vmovq %xmm0, %rcx
+; AVX1-NEXT: movq %rcx, %r8
+; AVX1-NEXT: movq %rcx, %r9
+; AVX1-NEXT: movq %rcx, %r10
+; AVX1-NEXT: movq %rcx, %r11
+; AVX1-NEXT: movq %rcx, %r14
+; AVX1-NEXT: movq %rcx, %r15
; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: movq %rdx, %r9
-; AVX1-NEXT: movq %rdx, %r11
-; AVX1-NEXT: movq %rdx, %rsi
-; AVX1-NEXT: movq %rdx, %rdi
-; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: movq %rdx, %rbx
; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: movq %rdx, %rbp
; AVX1-NEXT: andb $15, %dl
; AVX1-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: shrq $56, %rax
-; AVX1-NEXT: andb $15, %al
-; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movq %r14, %r10
-; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: movq %rcx, %rdx
; AVX1-NEXT: andb $15, %cl
; AVX1-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movq %r14, %rdx
-; AVX1-NEXT: shrq $40, %rdi
-; AVX1-NEXT: andb $15, %dil
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movq %r14, %rax
-; AVX1-NEXT: shrq $32, %rsi
+; AVX1-NEXT: shrq $56, %rbp
+; AVX1-NEXT: andb $15, %bpl
+; AVX1-NEXT: movb %bpl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $48, %rsi
; AVX1-NEXT: andb $15, %sil
; AVX1-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movq %r14, %rcx
-; AVX1-NEXT: shrq $24, %r11
-; AVX1-NEXT: andb $15, %r11b
-; AVX1-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movq %r14, %rsi
-; AVX1-NEXT: shrq $16, %r9
-; AVX1-NEXT: andb $15, %r9b
-; AVX1-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movq %r14, %rdi
-; AVX1-NEXT: shrq $8, %r8
-; AVX1-NEXT: andb $15, %r8b
-; AVX1-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movq %r14, %rbx
-; AVX1-NEXT: andb $15, %r14b
-; AVX1-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: shrq $8, %r10
-; AVX1-NEXT: shrq $16, %rdx
-; AVX1-NEXT: shrq $24, %rax
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: shrq $40, %rsi
-; AVX1-NEXT: shrq $48, %rdi
-; AVX1-NEXT: shrq $56, %rbx
-; AVX1-NEXT: andb $15, %bl
-; AVX1-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $40, %rdi
; AVX1-NEXT: andb $15, %dil
; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: andb $15, %sil
-; AVX1-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: andb $15, %cl
-; AVX1-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $32, %rax
; AVX1-NEXT: andb $15, %al
; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $24, %rbx
+; AVX1-NEXT: andb $15, %bl
+; AVX1-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $16, %r13
+; AVX1-NEXT: andb $15, %r13b
+; AVX1-NEXT: movb %r13b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $8, %r12
+; AVX1-NEXT: andb $15, %r12b
+; AVX1-NEXT: movb %r12b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $8, %r8
+; AVX1-NEXT: shrq $16, %r9
+; AVX1-NEXT: shrq $24, %r10
+; AVX1-NEXT: shrq $32, %r11
+; AVX1-NEXT: shrq $40, %r14
+; AVX1-NEXT: shrq $48, %r15
+; AVX1-NEXT: shrq $56, %rdx
; AVX1-NEXT: andb $15, %dl
; AVX1-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r15b
+; AVX1-NEXT: movb %r15b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r14b
+; AVX1-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r11b
+; AVX1-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: andb $15, %r10b
; AVX1-NEXT: movb %r10b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r9b
+; AVX1-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r8b
+; AVX1-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: movq %rax, %r8
+; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movq %rax, %rsi
; AVX1-NEXT: movq %rax, %rdi
+; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: movl %eax, %ebx
-; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: shrl $8, %eax
; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shrl $24, %ebx
-; AVX1-NEXT: vpinsrb $3, %ebx, %xmm1, %xmm1
+; AVX1-NEXT: shrl $16, %ebx
+; AVX1-NEXT: vpinsrb $2, %ebx, %xmm1, %xmm1
+; AVX1-NEXT: shrl $24, %ebp
+; AVX1-NEXT: vpinsrb $3, %ebp, %xmm1, %xmm1
; AVX1-NEXT: shrq $32, %rdi
; AVX1-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
; AVX1-NEXT: shrq $40, %rsi
@@ -1153,8 +1155,8 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
; AVX1-NEXT: shrq $48, %rdx
; AVX1-NEXT: vpinsrb $6, %edx, %xmm1, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: shrq $56, %r8
-; AVX1-NEXT: vpinsrb $7, %r8d, %xmm1, %xmm0
+; AVX1-NEXT: shrq $56, %rcx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm0
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $8, %ecx
; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
@@ -1222,92 +1224,98 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: popq %r13
; AVX1-NEXT: popq %r14
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper32xi8b:
; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: vpextrq $1, %xmm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %r14
+; AVX2-NEXT: vmovq %xmm0, %rcx
+; AVX2-NEXT: movq %rcx, %r8
+; AVX2-NEXT: movq %rcx, %r9
+; AVX2-NEXT: movq %rcx, %r10
+; AVX2-NEXT: movq %rcx, %r11
+; AVX2-NEXT: movq %rcx, %r14
+; AVX2-NEXT: movq %rcx, %r15
; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: movq %rdx, %r9
-; AVX2-NEXT: movq %rdx, %r11
-; AVX2-NEXT: movq %rdx, %rsi
-; AVX2-NEXT: movq %rdx, %rdi
-; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: movq %rdx, %r12
+; AVX2-NEXT: movq %rdx, %r13
+; AVX2-NEXT: movq %rdx, %rbx
; AVX2-NEXT: movq %rdx, %rax
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: movq %rdx, %rsi
+; AVX2-NEXT: movq %rdx, %rbp
; AVX2-NEXT: andb $15, %dl
; AVX2-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: shrq $56, %rax
-; AVX2-NEXT: andb $15, %al
-; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq %r14, %r10
-; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: andb $15, %cl
; AVX2-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq %r14, %rdx
-; AVX2-NEXT: shrq $40, %rdi
-; AVX2-NEXT: andb $15, %dil
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq %r14, %rax
-; AVX2-NEXT: shrq $32, %rsi
+; AVX2-NEXT: shrq $56, %rbp
+; AVX2-NEXT: andb $15, %bpl
+; AVX2-NEXT: movb %bpl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $48, %rsi
; AVX2-NEXT: andb $15, %sil
; AVX2-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq %r14, %rcx
-; AVX2-NEXT: shrq $24, %r11
-; AVX2-NEXT: andb $15, %r11b
-; AVX2-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq %r14, %rsi
-; AVX2-NEXT: shrq $16, %r9
-; AVX2-NEXT: andb $15, %r9b
-; AVX2-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq %r14, %rdi
-; AVX2-NEXT: shrq $8, %r8
-; AVX2-NEXT: andb $15, %r8b
-; AVX2-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq %r14, %rbx
-; AVX2-NEXT: andb $15, %r14b
-; AVX2-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: shrq $8, %r10
-; AVX2-NEXT: shrq $16, %rdx
-; AVX2-NEXT: shrq $24, %rax
-; AVX2-NEXT: shrq $32, %rcx
-; AVX2-NEXT: shrq $40, %rsi
-; AVX2-NEXT: shrq $48, %rdi
-; AVX2-NEXT: shrq $56, %rbx
-; AVX2-NEXT: andb $15, %bl
-; AVX2-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $40, %rdi
; AVX2-NEXT: andb $15, %dil
; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: andb $15, %sil
-; AVX2-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: andb $15, %cl
-; AVX2-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $32, %rax
; AVX2-NEXT: andb $15, %al
; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $24, %rbx
+; AVX2-NEXT: andb $15, %bl
+; AVX2-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $16, %r13
+; AVX2-NEXT: andb $15, %r13b
+; AVX2-NEXT: movb %r13b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $8, %r12
+; AVX2-NEXT: andb $15, %r12b
+; AVX2-NEXT: movb %r12b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $8, %r8
+; AVX2-NEXT: shrq $16, %r9
+; AVX2-NEXT: shrq $24, %r10
+; AVX2-NEXT: shrq $32, %r11
+; AVX2-NEXT: shrq $40, %r14
+; AVX2-NEXT: shrq $48, %r15
+; AVX2-NEXT: shrq $56, %rdx
; AVX2-NEXT: andb $15, %dl
; AVX2-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r15b
+; AVX2-NEXT: movb %r15b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r14b
+; AVX2-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r11b
+; AVX2-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: andb $15, %r10b
; AVX2-NEXT: movb %r10b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r9b
+; AVX2-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r8b
+; AVX2-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: movq %rax, %r8
+; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movq %rax, %rsi
; AVX2-NEXT: movq %rax, %rdi
+; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: movl %eax, %ebx
-; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: shrl $16, %ecx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shrl $24, %ebx
-; AVX2-NEXT: vpinsrb $3, %ebx, %xmm1, %xmm1
+; AVX2-NEXT: shrl $16, %ebx
+; AVX2-NEXT: vpinsrb $2, %ebx, %xmm1, %xmm1
+; AVX2-NEXT: shrl $24, %ebp
+; AVX2-NEXT: vpinsrb $3, %ebp, %xmm1, %xmm1
; AVX2-NEXT: shrq $32, %rdi
; AVX2-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
; AVX2-NEXT: shrq $40, %rsi
@@ -1317,8 +1325,8 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
; AVX2-NEXT: shrq $48, %rdx
; AVX2-NEXT: vpinsrb $6, %edx, %xmm1, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: shrq $56, %r8
-; AVX2-NEXT: vpinsrb $7, %r8d, %xmm1, %xmm0
+; AVX2-NEXT: shrq $56, %rcx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm0
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $8, %ecx
; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
@@ -1386,7 +1394,11 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
%x4 = bitcast <32 x i8> %0 to <64 x i4>
%r0 = insertelement <64 x i4> %x4, i4 zeroinitializer, i32 1
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index d901f16e5c73..fca39bca6c76 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -1,34 +1,36 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-apple-darwin10 -disable-cgp-select2branch | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -disable-cgp-select2branch | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
define i32 @test1(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
-entry:
; CHECK-LABEL: test1:
-; CHECK: btl
-; CHECK-NEXT: movl $12, %eax
-; CHECK-NEXT: cmovael (%rcx), %eax
-; CHECK-NEXT: ret
-
- %0 = lshr i32 %x, %n ; <i32> [#uses=1]
- %1 = and i32 %0, 1 ; <i32> [#uses=1]
- %toBool = icmp eq i32 %1, 0 ; <i1> [#uses=1]
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: btl %esi, %edi
+; CHECK-NEXT: movl $12, %eax
+; CHECK-NEXT: cmovael (%rcx), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = lshr i32 %x, %n
+ %1 = and i32 %0, 1
+ %toBool = icmp eq i32 %1, 0
%v = load i32, i32* %vp
- %.0 = select i1 %toBool, i32 %v, i32 12 ; <i32> [#uses=1]
+ %.0 = select i1 %toBool, i32 %v, i32 12
ret i32 %.0
}
+
define i32 @test2(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
-entry:
; CHECK-LABEL: test2:
-; CHECK: btl
-; CHECK-NEXT: movl $12, %eax
-; CHECK-NEXT: cmovbl (%rcx), %eax
-; CHECK-NEXT: ret
-
- %0 = lshr i32 %x, %n ; <i32> [#uses=1]
- %1 = and i32 %0, 1 ; <i32> [#uses=1]
- %toBool = icmp eq i32 %1, 0 ; <i1> [#uses=1]
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: btl %esi, %edi
+; CHECK-NEXT: movl $12, %eax
+; CHECK-NEXT: cmovbl (%rcx), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = lshr i32 %x, %n
+ %1 = and i32 %0, 1
+ %toBool = icmp eq i32 %1, 0
%v = load i32, i32* %vp
- %.0 = select i1 %toBool, i32 12, i32 %v ; <i32> [#uses=1]
+ %.0 = select i1 %toBool, i32 12, i32 %v
ret i32 %.0
}
@@ -41,10 +43,13 @@ declare void @bar(i64) nounwind
define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
; CHECK-LABEL: test3:
-; CHECK: cmov{{n?}}el %[[R1:e..]], %[[R2:e..]]
-; CHECK-NOT: movl
-; CHECK: call
-
+; CHECK: # BB#0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: testb $1, %dl
+; CHECK-NEXT: cmovel %esi, %edi
+; CHECK-NEXT: callq bar
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
%c = trunc i64 %a to i32
%d = trunc i64 %b to i32
%e = select i1 %p, i32 %c, i32 %d
@@ -65,52 +70,86 @@ define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
; PR4814
-@g_3 = external global i8 ; <i8*> [#uses=1]
-@g_96 = external global i8 ; <i8*> [#uses=2]
-@g_100 = external global i8 ; <i8*> [#uses=2]
-@_2E_str = external constant [15 x i8], align 1 ; <[15 x i8]*> [#uses=1]
+@g_3 = external global i8
+@g_96 = external global i8
+@g_100 = external global i8
+@_2E_str = external constant [15 x i8], align 1
define i1 @test4() nounwind {
+; CHECK-LABEL: test4:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movsbl {{.*}}(%rip), %edx
+; CHECK-NEXT: movl %edx, %eax
+; CHECK-NEXT: shrb $7, %al
+; CHECK-NEXT: movzbl %al, %ecx
+; CHECK-NEXT: xorl $1, %ecx
+; CHECK-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; CHECK-NEXT: sarl %cl, %edx
+; CHECK-NEXT: movb {{.*}}(%rip), %al
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: je .LBB3_2
+; CHECK-NEXT: # BB#1: # %bb.i.i.i
+; CHECK-NEXT: movb {{.*}}(%rip), %cl
+; CHECK-NEXT: .LBB3_2: # %func_4.exit.i
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: testb %dl, %dl
+; CHECK-NEXT: setne %bl
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: je .LBB3_4
+; CHECK-NEXT: # BB#3: # %func_4.exit.i
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: .LBB3_4: # %func_4.exit.i
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: je .LBB3_7
+; CHECK-NEXT: # BB#5: # %func_4.exit.i
+; CHECK-NEXT: testb %bl, %bl
+; CHECK-NEXT: jne .LBB3_7
+; CHECK-NEXT: # BB#6: # %bb.i.i
+; CHECK-NEXT: movb {{.*}}(%rip), %cl
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: .LBB3_7: # %func_1.exit
+; CHECK-NEXT: movb %cl, {{.*}}(%rip)
+; CHECK-NEXT: movzbl %cl, %esi
+; CHECK-NEXT: movl $_2E_str, %edi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: callq printf
+; CHECK-NEXT: movl %ebx, %eax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
entry:
- %0 = load i8, i8* @g_3, align 1 ; <i8> [#uses=2]
- %1 = sext i8 %0 to i32 ; <i32> [#uses=1]
- %.lobit.i = lshr i8 %0, 7 ; <i8> [#uses=1]
- %tmp.i = zext i8 %.lobit.i to i32 ; <i32> [#uses=1]
- %tmp.not.i = xor i32 %tmp.i, 1 ; <i32> [#uses=1]
- %iftmp.17.0.i.i = ashr i32 %1, %tmp.not.i ; <i32> [#uses=1]
- %retval56.i.i = trunc i32 %iftmp.17.0.i.i to i8 ; <i8> [#uses=1]
- %2 = icmp eq i8 %retval56.i.i, 0 ; <i1> [#uses=2]
- %g_96.promoted.i = load i8, i8* @g_96 ; <i8> [#uses=3]
- %3 = icmp eq i8 %g_96.promoted.i, 0 ; <i1> [#uses=2]
+ %0 = load i8, i8* @g_3, align 1
+ %1 = sext i8 %0 to i32
+ %.lobit.i = lshr i8 %0, 7
+ %tmp.i = zext i8 %.lobit.i to i32
+ %tmp.not.i = xor i32 %tmp.i, 1
+ %iftmp.17.0.i.i = ashr i32 %1, %tmp.not.i
+ %retval56.i.i = trunc i32 %iftmp.17.0.i.i to i8
+ %2 = icmp eq i8 %retval56.i.i, 0
+ %g_96.promoted.i = load i8, i8* @g_96
+ %3 = icmp eq i8 %g_96.promoted.i, 0
br i1 %3, label %func_4.exit.i, label %bb.i.i.i
-bb.i.i.i: ; preds = %entry
- %4 = load volatile i8, i8* @g_100, align 1 ; <i8> [#uses=0]
+bb.i.i.i:
+ %4 = load volatile i8, i8* @g_100, align 1
br label %func_4.exit.i
-; CHECK-LABEL: test4:
-; CHECK: g_100
-; CHECK: testb
-; CHECK-NOT: xor
-; CHECK: setne
-; CHECK: testb
-
-func_4.exit.i: ; preds = %bb.i.i.i, %entry
- %.not.i = xor i1 %2, true ; <i1> [#uses=1]
- %brmerge.i = or i1 %3, %.not.i ; <i1> [#uses=1]
- %.mux.i = select i1 %2, i8 %g_96.promoted.i, i8 0 ; <i8> [#uses=1]
+func_4.exit.i:
+ %.not.i = xor i1 %2, true
+ %brmerge.i = or i1 %3, %.not.i
+ %.mux.i = select i1 %2, i8 %g_96.promoted.i, i8 0
br i1 %brmerge.i, label %func_1.exit, label %bb.i.i
-bb.i.i: ; preds = %func_4.exit.i
- %5 = load volatile i8, i8* @g_100, align 1 ; <i8> [#uses=0]
+bb.i.i:
+ %5 = load volatile i8, i8* @g_100, align 1
br label %func_1.exit
-func_1.exit: ; preds = %bb.i.i, %func_4.exit.i
- %g_96.tmp.0.i = phi i8 [ %g_96.promoted.i, %bb.i.i ], [ %.mux.i, %func_4.exit.i ] ; <i8> [#uses=2]
+func_1.exit:
+ %g_96.tmp.0.i = phi i8 [ %g_96.promoted.i, %bb.i.i ], [ %.mux.i, %func_4.exit.i ]
%ret = phi i1 [ 0, %bb.i.i ], [ %.not.i, %func_4.exit.i ]
store i8 %g_96.tmp.0.i, i8* @g_96
- %6 = zext i8 %g_96.tmp.0.i to i32 ; <i32> [#uses=1]
- %7 = tail call i32 (i8*, ...) @printf(i8* noalias getelementptr ([15 x i8], [15 x i8]* @_2E_str, i64 0, i64 0), i32 %6) nounwind ; <i32> [#uses=0]
+ %6 = zext i8 %g_96.tmp.0.i to i32
+ %7 = tail call i32 (i8*, ...) @printf(i8* noalias getelementptr ([15 x i8], [15 x i8]* @_2E_str, i64 0, i64 0), i32 %6) nounwind
ret i1 %ret
}
@@ -120,29 +159,32 @@ declare i32 @printf(i8* nocapture, ...) nounwind
; Should compile to setcc | -2.
; rdar://6668608
define i32 @test5(i32* nocapture %P) nounwind readonly {
-entry:
; CHECK-LABEL: test5:
-; CHECK: xorl %eax, %eax
-; CHECK: setg %al
-; CHECK: orl $-2, %eax
-; CHECK: ret
-
- %0 = load i32, i32* %P, align 4 ; <i32> [#uses=1]
- %1 = icmp sgt i32 %0, 41 ; <i1> [#uses=1]
- %iftmp.0.0 = select i1 %1, i32 -1, i32 -2 ; <i32> [#uses=1]
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl $41, (%rdi)
+; CHECK-NEXT: setg %al
+; CHECK-NEXT: orl $-2, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = load i32, i32* %P, align 4
+ %1 = icmp sgt i32 %0, 41
+ %iftmp.0.0 = select i1 %1, i32 -1, i32 -2
ret i32 %iftmp.0.0
}
define i32 @test6(i32* nocapture %P) nounwind readonly {
-entry:
; CHECK-LABEL: test6:
-; CHECK: xorl %eax, %eax
-; CHECK: setl %al
-; CHECK: leal 4(%rax,%rax,8), %eax
-; CHECK: ret
- %0 = load i32, i32* %P, align 4 ; <i32> [#uses=1]
- %1 = icmp sgt i32 %0, 41 ; <i1> [#uses=1]
- %iftmp.0.0 = select i1 %1, i32 4, i32 13 ; <i32> [#uses=1]
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl $42, (%rdi)
+; CHECK-NEXT: setl %al
+; CHECK-NEXT: leal 4(%rax,%rax,8), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = load i32, i32* %P, align 4
+ %1 = icmp sgt i32 %0, 41
+ %iftmp.0.0 = select i1 %1, i32 4, i32 13
ret i32 %iftmp.0.0
}
@@ -151,16 +193,21 @@ entry:
; because it isn't worth it. Just use a branch instead.
define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
; CHECK-LABEL: test7:
-; CHECK: testb $1, %dil
-; CHECK-NEXT: jne LBB
-
+; CHECK: # BB#0:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: jne .LBB6_2
+; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: movb %dl, %sil
+; CHECK-NEXT: .LBB6_2:
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%d = select i1 %c, i8 %a, i8 %b
ret i8 %d
}
define i32 @smin(i32 %x) {
; CHECK-LABEL: smin:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: xorl $-1, %edi
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: cmovsl %edi, %eax
diff --git a/test/CodeGen/X86/code_placement_cold_loop_blocks.ll b/test/CodeGen/X86/code_placement_cold_loop_blocks.ll
index d7dc8defac3a..875d791dc802 100644
--- a/test/CodeGen/X86/code_placement_cold_loop_blocks.ll
+++ b/test/CodeGen/X86/code_placement_cold_loop_blocks.ll
@@ -37,7 +37,7 @@ end:
ret void
}
-define void @nested_loop_0() !prof !1 {
+define void @nested_loop_0(i1 %flag) !prof !1 {
; Test if a block that is cold in the inner loop but not cold in the outer loop
; will be merged to the outer loop chain.
;
@@ -68,8 +68,7 @@ if.then:
if.else:
call void @e()
- %call2 = call zeroext i1 @a()
- br i1 %call2, label %header2, label %header, !prof !3
+ br i1 %flag, label %header2, label %header, !prof !3
end:
call void @f()
diff --git a/test/CodeGen/X86/combine-avx-intrinsics.ll b/test/CodeGen/X86/combine-avx-intrinsics.ll
index 64e081523c1f..811b1f20833c 100644
--- a/test/CodeGen/X86/combine-avx-intrinsics.ll
+++ b/test/CodeGen/X86/combine-avx-intrinsics.ll
@@ -1,59 +1,56 @@
-; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s
define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0) {
+; CHECK-LABEL: test_x86_avx_blend_pd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a0, i32 7)
ret <4 x double> %1
}
-; CHECK-LABEL: test_x86_avx_blend_pd_256
-; CHECK-NOT: vblendpd
-; CHECK: ret
-
define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0) {
+; CHECK-LABEL: test_x86_avx_blend_ps_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a0, i32 7)
ret <8 x float> %1
}
-; CHECK-LABEL: test_x86_avx_blend_ps_256
-; CHECK-NOT: vblendps
-; CHECK: ret
-
define <4 x double> @test2_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
+; CHECK-LABEL: test2_x86_avx_blend_pd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 0)
ret <4 x double> %1
}
-; CHECK-LABEL: test2_x86_avx_blend_pd_256
-; CHECK-NOT: vblendpd
-; CHECK: ret
-
define <8 x float> @test2_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
+; CHECK-LABEL: test2_x86_avx_blend_ps_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 0)
ret <8 x float> %1
}
-; CHECK-LABEL: test2_x86_avx_blend_ps_256
-; CHECK-NOT: vblendps
-; CHECK: ret
-
define <4 x double> @test3_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
+; CHECK-LABEL: test3_x86_avx_blend_pd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: retq
%1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 -1)
ret <4 x double> %1
}
-; CHECK-LABEL: test3_x86_avx_blend_pd_256
-; CHECK-NOT: vblendpd
-; CHECK: ret
-
define <8 x float> @test3_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
+; CHECK-LABEL: test3_x86_avx_blend_ps_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 -1)
ret <8 x float> %1
}
-; CHECK-LABEL: test3_x86_avx_blend_ps_256
-; CHECK-NOT: vblendps
-; CHECK: ret
-
declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i32)
declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i32)
diff --git a/test/CodeGen/X86/combine-avx2-intrinsics.ll b/test/CodeGen/X86/combine-avx2-intrinsics.ll
index 2714b26c9141..9a548f6b7f0e 100644
--- a/test/CodeGen/X86/combine-avx2-intrinsics.ll
+++ b/test/CodeGen/X86/combine-avx2-intrinsics.ll
@@ -1,88 +1,83 @@
-; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s
; Verify that the backend correctly combines AVX2 builtin intrinsics.
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0) {
+; CHECK-LABEL: test_x86_avx2_pblendw:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a0, i32 7)
ret <16 x i16> %res
}
-; CHECK-LABEL: test_x86_avx2_pblendw
-; CHECK-NOT: vpblendw
-; CHECK: ret
-
define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0) {
+; CHECK-LABEL: test_x86_avx2_pblendd_128:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a0, i32 7)
ret <4 x i32> %res
}
-; CHECK-LABEL: test_x86_avx2_pblendd_128
-; CHECK-NOT: vpblendd
-; CHECK: ret
-
define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0) {
+; CHECK-LABEL: test_x86_avx2_pblendd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a0, i32 7)
ret <8 x i32> %res
}
-; CHECK-LABEL: test_x86_avx2_pblendd_256
-; CHECK-NOT: vpblendd
-; CHECK: ret
-
define <16 x i16> @test2_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
+; CHECK-LABEL: test2_x86_avx2_pblendw:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 0)
ret <16 x i16> %res
}
-; CHECK-LABEL: test2_x86_avx2_pblendw
-; CHECK-NOT: vpblendw
-; CHECK: ret
-
define <4 x i32> @test2_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: test2_x86_avx2_pblendd_128:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 0)
ret <4 x i32> %res
}
-; CHECK-LABEL: test2_x86_avx2_pblendd_128
-; CHECK-NOT: vpblendd
-; CHECK: ret
-
define <8 x i32> @test2_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: test2_x86_avx2_pblendd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 0)
ret <8 x i32> %res
}
-; CHECK-LABEL: test2_x86_avx2_pblendd_256
-; CHECK-NOT: vpblendd
-; CHECK: ret
-
define <16 x i16> @test3_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
+; CHECK-LABEL: test3_x86_avx2_pblendw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 -1)
ret <16 x i16> %res
}
-; CHECK-LABEL: test3_x86_avx2_pblendw
-; CHECK-NOT: vpblendw
-; CHECK: ret
-
define <4 x i32> @test3_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: test3_x86_avx2_pblendd_128:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 -1)
ret <4 x i32> %res
}
-; CHECK-LABEL: test3_x86_avx2_pblendd_128
-; CHECK-NOT: vpblendd
-; CHECK: ret
-
define <8 x i32> @test3_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: test3_x86_avx2_pblendd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 -1)
ret <8 x i32> %res
}
-; CHECK-LABEL: test3_x86_avx2_pblendd_256
-; CHECK-NOT: vpblendd
-; CHECK: ret
-
declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i32)
declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i32)
diff --git a/test/CodeGen/X86/combine-rotates.ll b/test/CodeGen/X86/combine-rotates.ll
new file mode 100644
index 000000000000..713ee5d0f65a
--- /dev/null
+++ b/test/CodeGen/X86/combine-rotates.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=XOP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512
+
+; fold (rot (rot x, c1), c2) -> rot x, c1+c2
+define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
+; XOP-LABEL: combine_vec_rot_rot:
+; XOP: # BB#0:
+; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
+; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
+; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; XOP-NEXT: retq
+;
+; AVX512-LABEL: combine_vec_rot_rot:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+ %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
+ %2 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
+ %3 = or <4 x i32> %1, %2
+ %4 = lshr <4 x i32> %3, <i32 12, i32 13, i32 14, i32 15>
+ %5 = shl <4 x i32> %3, <i32 20, i32 19, i32 18, i32 17>
+ %6 = or <4 x i32> %4, %5
+ ret <4 x i32> %6
+}
+
+define <4 x i32> @combine_vec_rot_rot_splat(<4 x i32> %x) {
+; XOP-LABEL: combine_vec_rot_rot_splat:
+; XOP: # BB#0:
+; XOP-NEXT: vprotd $7, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
+; AVX512-LABEL: combine_vec_rot_rot_splat:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsrld $3, %xmm0, %xmm1
+; AVX512-NEXT: vpslld $29, %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpsrld $22, %xmm0, %xmm1
+; AVX512-NEXT: vpslld $10, %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+ %1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
+ %2 = shl <4 x i32> %x, <i32 29, i32 29, i32 29, i32 29>
+ %3 = or <4 x i32> %1, %2
+ %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
+ %5 = shl <4 x i32> %3, <i32 10, i32 10, i32 10, i32 10>
+ %6 = or <4 x i32> %4, %5
+ ret <4 x i32> %6
+}
+
+define <4 x i32> @combine_vec_rot_rot_splat_zero(<4 x i32> %x) {
+; XOP-LABEL: combine_vec_rot_rot_splat_zero:
+; XOP: # BB#0:
+; XOP-NEXT: retq
+;
+; AVX512-LABEL: combine_vec_rot_rot_splat_zero:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsrld $1, %xmm0, %xmm1
+; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpsrld $31, %xmm0, %xmm1
+; AVX512-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+ %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
+ %2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
+ %3 = or <4 x i32> %1, %2
+ %4 = lshr <4 x i32> %3, <i32 31, i32 31, i32 31, i32 31>
+ %5 = shl <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1>
+ %6 = or <4 x i32> %4, %5
+ ret <4 x i32> %6
+}
diff --git a/test/CodeGen/X86/combine-sse41-intrinsics.ll b/test/CodeGen/X86/combine-sse41-intrinsics.ll
index 1916883c201b..0c8e7b317ec6 100644
--- a/test/CodeGen/X86/combine-sse41-intrinsics.ll
+++ b/test/CodeGen/X86/combine-sse41-intrinsics.ll
@@ -1,89 +1,81 @@
-; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=corei7 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s
define <2 x double> @test_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
+; CHECK-LABEL: test_x86_sse41_blend_pd:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 0)
ret <2 x double> %1
}
-; CHECK-LABEL: test_x86_sse41_blend_pd
-; CHECK-NOT: blendpd
-; CHECK: ret
-
define <4 x float> @test_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
+; CHECK-LABEL: test_x86_sse41_blend_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 0)
ret <4 x float> %1
}
-; CHECK-LABEL: test_x86_sse41_blend_ps
-; CHECK-NOT: blendps
-; CHECK: ret
-
define <8 x i16> @test_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: test_x86_sse41_pblend_w:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 0)
ret <8 x i16> %1
}
-; CHECK-LABEL: test_x86_sse41_pblend_w
-; CHECK-NOT: pblendw
-; CHECK: ret
-
define <2 x double> @test2_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
+; CHECK-LABEL: test2_x86_sse41_blend_pd:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 -1)
ret <2 x double> %1
}
-; CHECK-LABEL: test2_x86_sse41_blend_pd
-; CHECK-NOT: blendpd
-; CHECK: movaps %xmm1, %xmm0
-; CHECK-NEXT: ret
-
define <4 x float> @test2_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
+; CHECK-LABEL: test2_x86_sse41_blend_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 -1)
ret <4 x float> %1
}
-; CHECK-LABEL: test2_x86_sse41_blend_ps
-; CHECK-NOT: blendps
-; CHECK: movaps %xmm1, %xmm0
-; CHECK-NEXT: ret
-
define <8 x i16> @test2_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: test2_x86_sse41_pblend_w:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 -1)
ret <8 x i16> %1
}
-; CHECK-LABEL: test2_x86_sse41_pblend_w
-; CHECK-NOT: pblendw
-; CHECK: movaps %xmm1, %xmm0
-; CHECK-NEXT: ret
-
define <2 x double> @test3_x86_sse41_blend_pd(<2 x double> %a0) {
+; CHECK-LABEL: test3_x86_sse41_blend_pd:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a0, i32 7)
ret <2 x double> %1
}
-; CHECK-LABEL: test3_x86_sse41_blend_pd
-; CHECK-NOT: blendpd
-; CHECK: ret
-
define <4 x float> @test3_x86_sse41_blend_ps(<4 x float> %a0) {
+; CHECK-LABEL: test3_x86_sse41_blend_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a0, i32 7)
ret <4 x float> %1
}
-; CHECK-LABEL: test3_x86_sse41_blend_ps
-; CHECK-NOT: blendps
-; CHECK: ret
-
define <8 x i16> @test3_x86_sse41_pblend_w(<8 x i16> %a0) {
+; CHECK-LABEL: test3_x86_sse41_pblend_w:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a0, i32 7)
ret <8 x i16> %1
}
-; CHECK-LABEL: test3_x86_sse41_pblend_w
-; CHECK-NOT: pblendw
-; CHECK: ret
-
declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i32)
declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i32)
diff --git a/test/CodeGen/X86/constant-hoisting-bfi.ll b/test/CodeGen/X86/constant-hoisting-bfi.ll
index 83589b7706f7..d73f7163fd87 100644
--- a/test/CodeGen/X86/constant-hoisting-bfi.ll
+++ b/test/CodeGen/X86/constant-hoisting-bfi.ll
@@ -4,13 +4,13 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Check when BFI is enabled for constant hoisting, constant 214748364701
; will not be hoisted to the func entry.
-; CHECK-LABEL: @foo(
+; CHECK-LABEL: @test1(
; CHECK: entry:
; CHECK-NOT: bitcast i64 214748364701 to i64
; CHECK: if.then:
; Function Attrs: norecurse nounwind uwtable
-define i64 @foo(i64* nocapture %a) {
+define i64 @test1(i64* nocapture %a) {
entry:
%arrayidx = getelementptr inbounds i64, i64* %a, i64 9
%t0 = load i64, i64* %arrayidx, align 8
@@ -52,7 +52,7 @@ return: ; preds = %if.else5, %if.then,
; in while.body will be hoisted to while.body.preheader. 214748364701 in
; if.then16 and if.else10 will be merged and hoisted to the beginning of
; if.else10 because if.else10 dominates if.then16.
-; CHECK-LABEL: @goo(
+; CHECK-LABEL: @test2(
; CHECK: entry:
; CHECK-NOT: bitcast i64 214748364701 to i64
; CHECK: while.body.preheader:
@@ -61,7 +61,7 @@ return: ; preds = %if.else5, %if.then,
; CHECK: if.else10:
; CHECK-NEXT: bitcast i64 214748364701 to i64
; CHECK-NOT: bitcast i64 214748364701 to i64
-define i64 @goo(i64* nocapture %a) {
+define i64 @test2(i64* nocapture %a) {
entry:
%arrayidx = getelementptr inbounds i64, i64* %a, i64 9
%t0 = load i64, i64* %arrayidx, align 8
@@ -113,3 +113,47 @@ return: ; preds = %while.cond.preheade
}
!0 = !{!"branch_weights", i32 1, i32 2000}
+
+; 214748364701 will be hoisted to entry block to reduce code size.
+; CHECK-LABEL: @test3(
+; CHECK: entry:
+; CHECK-NEXT: %const = bitcast i64 214748364701 to i64
+define i64 @test3(i64 %t0) {
+entry:
+ %cmp = icmp ult i64 %t0, 56
+ br i1 %cmp, label %if.then, label %if.else
+
+; CHECK: if.then:
+; CHECK-NOT: %const = bitcast i64 214748364701 to i64
+if.then:
+ %add1 = add i64 %t0, 214748364701
+ br label %return
+
+; CHECK: if.else:
+; CHECK-NOT: %const = bitcast i64 214748364701 to i64
+if.else:
+ %add2 = add i64 %t0, 214748364701
+ br label %return
+
+return:
+ %retval = phi i64 [ %add1, %if.then ], [ %add2, %if.else ]
+ ret i64 %retval
+}
+
+; 214748364701 will not be hoisted to entry block because it will only
+; increase its live range.
+; CHECK-LABEL: @test4(
+; CHECK: nextblock:
+; CHECK-NEXT: %add1 = add i64 %t0, 214748364701
+define i64 @test4(i64 %t0) {
+entry:
+ %cmp = icmp ult i64 %t0, 56
+ br label %nextblock
+
+nextblock:
+ %add1 = add i64 %t0, 214748364701
+ br label %return
+
+return:
+ ret i64 %add1
+}
diff --git a/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll b/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
index 9dd184c8ab31..88778b317b97 100644
--- a/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
+++ b/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
@@ -62,4 +62,128 @@ define void @test_memcpy_args(i8** %Storage) {
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 4, i32 4) ret void
}
+define i8* @test_memmove1(i8* %P, i8* %Q) {
+ ; CHECK: test_memmove
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1, i32 1)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $1, %edx
+ ; CHECK: __llvm_memmove_element_unordered_atomic_1
+}
+
+define i8* @test_memmove2(i8* %P, i8* %Q) {
+ ; CHECK: test_memmove2
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 2, i32 2)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $2, %edx
+ ; CHECK: __llvm_memmove_element_unordered_atomic_2
+}
+
+define i8* @test_memmove4(i8* %P, i8* %Q) {
+ ; CHECK: test_memmove4
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 4, i32 4)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $4, %edx
+ ; CHECK: __llvm_memmove_element_unordered_atomic_4
+}
+
+define i8* @test_memmove8(i8* %P, i8* %Q) {
+ ; CHECK: test_memmove8
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 8, i32 8)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $8, %edx
+ ; CHECK: __llvm_memmove_element_unordered_atomic_8
+}
+
+define i8* @test_memmove16(i8* %P, i8* %Q) {
+ ; CHECK: test_memmove16
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 16, i32 16)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $16, %edx
+ ; CHECK: __llvm_memmove_element_unordered_atomic_16
+}
+
+define void @test_memmove_args(i8** %Storage) {
+ ; CHECK: test_memmove_args
+ %Dst = load i8*, i8** %Storage
+ %Src.addr = getelementptr i8*, i8** %Storage, i64 1
+ %Src = load i8*, i8** %Src.addr
+
+ ; 1st arg (%rdi)
+ ; CHECK-DAG: movq (%rdi), [[REG1:%r.+]]
+ ; CHECK-DAG: movq [[REG1]], %rdi
+ ; 2nd arg (%rsi)
+ ; CHECK-DAG: movq 8(%rdi), %rsi
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $4, %edx
+ ; CHECK: __llvm_memmove_element_unordered_atomic_4
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 4, i32 4) ret void
+}
+
+define i8* @test_memset1(i8* %P, i8 %V) {
+ ; CHECK: test_memset
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 1, i32 1)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $1, %edx
+ ; CHECK: __llvm_memset_element_unordered_atomic_1
+}
+
+define i8* @test_memset2(i8* %P, i8 %V) {
+ ; CHECK: test_memset2
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 2, i32 2)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $2, %edx
+ ; CHECK: __llvm_memset_element_unordered_atomic_2
+}
+
+define i8* @test_memset4(i8* %P, i8 %V) {
+ ; CHECK: test_memset4
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 4, i32 4)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $4, %edx
+ ; CHECK: __llvm_memset_element_unordered_atomic_4
+}
+
+define i8* @test_memset8(i8* %P, i8 %V) {
+ ; CHECK: test_memset8
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %P, i8 %V, i32 8, i32 8)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $8, %edx
+ ; CHECK: __llvm_memset_element_unordered_atomic_8
+}
+
+define i8* @test_memset16(i8* %P, i8 %V) {
+ ; CHECK: test_memset16
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %P, i8 %V, i32 16, i32 16)
+ ret i8* %P
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $16, %edx
+ ; CHECK: __llvm_memset_element_unordered_atomic_16
+}
+
+define void @test_memset_args(i8** %Storage, i8* %V) {
+ ; CHECK: test_memset_args
+ %Dst = load i8*, i8** %Storage
+ %Val = load i8, i8* %V
+
+ ; 1st arg (%rdi)
+ ; CHECK-DAG: movq (%rdi), %rdi
+ ; 2nd arg (%rsi)
+ ; CHECK-DAG: movzbl (%rsi), %esi
+ ; 3rd arg (%edx) -- length
+ ; CHECK-DAG: movl $4, %edx
+ ; CHECK: __llvm_memset_element_unordered_atomic_4
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %Dst, i8 %Val, i32 4, i32 4) ret void
+}
+
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nocapture, i8, i32, i32) nounwind
diff --git a/test/CodeGen/X86/extract-store.ll b/test/CodeGen/X86/extract-store.ll
index 48cb8d70b974..4ea6b7801fb3 100644
--- a/test/CodeGen/X86/extract-store.ll
+++ b/test/CodeGen/X86/extract-store.ll
@@ -345,7 +345,7 @@ define void @extract_i64_1(i64* nocapture %dst, <2 x i64> %foo) nounwind {
; SSE-X32-LABEL: extract_i64_1:
; SSE-X32: # BB#0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; SSE-X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-X32-NEXT: movq %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
diff --git a/test/CodeGen/X86/extractelement-legalization-store-ordering.ll b/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
index 5d5cbc76f92e..4d0b5ccc16b0 100644
--- a/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
+++ b/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -mtriple i386-apple-darwin -mcpu=yonah | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah | FileCheck %s
target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
@@ -6,31 +7,31 @@ target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
; into loads, off the stack or a previous store.
; Be very explicit about the ordering/stack offsets.
-; CHECK-LABEL: test_extractelement_legalization_storereuse:
-; CHECK: # BB#0
-; CHECK-NEXT: pushl %ebx
-; CHECK-NEXT: pushl %edi
-; CHECK-NEXT: pushl %esi
-; CHECK-NEXT: movl 16(%esp), %eax
-; CHECK-NEXT: movl 24(%esp), %ecx
-; CHECK-NEXT: movl 20(%esp), %edx
-; CHECK-NEXT: paddd (%edx), %xmm0
-; CHECK-NEXT: movdqa %xmm0, (%edx)
-; CHECK-NEXT: movl (%edx), %esi
-; CHECK-NEXT: movl 4(%edx), %edi
-; CHECK-NEXT: shll $4, %ecx
-; CHECK-NEXT: movl 8(%edx), %ebx
-; CHECK-NEXT: movl 12(%edx), %edx
-; CHECK-NEXT: movl %esi, 12(%eax,%ecx)
-; CHECK-NEXT: movl %edi, (%eax,%ecx)
-; CHECK-NEXT: movl %ebx, 8(%eax,%ecx)
-; CHECK-NEXT: movl %edx, 4(%eax,%ecx)
-; CHECK-NEXT: popl %esi
-; CHECK-NEXT: popl %edi
-; CHECK-NEXT: popl %ebx
-; CHECK-NEXT: retl
-
define void @test_extractelement_legalization_storereuse(<4 x i32> %a, i32* nocapture %x, i32* nocapture readonly %y, i32 %i) #0 {
+; CHECK-LABEL: test_extractelement_legalization_storereuse:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: paddd (%ecx), %xmm0
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: movdqa %xmm0, (%ecx)
+; CHECK-NEXT: movl (%ecx), %esi
+; CHECK-NEXT: movl 4(%ecx), %edi
+; CHECK-NEXT: shll $4, %edx
+; CHECK-NEXT: movl 8(%ecx), %ebx
+; CHECK-NEXT: movl 12(%ecx), %ecx
+; CHECK-NEXT: movl %esi, 12(%eax,%edx)
+; CHECK-NEXT: movl %edi, (%eax,%edx)
+; CHECK-NEXT: movl %ebx, 8(%eax,%edx)
+; CHECK-NEXT: movl %ecx, 4(%eax,%edx)
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: popl %edi
+; CHECK-NEXT: popl %ebx
+; CHECK-NEXT: retl
+; CHECK-NEXT: ## -- End function
entry:
%0 = bitcast i32* %y to <4 x i32>*
%1 = load <4 x i32>, <4 x i32>* %0, align 16
diff --git a/test/CodeGen/X86/fast-isel-abort-warm.ll b/test/CodeGen/X86/fast-isel-abort-warm.ll
index 3caa91b11ec6..e87d14bb28ad 100644
--- a/test/CodeGen/X86/fast-isel-abort-warm.ll
+++ b/test/CodeGen/X86/fast-isel-abort-warm.ll
@@ -1,4 +1,4 @@
-; RUN: llc -fast-isel -o - %s -fast-isel-report-on-fallback 2>&1 | FileCheck %s
+; RUN: llc -fast-isel -o - %s -fast-isel-report-on-fallback -pass-remarks-missed=isel 2>&1 | FileCheck %s
; Make sure FastISel reports a warning when we ask it to do so.
; Note: This test needs to use something that is not supported by FastISel.
; Thus, this test may start failing if inline asm becomes supported in FastISel.
@@ -6,9 +6,26 @@
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx"
+; CHECK: remark: <unknown>:0:0: FastISel missed call: call void asm sideeffect
; CHECK: warning: Instruction selection used fallback path for foo
define void @foo(){
entry:
call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"()
ret void
}
+
+; CHECK: remark: <unknown>:0:0: FastISel missed: store i128
+; CHECK: warning: Instruction selection used fallback path for test_instruction_fallback
+define void @test_instruction_fallback(i128* %ptr){
+ %v1 = load i128, i128* %ptr
+ %result = add i128 %v1, %v1
+ store i128 %result, i128 * %ptr
+ ret void
+}
+
+; CHECK-NOT: remark: <unknown>:0:0: FastISel missed
+; CHECK-NOT: warning: Instruction selection used fallback path for test_instruction_not_fallback
+define i32 @test_instruction_not_fallback(i32 %a){
+ %result = add i32 %a, %a
+ ret i32 %result
+}
diff --git a/test/CodeGen/X86/fast-isel-gc-intrinsics.ll b/test/CodeGen/X86/fast-isel-gc-intrinsics.ll
new file mode 100644
index 000000000000..bf08ad01d7d8
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-gc-intrinsics.ll
@@ -0,0 +1,57 @@
+; RUN: llc < %s -fast-isel
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+; Don't crash with gc intrinsics.
+
+; A gcrelocate call should not end up in an LLVM machine basic block by itself.
+define i8 addrspace(1)* @test_gcrelocate(i8 addrspace(1)* %v) gc "statepoint-example" {
+entry:
+ %tok = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 0, i8 addrspace(1)* %v)
+ %vnew = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tok, i32 7, i32 7)
+ ret i8 addrspace(1)* %vnew
+}
+
+; gcresult calls are fine in their own blocks.
+define i1 @test_gcresult() gc "statepoint-example" {
+entry:
+ %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0)
+ %call1 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token)
+ ret i1 %call1
+}
+
+; We are okay here because we see the gcrelocate and avoid generating its own
+; block.
+define i1 @test_gcresult_gcrelocate(i8 addrspace(1)* %v) gc "statepoint-example" {
+entry:
+ %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, i8 addrspace(1)* %v)
+ %call1 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token)
+ %vnew = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %safepoint_token, i32 7, i32 7)
+ ret i1 %call1
+}
+
+define i8 addrspace(1)* @test_non_entry_block(i8 addrspace(1)* %v, i8 %val) gc "statepoint-example" {
+entry:
+ %load = load i8, i8 addrspace(1)* %v
+ %cmp = icmp eq i8 %load, %val
+ br i1 %cmp, label %func_call, label %exit
+
+func_call:
+ call void @dummy()
+ %tok = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 0, i8 addrspace(1)* %v)
+ %vnew = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tok, i32 7, i32 7)
+ ret i8 addrspace(1)* %vnew
+
+exit:
+ ret i8 addrspace(1)* %v
+
+}
+
+declare void @dummy()
+declare void @foo()
+
+declare zeroext i1 @return_i1()
+declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0f_i1f(i64, i32, i1 ()*, i32, i32, ...)
+declare i1 @llvm.experimental.gc.result.i1(token)
+declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32, i32)
diff --git a/test/CodeGen/X86/fastisel-softfloat.ll b/test/CodeGen/X86/fastisel-softfloat.ll
new file mode 100644
index 000000000000..e4330db81e1a
--- /dev/null
+++ b/test/CodeGen/X86/fastisel-softfloat.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc %s -o - | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define float @pr26522(float %pat) #0 {
+; CHECK-LABEL: pr26522:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
+ ret float %pat
+}
+
+attributes #0 = { noinline optnone "target-features"="+soft-float" }
diff --git a/test/CodeGen/X86/fp128-i128.ll b/test/CodeGen/X86/fp128-i128.ll
index 6c6bc8bdc1d1..98082ec611d4 100644
--- a/test/CodeGen/X86/fp128-i128.ll
+++ b/test/CodeGen/X86/fp128-i128.ll
@@ -50,8 +50,8 @@ define void @TestUnionLD1(fp128 %s, i64 %n) #0 {
; CHECK-NEXT: andq %rdi, %rcx
; CHECK-NEXT: movabsq $-281474976710656, %rdx # imm = 0xFFFF000000000000
; CHECK-NEXT: andq -{{[0-9]+}}(%rsp), %rdx
-; CHECK-NEXT: orq %rcx, %rdx
; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: orq %rcx, %rdx
; CHECK-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: jmp foo # TAILCALL
diff --git a/test/CodeGen/X86/gather-addresses.ll b/test/CodeGen/X86/gather-addresses.ll
index c3109673468e..e09ad3e4e0b8 100644
--- a/test/CodeGen/X86/gather-addresses.ll
+++ b/test/CodeGen/X86/gather-addresses.ll
@@ -16,10 +16,10 @@
; LIN: sarq $32, %r[[REG2]]
; LIN: movslq %e[[REG4]], %r[[REG3:.+]]
; LIN: sarq $32, %r[[REG4]]
-; LIN: movsd (%rdi,%r[[REG1]],8), %xmm0
-; LIN: movhpd (%rdi,%r[[REG2]],8), %xmm0
-; LIN: movsd (%rdi,%r[[REG3]],8), %xmm1
-; LIN: movhpd (%rdi,%r[[REG4]],8), %xmm1
+; LIN: movsd (%rdi,%r[[REG3]],8), %xmm1
+; LIN: movhpd (%rdi,%r[[REG4]],8), %xmm1
+; LIN: movq %rdi, %xmm1
+; LIN: movq %r[[REG3]], %xmm0
; WIN: movdqa (%rdx), %xmm0
; WIN: pand (%r8), %xmm0
@@ -29,10 +29,10 @@
; WIN: sarq $32, %r[[REG2]]
; WIN: movslq %e[[REG4]], %r[[REG3:.+]]
; WIN: sarq $32, %r[[REG4]]
-; WIN: movsd (%rcx,%r[[REG1]],8), %xmm0
-; WIN: movhpd (%rcx,%r[[REG2]],8), %xmm0
-; WIN: movsd (%rcx,%r[[REG3]],8), %xmm1
-; WIN: movhpd (%rcx,%r[[REG4]],8), %xmm1
+; WIN: movsd (%rcx,%r[[REG3]],8), %xmm1
+; WIN: movhpd (%rcx,%r[[REG4]],8), %xmm1
+; WIN: movdqa (%r[[REG2]]), %xmm0
+; WIN: movq %r[[REG2]], %xmm1
define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
%a = load <4 x i32>, <4 x i32>* %i
diff --git a/test/CodeGen/X86/half.ll b/test/CodeGen/X86/half.ll
index 4c8003f0c516..b7c43d3b2e3e 100644
--- a/test/CodeGen/X86/half.ll
+++ b/test/CodeGen/X86/half.ll
@@ -1,266 +1,833 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=-f16c -asm-verbose=false -fixup-byte-word-insts=1 \
-; RUN: | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LIBCALL -check-prefix=BWON
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=-f16c -asm-verbose=false -fixup-byte-word-insts=0 \
-; RUN: | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LIBCALL -check-prefix=BWOFF
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+f16c -asm-verbose=false -fixup-byte-word-insts=1 \
-; RUN: | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-F16C -check-prefix=BWON
-; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr +sse2 -asm-verbose=false -fixup-byte-word-insts=0 \
-; RUN: | FileCheck %s -check-prefix=CHECK-I686
-
-define void @test_load_store(half* %in, half* %out) {
-; CHECK-LABEL: test_load_store:
-; BWON: movzwl (%rdi), %eax
-; BWOFF: movw (%rdi), %ax
-; CHECK: movw %ax, (%rsi)
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=-f16c -fixup-byte-word-insts=1 \
+; RUN: | FileCheck %s -check-prefixes=CHECK,CHECK-LIBCALL,BWON,BWON-NOF16C
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=-f16c -fixup-byte-word-insts=0 \
+; RUN: | FileCheck %s -check-prefixes=CHECK,CHECK-LIBCALL,BWOFF
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+f16c -fixup-byte-word-insts=1 \
+; RUN: | FileCheck %s -check-prefixes=CHECK,BWON,BWON-F16C
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr +sse2 -fixup-byte-word-insts=0 \
+; RUN: | FileCheck %s -check-prefixes=CHECK-I686
+
+define void @test_load_store(half* %in, half* %out) #0 {
+; BWON-LABEL: test_load_store:
+; BWON: # BB#0:
+; BWON-NEXT: movzwl (%rdi), %eax
+; BWON-NEXT: movw %ax, (%rsi)
+; BWON-NEXT: retq
+;
+; BWOFF-LABEL: test_load_store:
+; BWOFF: # BB#0:
+; BWOFF-NEXT: movw (%rdi), %ax
+; BWOFF-NEXT: movw %ax, (%rsi)
+; BWOFF-NEXT: retq
+;
+; CHECK-I686-LABEL: test_load_store:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-I686-NEXT: movw (%ecx), %cx
+; CHECK-I686-NEXT: movw %cx, (%eax)
+; CHECK-I686-NEXT: retl
%val = load half, half* %in
store half %val, half* %out
ret void
}
-define i16 @test_bitcast_from_half(half* %addr) {
-; CHECK-LABEL: test_bitcast_from_half:
-; BWON: movzwl (%rdi), %eax
-; BWOFF: movw (%rdi), %ax
+define i16 @test_bitcast_from_half(half* %addr) #0 {
+; BWON-LABEL: test_bitcast_from_half:
+; BWON: # BB#0:
+; BWON-NEXT: movzwl (%rdi), %eax
+; BWON-NEXT: retq
+;
+; BWOFF-LABEL: test_bitcast_from_half:
+; BWOFF: # BB#0:
+; BWOFF-NEXT: movw (%rdi), %ax
+; BWOFF-NEXT: retq
+;
+; CHECK-I686-LABEL: test_bitcast_from_half:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-I686-NEXT: movw (%eax), %ax
+; CHECK-I686-NEXT: retl
%val = load half, half* %addr
%val_int = bitcast half %val to i16
ret i16 %val_int
}
-define void @test_bitcast_to_half(half* %addr, i16 %in) {
+define void @test_bitcast_to_half(half* %addr, i16 %in) #0 {
; CHECK-LABEL: test_bitcast_to_half:
-; CHECK: movw %si, (%rdi)
+; CHECK: # BB#0:
+; CHECK-NEXT: movw %si, (%rdi)
+; CHECK-NEXT: retq
+;
+; CHECK-I686-LABEL: test_bitcast_to_half:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: movw {{[0-9]+}}(%esp), %ax
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-I686-NEXT: movw %ax, (%ecx)
+; CHECK-I686-NEXT: retl
%val_fp = bitcast i16 %in to half
store half %val_fp, half* %addr
ret void
}
-define float @test_extend32(half* %addr) {
-; CHECK-LABEL: test_extend32:
-
-; CHECK-LIBCALL: jmp __gnu_h2f_ieee
-; CHECK-F16C: vcvtph2ps
+define float @test_extend32(half* %addr) #0 {
+; CHECK-LIBCALL-LABEL: test_extend32:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
+; CHECK-LIBCALL-NEXT: jmp __gnu_h2f_ieee # TAILCALL
+;
+; BWON-F16C-LABEL: test_extend32:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: movswl (%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_extend32:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: subl $12, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-I686-NEXT: movzwl (%eax), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: addl $12, %esp
+; CHECK-I686-NEXT: retl
%val16 = load half, half* %addr
%val32 = fpext half %val16 to float
ret float %val32
}
-define double @test_extend64(half* %addr) {
-; CHECK-LABEL: test_extend64:
-
-; CHECK-LIBCALL: callq __gnu_h2f_ieee
-; CHECK-LIBCALL: cvtss2sd
-; CHECK-F16C: vcvtph2ps
-; CHECK-F16C: vcvtss2sd
+define double @test_extend64(half* %addr) #0 {
+; CHECK-LIBCALL-LABEL: test_extend64:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rax
+; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: cvtss2sd %xmm0, %xmm0
+; CHECK-LIBCALL-NEXT: popq %rax
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_extend64:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: movswl (%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_extend64:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: subl $12, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-I686-NEXT: movzwl (%eax), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: addl $12, %esp
+; CHECK-I686-NEXT: retl
%val16 = load half, half* %addr
%val32 = fpext half %val16 to double
ret double %val32
}
-define void @test_trunc32(float %in, half* %addr) {
-; CHECK-LABEL: test_trunc32:
-
-; CHECK-LIBCALL: callq __gnu_f2h_ieee
-; CHECK-F16C: vcvtps2ph
+define void @test_trunc32(float %in, half* %addr) #0 {
+; CHECK-LIBCALL-LABEL: test_trunc32:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rbx
+; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: movw %ax, (%rbx)
+; CHECK-LIBCALL-NEXT: popq %rbx
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_trunc32:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; BWON-F16C-NEXT: vmovd %xmm0, %eax
+; BWON-F16C-NEXT: movw %ax, (%rdi)
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_trunc32:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: pushl %esi
+; CHECK-I686-NEXT: subl $8, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: movss %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __gnu_f2h_ieee
+; CHECK-I686-NEXT: movw %ax, (%esi)
+; CHECK-I686-NEXT: addl $8, %esp
+; CHECK-I686-NEXT: popl %esi
+; CHECK-I686-NEXT: retl
%val16 = fptrunc float %in to half
store half %val16, half* %addr
ret void
}
-define void @test_trunc64(double %in, half* %addr) {
+define void @test_trunc64(double %in, half* %addr) #0 {
; CHECK-LABEL: test_trunc64:
-
-; CHECK-LIBCALL: callq __truncdfhf2
-; CHECK-F16C: callq __truncdfhf2
+; CHECK: # BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: callq __truncdfhf2
+; CHECK-NEXT: movw %ax, (%rbx)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+;
+; CHECK-I686-LABEL: test_trunc64:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: pushl %esi
+; CHECK-I686-NEXT: subl $8, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-I686-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-I686-NEXT: movsd %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __truncdfhf2
+; CHECK-I686-NEXT: movw %ax, (%esi)
+; CHECK-I686-NEXT: addl $8, %esp
+; CHECK-I686-NEXT: popl %esi
+; CHECK-I686-NEXT: retl
%val16 = fptrunc double %in to half
store half %val16, half* %addr
ret void
}
define i64 @test_fptosi_i64(half* %p) #0 {
-; CHECK-LABEL: test_fptosi_i64:
-
-; CHECK-LIBCALL-NEXT: pushq %rax
-; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
-; CHECK-LIBCALL-NEXT: cvttss2si %xmm0, %rax
-; CHECK-LIBCALL-NEXT: popq %rcx
-; CHECK-LIBCALL-NEXT: retq
-
-; CHECK-F16C-NEXT: movswl (%rdi), [[REG0:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vmovd [[REG0]], [[REG1:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vcvtph2ps [[REG1]], [[REG2:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vcvttss2si [[REG2]], %rax
-; CHECK-F16C-NEXT: retq
+; CHECK-LIBCALL-LABEL: test_fptosi_i64:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rax
+; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: cvttss2si %xmm0, %rax
+; CHECK-LIBCALL-NEXT: popq %rcx
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_fptosi_i64:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: movswl (%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: vcvttss2si %xmm0, %rax
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_fptosi_i64:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: subl $12, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-I686-NEXT: movzwl (%eax), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstps (%esp)
+; CHECK-I686-NEXT: calll __fixsfdi
+; CHECK-I686-NEXT: addl $12, %esp
+; CHECK-I686-NEXT: retl
%a = load half, half* %p, align 2
%r = fptosi half %a to i64
ret i64 %r
}
define void @test_sitofp_i64(i64 %a, half* %p) #0 {
-; CHECK-LABEL: test_sitofp_i64:
-
-; CHECK-LIBCALL-NEXT: pushq [[ADDR:%[a-z]+]]
-; CHECK-LIBCALL-NEXT: movq %rsi, [[ADDR]]
-; CHECK-LIBCALL-NEXT: cvtsi2ssq %rdi, %xmm0
-; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
-; CHECK-LIBCALL-NEXT: movw %ax, ([[ADDR]])
-; CHECK_LIBCALL-NEXT: popq [[ADDR]]
-; CHECK_LIBCALL-NEXT: retq
-
-; CHECK-F16C-NEXT: vcvtsi2ssq %rdi, [[REG0:%[a-z0-9]+]], [[REG0]]
-; CHECK-F16C-NEXT: vcvtps2ph $4, [[REG0]], [[REG0]]
-; CHECK-F16C-NEXT: vmovd [[REG0]], %eax
-; CHECK-F16C-NEXT: movw %ax, (%rsi)
-; CHECK-F16C-NEXT: retq
+; CHECK-LIBCALL-LABEL: test_sitofp_i64:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rbx
+; CHECK-LIBCALL-NEXT: movq %rsi, %rbx
+; CHECK-LIBCALL-NEXT: cvtsi2ssq %rdi, %xmm0
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: movw %ax, (%rbx)
+; CHECK-LIBCALL-NEXT: popq %rbx
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_sitofp_i64:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; BWON-F16C-NEXT: vmovd %xmm0, %eax
+; BWON-F16C-NEXT: movw %ax, (%rsi)
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_sitofp_i64:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: pushl %esi
+; CHECK-I686-NEXT: subl $24, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-I686-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-I686-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: fildll {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: fstps {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: movss %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __gnu_f2h_ieee
+; CHECK-I686-NEXT: movw %ax, (%esi)
+; CHECK-I686-NEXT: addl $24, %esp
+; CHECK-I686-NEXT: popl %esi
+; CHECK-I686-NEXT: retl
%r = sitofp i64 %a to half
store half %r, half* %p
ret void
}
define i64 @test_fptoui_i64(half* %p) #0 {
-; CHECK-LABEL: test_fptoui_i64:
-
-; FP_TO_UINT is expanded using FP_TO_SINT
-; CHECK-LIBCALL-NEXT: pushq %rax
-; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
-; CHECK-LIBCALL-NEXT: movss {{.[A-Z_0-9]+}}(%rip), [[REG1:%[a-z0-9]+]]
-; CHECK-LIBCALL-NEXT: movaps %xmm0, [[REG2:%[a-z0-9]+]]
-; CHECK-LIBCALL-NEXT: subss [[REG1]], [[REG2]]
-; CHECK-LIBCALL-NEXT: cvttss2si [[REG2]], [[REG3:%[a-z0-9]+]]
-; CHECK-LIBCALL-NEXT: movabsq $-9223372036854775808, [[REG4:%[a-z0-9]+]]
-; CHECK-LIBCALL-NEXT: xorq [[REG3]], [[REG4]]
-; CHECK-LIBCALL-NEXT: cvttss2si %xmm0, [[REG5:%[a-z0-9]+]]
-; CHECK-LIBCALL-NEXT: ucomiss [[REG1]], %xmm0
-; CHECK-LIBCALL-NEXT: cmovaeq [[REG4]], [[REG5]]
-; CHECK-LIBCALL-NEXT: popq %rcx
-; CHECK-LIBCALL-NEXT: retq
-
-; CHECK-F16C-NEXT: movswl (%rdi), [[REG0:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vmovd [[REG0]], [[REG1:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vcvtph2ps [[REG1]], [[REG2:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vmovss {{.[A-Z_0-9]+}}(%rip), [[REG3:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vsubss [[REG3]], [[REG2]], [[REG4:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vcvttss2si [[REG4]], [[REG5:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: movabsq $-9223372036854775808, [[REG6:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: xorq [[REG5]], [[REG6:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vcvttss2si [[REG2]], [[REG7:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vucomiss [[REG3]], [[REG2]]
-; CHECK-F16C-NEXT: cmovaeq [[REG6]], %rax
-; CHECK-F16C-NEXT: retq
+; CHECK-LIBCALL-LABEL: test_fptoui_i64:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rax
+; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-LIBCALL-NEXT: movaps %xmm0, %xmm2
+; CHECK-LIBCALL-NEXT: subss %xmm1, %xmm2
+; CHECK-LIBCALL-NEXT: cvttss2si %xmm2, %rax
+; CHECK-LIBCALL-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
+; CHECK-LIBCALL-NEXT: xorq %rax, %rcx
+; CHECK-LIBCALL-NEXT: cvttss2si %xmm0, %rax
+; CHECK-LIBCALL-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-LIBCALL-NEXT: cmovaeq %rcx, %rax
+; CHECK-LIBCALL-NEXT: popq %rcx
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_fptoui_i64:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: movswl (%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; BWON-F16C-NEXT: vsubss %xmm1, %xmm0, %xmm2
+; BWON-F16C-NEXT: vcvttss2si %xmm2, %rax
+; BWON-F16C-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
+; BWON-F16C-NEXT: xorq %rax, %rcx
+; BWON-F16C-NEXT: vcvttss2si %xmm0, %rax
+; BWON-F16C-NEXT: vucomiss %xmm1, %xmm0
+; BWON-F16C-NEXT: cmovaeq %rcx, %rax
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_fptoui_i64:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: subl $12, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-I686-NEXT: movzwl (%eax), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstps (%esp)
+; CHECK-I686-NEXT: calll __fixunssfdi
+; CHECK-I686-NEXT: addl $12, %esp
+; CHECK-I686-NEXT: retl
%a = load half, half* %p, align 2
%r = fptoui half %a to i64
ret i64 %r
}
define void @test_uitofp_i64(i64 %a, half* %p) #0 {
-; CHECK-LABEL: test_uitofp_i64:
-; CHECK-LIBCALL-NEXT: pushq [[ADDR:%[a-z0-9]+]]
-; CHECK-LIBCALL-NEXT: movq %rsi, [[ADDR]]
-; CHECK-NEXT: testq %rdi, %rdi
-; CHECK-NEXT: js [[LABEL1:.LBB[0-9_]+]]
-
-; simple conversion to float if non-negative
-; CHECK-LIBCALL-NEXT: cvtsi2ssq %rdi, [[REG1:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vcvtsi2ssq %rdi, [[REG1:%[a-z0-9]+]], [[REG1]]
-; CHECK-NEXT: jmp [[LABEL2:.LBB[0-9_]+]]
-
-; convert using shift+or if negative
-; CHECK-NEXT: [[LABEL1]]:
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: shrq %rax
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: orq %rax, [[REG2:%[a-z0-9]+]]
-; CHECK-LIBCALL-NEXT: cvtsi2ssq [[REG2]], [[REG3:%[a-z0-9]+]]
-; CHECK-LIBCALL-NEXT: addss [[REG3]], [[REG1]]
-; CHECK-F16C-NEXT: vcvtsi2ssq [[REG2]], [[REG3:%[a-z0-9]+]], [[REG3]]
-; CHECK-F16C-NEXT: vaddss [[REG3]], [[REG3]], [[REG1:[%a-z0-9]+]]
-
-; convert float to half
-; CHECK-NEXT: [[LABEL2]]:
-; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
-; CHECK-LIBCALL-NEXT: movw %ax, ([[ADDR]])
-; CHECK-LIBCALL-NEXT: popq [[ADDR]]
-; CHECK-F16C-NEXT: vcvtps2ph $4, [[REG1]], [[REG4:%[a-z0-9]+]]
-; CHECK-F16C-NEXT: vmovd [[REG4]], %eax
-; CHECK-F16C-NEXT: movw %ax, (%rsi)
-; CHECK-NEXT: retq
-
+; CHECK-LIBCALL-LABEL: test_uitofp_i64:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rbx
+; CHECK-LIBCALL-NEXT: movq %rsi, %rbx
+; CHECK-LIBCALL-NEXT: testq %rdi, %rdi
+; CHECK-LIBCALL-NEXT: js .LBB10_1
+; CHECK-LIBCALL-NEXT: # BB#2:
+; CHECK-LIBCALL-NEXT: cvtsi2ssq %rdi, %xmm0
+; CHECK-LIBCALL-NEXT: jmp .LBB10_3
+; CHECK-LIBCALL-NEXT: .LBB10_1:
+; CHECK-LIBCALL-NEXT: movq %rdi, %rax
+; CHECK-LIBCALL-NEXT: shrq %rax
+; CHECK-LIBCALL-NEXT: andl $1, %edi
+; CHECK-LIBCALL-NEXT: orq %rax, %rdi
+; CHECK-LIBCALL-NEXT: cvtsi2ssq %rdi, %xmm0
+; CHECK-LIBCALL-NEXT: addss %xmm0, %xmm0
+; CHECK-LIBCALL-NEXT: .LBB10_3:
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: movw %ax, (%rbx)
+; CHECK-LIBCALL-NEXT: popq %rbx
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_uitofp_i64:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: testq %rdi, %rdi
+; BWON-F16C-NEXT: js .LBB10_1
+; BWON-F16C-NEXT: # BB#2:
+; BWON-F16C-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
+; BWON-F16C-NEXT: jmp .LBB10_3
+; BWON-F16C-NEXT: .LBB10_1:
+; BWON-F16C-NEXT: movq %rdi, %rax
+; BWON-F16C-NEXT: shrq %rax
+; BWON-F16C-NEXT: andl $1, %edi
+; BWON-F16C-NEXT: orq %rax, %rdi
+; BWON-F16C-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
+; BWON-F16C-NEXT: vaddss %xmm0, %xmm0, %xmm0
+; BWON-F16C-NEXT: .LBB10_3:
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; BWON-F16C-NEXT: vmovd %xmm0, %eax
+; BWON-F16C-NEXT: movw %ax, (%rsi)
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_uitofp_i64:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: pushl %esi
+; CHECK-I686-NEXT: subl $24, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-I686-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-I686-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: xorl %eax, %eax
+; CHECK-I686-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: setns %al
+; CHECK-I686-NEXT: fildll {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; CHECK-I686-NEXT: fstps (%esp)
+; CHECK-I686-NEXT: calll __gnu_f2h_ieee
+; CHECK-I686-NEXT: movw %ax, (%esi)
+; CHECK-I686-NEXT: addl $24, %esp
+; CHECK-I686-NEXT: popl %esi
+; CHECK-I686-NEXT: retl
%r = uitofp i64 %a to half
store half %r, half* %p
ret void
}
define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
-; CHECK-LABEL: test_extend32_vec4:
-
-; CHECK-LIBCALL: callq __gnu_h2f_ieee
-; CHECK-LIBCALL: callq __gnu_h2f_ieee
-; CHECK-LIBCALL: callq __gnu_h2f_ieee
-; CHECK-LIBCALL: callq __gnu_h2f_ieee
-; CHECK-F16C: vcvtph2ps
-; CHECK-F16C: vcvtph2ps
-; CHECK-F16C: vcvtph2ps
-; CHECK-F16C: vcvtph2ps
+; CHECK-LIBCALL-LABEL: test_extend32_vec4:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rbx
+; CHECK-LIBCALL-NEXT: subq $48, %rsp
+; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
+; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
+; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
+; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-LIBCALL-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-LIBCALL-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; CHECK-LIBCALL-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-LIBCALL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; CHECK-LIBCALL-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-LIBCALL-NEXT: addq $48, %rsp
+; CHECK-LIBCALL-NEXT: popq %rbx
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_extend32_vec4:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: movswl 6(%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: movswl 4(%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm1
+; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
+; BWON-F16C-NEXT: movswl (%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm2
+; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
+; BWON-F16C-NEXT: movswl 2(%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm3
+; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
+; BWON-F16C-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; BWON-F16C-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; BWON-F16C-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_extend32_vec4:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: pushl %esi
+; CHECK-I686-NEXT: subl $56, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-I686-NEXT: movzwl 2(%esi), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstpt {{[0-9]+}}(%esp) # 10-byte Folded Spill
+; CHECK-I686-NEXT: movzwl 4(%esi), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstpt {{[0-9]+}}(%esp) # 10-byte Folded Spill
+; CHECK-I686-NEXT: movzwl 6(%esi), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: movzwl (%esi), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: fstps {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: fldt {{[0-9]+}}(%esp) # 10-byte Folded Reload
+; CHECK-I686-NEXT: fstps {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: fldt {{[0-9]+}}(%esp) # 10-byte Folded Reload
+; CHECK-I686-NEXT: fstps {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstps {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-I686-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-I686-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-I686-NEXT: addl $56, %esp
+; CHECK-I686-NEXT: popl %esi
+; CHECK-I686-NEXT: retl
%a = load <4 x half>, <4 x half>* %p, align 8
%b = fpext <4 x half> %a to <4 x float>
ret <4 x float> %b
}
define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
-; CHECK-LABEL: test_extend64_vec4
-
-; CHECK-LIBCALL: callq __gnu_h2f_ieee
-; CHECK-LIBCALL-DAG: callq __gnu_h2f_ieee
-; CHECK-LIBCALL-DAG: callq __gnu_h2f_ieee
-; CHECK-LIBCALL-DAG: callq __gnu_h2f_ieee
-; CHECK-LIBCALL-DAG: cvtss2sd
-; CHECK-LIBCALL-DAG: cvtss2sd
-; CHECK-LIBCALL-DAG: cvtss2sd
-; CHECK-LIBCALL: cvtss2sd
-; CHECK-F16C: vcvtph2ps
-; CHECK-F16C-DAG: vcvtph2ps
-; CHECK-F16C-DAG: vcvtph2ps
-; CHECK-F16C-DAG: vcvtph2ps
-; CHECK-F16C-DAG: vcvtss2sd
-; CHECK-F16C-DAG: vcvtss2sd
-; CHECK-F16C-DAG: vcvtss2sd
-; CHECK-F16C: vcvtss2sd
+; CHECK-LIBCALL-LABEL: test_extend64_vec4:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rbx
+; CHECK-LIBCALL-NEXT: subq $16, %rsp
+; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
+; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: cvtss2sd %xmm0, %xmm1
+; CHECK-LIBCALL-NEXT: movss {{[0-9]+}}(%rsp), %xmm0 # 4-byte Reload
+; CHECK-LIBCALL-NEXT: # xmm0 = mem[0],zero,zero,zero
+; CHECK-LIBCALL-NEXT: cvtss2sd %xmm0, %xmm0
+; CHECK-LIBCALL-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-LIBCALL-NEXT: movss {{[0-9]+}}(%rsp), %xmm1 # 4-byte Reload
+; CHECK-LIBCALL-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-LIBCALL-NEXT: cvtss2sd %xmm1, %xmm2
+; CHECK-LIBCALL-NEXT: movss {{[0-9]+}}(%rsp), %xmm1 # 4-byte Reload
+; CHECK-LIBCALL-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-LIBCALL-NEXT: cvtss2sd %xmm1, %xmm1
+; CHECK-LIBCALL-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-LIBCALL-NEXT: addq $16, %rsp
+; CHECK-LIBCALL-NEXT: popq %rbx
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_extend64_vec4:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: movswl (%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: movswl 2(%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm1
+; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
+; BWON-F16C-NEXT: movswl 4(%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm2
+; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
+; BWON-F16C-NEXT: movswl 6(%rdi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm3
+; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
+; BWON-F16C-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
+; BWON-F16C-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
+; BWON-F16C-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; BWON-F16C-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
+; BWON-F16C-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; BWON-F16C-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; BWON-F16C-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_extend64_vec4:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: pushl %esi
+; CHECK-I686-NEXT: subl $88, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-I686-NEXT: movzwl 6(%esi), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstpt {{[0-9]+}}(%esp) # 10-byte Folded Spill
+; CHECK-I686-NEXT: movzwl 4(%esi), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstpt {{[0-9]+}}(%esp) # 10-byte Folded Spill
+; CHECK-I686-NEXT: movzwl 2(%esi), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstpt {{[0-9]+}}(%esp) # 10-byte Folded Spill
+; CHECK-I686-NEXT: movzwl (%esi), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstpl {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: fldt {{[0-9]+}}(%esp) # 10-byte Folded Reload
+; CHECK-I686-NEXT: fstpl {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: fldt {{[0-9]+}}(%esp) # 10-byte Folded Reload
+; CHECK-I686-NEXT: fstpl {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: fldt {{[0-9]+}}(%esp) # 10-byte Folded Reload
+; CHECK-I686-NEXT: fstpl {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-I686-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; CHECK-I686-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-I686-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; CHECK-I686-NEXT: addl $88, %esp
+; CHECK-I686-NEXT: popl %esi
+; CHECK-I686-NEXT: retl
%a = load <4 x half>, <4 x half>* %p, align 8
%b = fpext <4 x half> %a to <4 x double>
ret <4 x double> %b
}
-define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) {
-; CHECK-LABEL: test_trunc32_vec4:
-
-; CHECK-LIBCALL: callq __gnu_f2h_ieee
-; CHECK-LIBCALL: callq __gnu_f2h_ieee
-; CHECK-LIBCALL: callq __gnu_f2h_ieee
-; CHECK-LIBCALL: callq __gnu_f2h_ieee
-; CHECK-F16C: vcvtps2ph
-; CHECK-F16C: vcvtps2ph
-; CHECK-F16C: vcvtps2ph
-; CHECK-F16C: vcvtps2ph
-; CHECK: movw
-; CHECK: movw
-; CHECK: movw
-; CHECK: movw
+define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
+; BWON-NOF16C-LABEL: test_trunc32_vec4:
+; BWON-NOF16C: # BB#0:
+; BWON-NOF16C-NEXT: pushq %rbp
+; BWON-NOF16C-NEXT: pushq %r15
+; BWON-NOF16C-NEXT: pushq %r14
+; BWON-NOF16C-NEXT: pushq %rbx
+; BWON-NOF16C-NEXT: subq $24, %rsp
+; BWON-NOF16C-NEXT: movq %rdi, %rbx
+; BWON-NOF16C-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; BWON-NOF16C-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee
+; BWON-NOF16C-NEXT: movl %eax, %r14d
+; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWON-NOF16C-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee
+; BWON-NOF16C-NEXT: movl %eax, %r15d
+; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWON-NOF16C-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee
+; BWON-NOF16C-NEXT: movl %eax, %ebp
+; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee
+; BWON-NOF16C-NEXT: movw %ax, (%rbx)
+; BWON-NOF16C-NEXT: movw %bp, 6(%rbx)
+; BWON-NOF16C-NEXT: movw %r15w, 4(%rbx)
+; BWON-NOF16C-NEXT: movw %r14w, 2(%rbx)
+; BWON-NOF16C-NEXT: addq $24, %rsp
+; BWON-NOF16C-NEXT: popq %rbx
+; BWON-NOF16C-NEXT: popq %r14
+; BWON-NOF16C-NEXT: popq %r15
+; BWON-NOF16C-NEXT: popq %rbp
+; BWON-NOF16C-NEXT: retq
+;
+; BWOFF-LABEL: test_trunc32_vec4:
+; BWOFF: # BB#0:
+; BWOFF-NEXT: pushq %rbp
+; BWOFF-NEXT: pushq %r15
+; BWOFF-NEXT: pushq %r14
+; BWOFF-NEXT: pushq %rbx
+; BWOFF-NEXT: subq $24, %rsp
+; BWOFF-NEXT: movq %rdi, %rbx
+; BWOFF-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; BWOFF-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; BWOFF-NEXT: callq __gnu_f2h_ieee
+; BWOFF-NEXT: movw %ax, %r14w
+; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWOFF-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; BWOFF-NEXT: callq __gnu_f2h_ieee
+; BWOFF-NEXT: movw %ax, %r15w
+; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWOFF-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; BWOFF-NEXT: callq __gnu_f2h_ieee
+; BWOFF-NEXT: movw %ax, %bp
+; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWOFF-NEXT: callq __gnu_f2h_ieee
+; BWOFF-NEXT: movw %ax, (%rbx)
+; BWOFF-NEXT: movw %bp, 6(%rbx)
+; BWOFF-NEXT: movw %r15w, 4(%rbx)
+; BWOFF-NEXT: movw %r14w, 2(%rbx)
+; BWOFF-NEXT: addq $24, %rsp
+; BWOFF-NEXT: popq %rbx
+; BWOFF-NEXT: popq %r14
+; BWOFF-NEXT: popq %r15
+; BWOFF-NEXT: popq %rbp
+; BWOFF-NEXT: retq
+;
+; BWON-F16C-LABEL: test_trunc32_vec4:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; BWON-F16C-NEXT: vmovd %xmm1, %eax
+; BWON-F16C-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; BWON-F16C-NEXT: vmovd %xmm1, %ecx
+; BWON-F16C-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; BWON-F16C-NEXT: vmovd %xmm1, %edx
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; BWON-F16C-NEXT: vmovd %xmm0, %esi
+; BWON-F16C-NEXT: movw %si, (%rdi)
+; BWON-F16C-NEXT: movw %dx, 6(%rdi)
+; BWON-F16C-NEXT: movw %cx, 4(%rdi)
+; BWON-F16C-NEXT: movw %ax, 2(%rdi)
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_trunc32_vec4:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: pushl %ebp
+; CHECK-I686-NEXT: pushl %ebx
+; CHECK-I686-NEXT: pushl %edi
+; CHECK-I686-NEXT: pushl %esi
+; CHECK-I686-NEXT: subl $44, %esp
+; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; CHECK-I686-NEXT: movaps %xmm0, %xmm1
+; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; CHECK-I686-NEXT: movss %xmm1, (%esp)
+; CHECK-I686-NEXT: calll __gnu_f2h_ieee
+; CHECK-I686-NEXT: movw %ax, %si
+; CHECK-I686-NEXT: movaps {{[0-9]+}}(%esp), %xmm0 # 16-byte Reload
+; CHECK-I686-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; CHECK-I686-NEXT: movss %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __gnu_f2h_ieee
+; CHECK-I686-NEXT: movw %ax, %di
+; CHECK-I686-NEXT: movaps {{[0-9]+}}(%esp), %xmm0 # 16-byte Reload
+; CHECK-I686-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; CHECK-I686-NEXT: movss %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __gnu_f2h_ieee
+; CHECK-I686-NEXT: movw %ax, %bx
+; CHECK-I686-NEXT: movaps {{[0-9]+}}(%esp), %xmm0 # 16-byte Reload
+; CHECK-I686-NEXT: movss %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __gnu_f2h_ieee
+; CHECK-I686-NEXT: movw %ax, (%ebp)
+; CHECK-I686-NEXT: movw %bx, 6(%ebp)
+; CHECK-I686-NEXT: movw %di, 4(%ebp)
+; CHECK-I686-NEXT: movw %si, 2(%ebp)
+; CHECK-I686-NEXT: addl $44, %esp
+; CHECK-I686-NEXT: popl %esi
+; CHECK-I686-NEXT: popl %edi
+; CHECK-I686-NEXT: popl %ebx
+; CHECK-I686-NEXT: popl %ebp
+; CHECK-I686-NEXT: retl
%v = fptrunc <4 x float> %a to <4 x half>
store <4 x half> %v, <4 x half>* %p
ret void
}
-define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) {
-; CHECK-LABEL: test_trunc64_vec4:
-; CHECK: callq __truncdfhf2
-; CHECK: callq __truncdfhf2
-; CHECK: callq __truncdfhf2
-; CHECK: callq __truncdfhf2
-; CHECK: movw
-; CHECK: movw
-; CHECK: movw
-; CHECK: movw
+define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
+; BWON-NOF16C-LABEL: test_trunc64_vec4:
+; BWON-NOF16C: # BB#0:
+; BWON-NOF16C-NEXT: pushq %rbp
+; BWON-NOF16C-NEXT: pushq %r15
+; BWON-NOF16C-NEXT: pushq %r14
+; BWON-NOF16C-NEXT: pushq %rbx
+; BWON-NOF16C-NEXT: subq $40, %rsp
+; BWON-NOF16C-NEXT: movq %rdi, %rbx
+; BWON-NOF16C-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; BWON-NOF16C-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
+; BWON-NOF16C-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; BWON-NOF16C-NEXT: callq __truncdfhf2
+; BWON-NOF16C-NEXT: movl %eax, %r14d
+; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWON-NOF16C-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; BWON-NOF16C-NEXT: callq __truncdfhf2
+; BWON-NOF16C-NEXT: movl %eax, %r15d
+; BWON-NOF16C-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; BWON-NOF16C-NEXT: callq __truncdfhf2
+; BWON-NOF16C-NEXT: movl %eax, %ebp
+; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWON-NOF16C-NEXT: callq __truncdfhf2
+; BWON-NOF16C-NEXT: movw %ax, 4(%rbx)
+; BWON-NOF16C-NEXT: movw %bp, (%rbx)
+; BWON-NOF16C-NEXT: movw %r15w, 6(%rbx)
+; BWON-NOF16C-NEXT: movw %r14w, 2(%rbx)
+; BWON-NOF16C-NEXT: addq $40, %rsp
+; BWON-NOF16C-NEXT: popq %rbx
+; BWON-NOF16C-NEXT: popq %r14
+; BWON-NOF16C-NEXT: popq %r15
+; BWON-NOF16C-NEXT: popq %rbp
+; BWON-NOF16C-NEXT: retq
+;
+; BWOFF-LABEL: test_trunc64_vec4:
+; BWOFF: # BB#0:
+; BWOFF-NEXT: pushq %rbp
+; BWOFF-NEXT: pushq %r15
+; BWOFF-NEXT: pushq %r14
+; BWOFF-NEXT: pushq %rbx
+; BWOFF-NEXT: subq $40, %rsp
+; BWOFF-NEXT: movq %rdi, %rbx
+; BWOFF-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; BWOFF-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
+; BWOFF-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; BWOFF-NEXT: callq __truncdfhf2
+; BWOFF-NEXT: movw %ax, %r14w
+; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWOFF-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; BWOFF-NEXT: callq __truncdfhf2
+; BWOFF-NEXT: movw %ax, %r15w
+; BWOFF-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; BWOFF-NEXT: callq __truncdfhf2
+; BWOFF-NEXT: movw %ax, %bp
+; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; BWOFF-NEXT: callq __truncdfhf2
+; BWOFF-NEXT: movw %ax, 4(%rbx)
+; BWOFF-NEXT: movw %bp, (%rbx)
+; BWOFF-NEXT: movw %r15w, 6(%rbx)
+; BWOFF-NEXT: movw %r14w, 2(%rbx)
+; BWOFF-NEXT: addq $40, %rsp
+; BWOFF-NEXT: popq %rbx
+; BWOFF-NEXT: popq %r14
+; BWOFF-NEXT: popq %r15
+; BWOFF-NEXT: popq %rbp
+; BWOFF-NEXT: retq
+;
+; BWON-F16C-LABEL: test_trunc64_vec4:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: pushq %rbp
+; BWON-F16C-NEXT: pushq %r15
+; BWON-F16C-NEXT: pushq %r14
+; BWON-F16C-NEXT: pushq %rbx
+; BWON-F16C-NEXT: subq $88, %rsp
+; BWON-F16C-NEXT: movq %rdi, %rbx
+; BWON-F16C-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
+; BWON-F16C-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; BWON-F16C-NEXT: vzeroupper
+; BWON-F16C-NEXT: callq __truncdfhf2
+; BWON-F16C-NEXT: movl %eax, %r14d
+; BWON-F16C-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; BWON-F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
+; BWON-F16C-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
+; BWON-F16C-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; BWON-F16C-NEXT: vzeroupper
+; BWON-F16C-NEXT: callq __truncdfhf2
+; BWON-F16C-NEXT: movl %eax, %r15d
+; BWON-F16C-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; BWON-F16C-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; BWON-F16C-NEXT: vzeroupper
+; BWON-F16C-NEXT: callq __truncdfhf2
+; BWON-F16C-NEXT: movl %eax, %ebp
+; BWON-F16C-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; BWON-F16C-NEXT: callq __truncdfhf2
+; BWON-F16C-NEXT: movw %ax, 4(%rbx)
+; BWON-F16C-NEXT: movw %bp, (%rbx)
+; BWON-F16C-NEXT: movw %r15w, 6(%rbx)
+; BWON-F16C-NEXT: movw %r14w, 2(%rbx)
+; BWON-F16C-NEXT: addq $88, %rsp
+; BWON-F16C-NEXT: popq %rbx
+; BWON-F16C-NEXT: popq %r14
+; BWON-F16C-NEXT: popq %r15
+; BWON-F16C-NEXT: popq %rbp
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_trunc64_vec4:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: pushl %ebp
+; CHECK-I686-NEXT: pushl %ebx
+; CHECK-I686-NEXT: pushl %edi
+; CHECK-I686-NEXT: pushl %esi
+; CHECK-I686-NEXT: subl $60, %esp
+; CHECK-I686-NEXT: movaps %xmm1, {{[0-9]+}}(%esp) # 16-byte Spill
+; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; CHECK-I686-NEXT: movlps %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __truncdfhf2
+; CHECK-I686-NEXT: movw %ax, %si
+; CHECK-I686-NEXT: movapd {{[0-9]+}}(%esp), %xmm0 # 16-byte Reload
+; CHECK-I686-NEXT: movhpd %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __truncdfhf2
+; CHECK-I686-NEXT: movw %ax, %di
+; CHECK-I686-NEXT: movaps {{[0-9]+}}(%esp), %xmm0 # 16-byte Reload
+; CHECK-I686-NEXT: movlps %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __truncdfhf2
+; CHECK-I686-NEXT: movw %ax, %bx
+; CHECK-I686-NEXT: movapd {{[0-9]+}}(%esp), %xmm0 # 16-byte Reload
+; CHECK-I686-NEXT: movhpd %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __truncdfhf2
+; CHECK-I686-NEXT: movw %ax, 6(%ebp)
+; CHECK-I686-NEXT: movw %bx, 4(%ebp)
+; CHECK-I686-NEXT: movw %di, 2(%ebp)
+; CHECK-I686-NEXT: movw %si, (%ebp)
+; CHECK-I686-NEXT: addl $60, %esp
+; CHECK-I686-NEXT: popl %esi
+; CHECK-I686-NEXT: popl %edi
+; CHECK-I686-NEXT: popl %ebx
+; CHECK-I686-NEXT: popl %ebp
+; CHECK-I686-NEXT: retl
%v = fptrunc <4 x double> %a to <4 x half>
store <4 x half> %v, <4 x half>* %p
ret void
@@ -272,40 +839,98 @@ declare float @test_floatret();
; to f80 and then rounded to f32. The DAG combiner should not combine this
; fp_round and the subsequent fptrunc from float to half.
define half @test_f80trunc_nodagcombine() #0 {
-; CHECK-LABEL: test_f80trunc_nodagcombine:
-; CHECK-I686-NOT: calll __truncxfhf2
+; CHECK-LIBCALL-LABEL: test_f80trunc_nodagcombine:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rax
+; CHECK-LIBCALL-NEXT: callq test_floatret
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: movzwl %ax, %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: popq %rax
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_f80trunc_nodagcombine:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: pushq %rax
+; BWON-F16C-NEXT: callq test_floatret
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: popq %rax
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_f80trunc_nodagcombine:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: subl $12, %esp
+; CHECK-I686-NEXT: calll test_floatret
+; CHECK-I686-NEXT: fstps (%esp)
+; CHECK-I686-NEXT: calll __gnu_f2h_ieee
+; CHECK-I686-NEXT: movzwl %ax, %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: addl $12, %esp
+; CHECK-I686-NEXT: retl
%1 = call float @test_floatret()
%2 = fptrunc float %1 to half
ret half %2
}
-; CHECK-LABEL: test_sitofp_fadd_i32:
-; CHECK-LIBCALL-NEXT: pushq %rbx
-; CHECK-LIBCALL-NEXT: subq $16, %rsp
-; CHECK-LIBCALL-NEXT: movl %edi, %ebx
-; CHECK-LIBCALL-NEXT: movzwl (%rsi), %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
-; CHECK-LIBCALL-NEXT: movss %xmm0, 12(%rsp)
-; CHECK-LIBCALL-NEXT: cvtsi2ssl %ebx, %xmm0
-; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
-; CHECK-LIBCALL-NEXT: movzwl %ax, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
-; CHECK-LIBCALL-NEXT: addss 12(%rsp), %xmm0
-; CHECK-LIBCALL-NEXT: addq $16, %rsp
-; CHECK-LIBCALL-NEXT: popq %rbx
-; CHECK-LIBCALL-NEXT: retq
-; CHECK-F16C-NEXT: movswl (%rsi), %eax
-; CHECK-F16C-NEXT: vmovd %eax, %xmm0
-; CHECK-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; CHECK-F16C-NEXT: vcvtsi2ssl %edi, %xmm1, %xmm1
-; CHECK-F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
-; CHECK-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
-; CHECK-F16C-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-F16C-NEXT: retq
define float @test_sitofp_fadd_i32(i32 %a, half* %b) #0 {
+; CHECK-LIBCALL-LABEL: test_sitofp_fadd_i32:
+; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL-NEXT: pushq %rbx
+; CHECK-LIBCALL-NEXT: subq $16, %rsp
+; CHECK-LIBCALL-NEXT: movl %edi, %ebx
+; CHECK-LIBCALL-NEXT: movzwl (%rsi), %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-LIBCALL-NEXT: cvtsi2ssl %ebx, %xmm0
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: movzwl %ax, %edi
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: addss {{[0-9]+}}(%rsp), %xmm0 # 4-byte Folded Reload
+; CHECK-LIBCALL-NEXT: addq $16, %rsp
+; CHECK-LIBCALL-NEXT: popq %rbx
+; CHECK-LIBCALL-NEXT: retq
+;
+; BWON-F16C-LABEL: test_sitofp_fadd_i32:
+; BWON-F16C: # BB#0:
+; BWON-F16C-NEXT: movswl (%rsi), %eax
+; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: vcvtsi2ssl %edi, %xmm1, %xmm1
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
+; BWON-F16C-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; BWON-F16C-NEXT: retq
+;
+; CHECK-I686-LABEL: test_sitofp_fadd_i32:
+; CHECK-I686: # BB#0:
+; CHECK-I686-NEXT: subl $28, %esp
+; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-I686-NEXT: movzwl (%eax), %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstps {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: movss %xmm0, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-I686-NEXT: xorps %xmm0, %xmm0
+; CHECK-I686-NEXT: cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
+; CHECK-I686-NEXT: movss %xmm0, (%esp)
+; CHECK-I686-NEXT: calll __gnu_f2h_ieee
+; CHECK-I686-NEXT: movzwl %ax, %eax
+; CHECK-I686-NEXT: movl %eax, (%esp)
+; CHECK-I686-NEXT: calll __gnu_h2f_ieee
+; CHECK-I686-NEXT: fstps {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # 4-byte Reload
+; CHECK-I686-NEXT: # xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: addss {{[0-9]+}}(%esp), %xmm0
+; CHECK-I686-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: flds {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT: addl $28, %esp
+; CHECK-I686-NEXT: retl
%tmp0 = load half, half* %b
%tmp1 = sitofp i32 %a to half
%tmp2 = fadd half %tmp0, %tmp1
diff --git a/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/test/CodeGen/X86/illegal-bitfield-loadstore.ll
index ceb465711906..5425670fbb1e 100644
--- a/test/CodeGen/X86/illegal-bitfield-loadstore.ll
+++ b/test/CodeGen/X86/illegal-bitfield-loadstore.ll
@@ -1,17 +1,30 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64
define void @i24_or(i24* %a) {
-; CHECK-LABEL: i24_or:
-; CHECK: # BB#0:
-; CHECK-NEXT: movzwl (%rdi), %eax
-; CHECK-NEXT: movzbl 2(%rdi), %ecx
-; CHECK-NEXT: movb %cl, 2(%rdi)
-; CHECK-NEXT: shll $16, %ecx
-; CHECK-NEXT: orl %eax, %ecx
-; CHECK-NEXT: orl $384, %ecx # imm = 0x180
-; CHECK-NEXT: movw %cx, (%rdi)
-; CHECK-NEXT: retq
+; X86-LABEL: i24_or:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %edx
+; X86-NEXT: movzbl 2(%ecx), %eax
+; X86-NEXT: movb %al, 2(%ecx)
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: orl %edx, %eax
+; X86-NEXT: orl $384, %eax # imm = 0x180
+; X86-NEXT: movw %ax, (%ecx)
+; X86-NEXT: retl
+;
+; X64-LABEL: i24_or:
+; X64: # BB#0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzbl 2(%rdi), %ecx
+; X64-NEXT: movb %cl, 2(%rdi)
+; X64-NEXT: shll $16, %ecx
+; X64-NEXT: orl %eax, %ecx
+; X64-NEXT: orl $384, %ecx # imm = 0x180
+; X64-NEXT: movw %cx, (%rdi)
+; X64-NEXT: retq
%aa = load i24, i24* %a, align 1
%b = or i24 %aa, 384
store i24 %b, i24* %a, align 1
@@ -19,17 +32,30 @@ define void @i24_or(i24* %a) {
}
define void @i24_and_or(i24* %a) {
-; CHECK-LABEL: i24_and_or:
-; CHECK: # BB#0:
-; CHECK-NEXT: movzwl (%rdi), %eax
-; CHECK-NEXT: movzbl 2(%rdi), %ecx
-; CHECK-NEXT: movb %cl, 2(%rdi)
-; CHECK-NEXT: shll $16, %ecx
-; CHECK-NEXT: orl %eax, %ecx
-; CHECK-NEXT: orl $384, %ecx # imm = 0x180
-; CHECK-NEXT: andl $16777088, %ecx # imm = 0xFFFF80
-; CHECK-NEXT: movw %cx, (%rdi)
-; CHECK-NEXT: retq
+; X86-LABEL: i24_and_or:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %edx
+; X86-NEXT: movzbl 2(%ecx), %eax
+; X86-NEXT: movb %al, 2(%ecx)
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: orl %edx, %eax
+; X86-NEXT: orl $384, %eax # imm = 0x180
+; X86-NEXT: andl $16777088, %eax # imm = 0xFFFF80
+; X86-NEXT: movw %ax, (%ecx)
+; X86-NEXT: retl
+;
+; X64-LABEL: i24_and_or:
+; X64: # BB#0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzbl 2(%rdi), %ecx
+; X64-NEXT: movb %cl, 2(%rdi)
+; X64-NEXT: shll $16, %ecx
+; X64-NEXT: orl %eax, %ecx
+; X64-NEXT: orl $384, %ecx # imm = 0x180
+; X64-NEXT: andl $16777088, %ecx # imm = 0xFFFF80
+; X64-NEXT: movw %cx, (%rdi)
+; X64-NEXT: retq
%b = load i24, i24* %a, align 1
%c = and i24 %b, -128
%d = or i24 %c, 384
@@ -38,19 +64,40 @@ define void @i24_and_or(i24* %a) {
}
define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
-; CHECK-LABEL: i24_insert_bit:
-; CHECK: # BB#0:
-; CHECK-NEXT: movzbl %sil, %eax
-; CHECK-NEXT: movzwl (%rdi), %ecx
-; CHECK-NEXT: movzbl 2(%rdi), %edx
-; CHECK-NEXT: movb %dl, 2(%rdi)
-; CHECK-NEXT: shll $16, %edx
-; CHECK-NEXT: orl %ecx, %edx
-; CHECK-NEXT: shll $13, %eax
-; CHECK-NEXT: andl $16769023, %edx # imm = 0xFFDFFF
-; CHECK-NEXT: orl %eax, %edx
-; CHECK-NEXT: movw %dx, (%rdi)
-; CHECK-NEXT: retq
+; X86-LABEL: i24_insert_bit:
+; X86: # BB#0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: .Lcfi0:
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .Lcfi1:
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzwl (%ecx), %esi
+; X86-NEXT: movzbl 2(%ecx), %eax
+; X86-NEXT: movb %al, 2(%ecx)
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: orl %esi, %eax
+; X86-NEXT: shll $13, %edx
+; X86-NEXT: andl $16769023, %eax # imm = 0xFFDFFF
+; X86-NEXT: orl %edx, %eax
+; X86-NEXT: movw %ax, (%ecx)
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: i24_insert_bit:
+; X64: # BB#0:
+; X64-NEXT: movzbl %sil, %eax
+; X64-NEXT: movzwl (%rdi), %ecx
+; X64-NEXT: movzbl 2(%rdi), %edx
+; X64-NEXT: movb %dl, 2(%rdi)
+; X64-NEXT: shll $16, %edx
+; X64-NEXT: orl %ecx, %edx
+; X64-NEXT: shll $13, %eax
+; X64-NEXT: andl $16769023, %edx # imm = 0xFFDFFF
+; X64-NEXT: orl %eax, %edx
+; X64-NEXT: movw %dx, (%rdi)
+; X64-NEXT: retq
%extbit = zext i1 %bit to i24
%b = load i24, i24* %a, align 1
%extbit.shl = shl nuw nsw i24 %extbit, 13
@@ -61,22 +108,28 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
}
define void @i56_or(i56* %a) {
-; CHECK-LABEL: i56_or:
-; CHECK: # BB#0:
-; CHECK-NEXT: movzwl 4(%rdi), %eax
-; CHECK-NEXT: movzbl 6(%rdi), %ecx
-; CHECK-NEXT: movl (%rdi), %edx
-; CHECK-NEXT: movb %cl, 6(%rdi)
-; CHECK-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
-; CHECK-NEXT: shll $16, %ecx
-; CHECK-NEXT: orl %eax, %ecx
-; CHECK-NEXT: shlq $32, %rcx
-; CHECK-NEXT: orq %rcx, %rdx
-; CHECK-NEXT: orq $384, %rdx # imm = 0x180
-; CHECK-NEXT: movl %edx, (%rdi)
-; CHECK-NEXT: shrq $32, %rdx
-; CHECK-NEXT: movw %dx, 4(%rdi)
-; CHECK-NEXT: retq
+; X86-LABEL: i56_or:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $384, (%eax) # imm = 0x180
+; X86-NEXT: retl
+;
+; X64-LABEL: i56_or:
+; X64: # BB#0:
+; X64-NEXT: movzwl 4(%rdi), %eax
+; X64-NEXT: movzbl 6(%rdi), %ecx
+; X64-NEXT: movl (%rdi), %edx
+; X64-NEXT: movb %cl, 6(%rdi)
+; X64-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
+; X64-NEXT: shll $16, %ecx
+; X64-NEXT: orl %eax, %ecx
+; X64-NEXT: shlq $32, %rcx
+; X64-NEXT: orq %rcx, %rdx
+; X64-NEXT: orq $384, %rdx # imm = 0x180
+; X64-NEXT: movl %edx, (%rdi)
+; X64-NEXT: shrq $32, %rdx
+; X64-NEXT: movw %dx, 4(%rdi)
+; X64-NEXT: retq
%aa = load i56, i56* %a, align 1
%b = or i56 %aa, 384
store i56 %b, i56* %a, align 1
@@ -84,24 +137,33 @@ define void @i56_or(i56* %a) {
}
define void @i56_and_or(i56* %a) {
-; CHECK-LABEL: i56_and_or:
-; CHECK: # BB#0:
-; CHECK-NEXT: movzwl 4(%rdi), %eax
-; CHECK-NEXT: movzbl 6(%rdi), %ecx
-; CHECK-NEXT: movl (%rdi), %edx
-; CHECK-NEXT: movb %cl, 6(%rdi)
-; CHECK-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
-; CHECK-NEXT: shll $16, %ecx
-; CHECK-NEXT: orl %eax, %ecx
-; CHECK-NEXT: shlq $32, %rcx
-; CHECK-NEXT: orq %rcx, %rdx
-; CHECK-NEXT: orq $384, %rdx # imm = 0x180
-; CHECK-NEXT: movabsq $72057594037927808, %rax # imm = 0xFFFFFFFFFFFF80
-; CHECK-NEXT: andq %rdx, %rax
-; CHECK-NEXT: movl %eax, (%rdi)
-; CHECK-NEXT: shrq $32, %rax
-; CHECK-NEXT: movw %ax, 4(%rdi)
-; CHECK-NEXT: retq
+; X86-LABEL: i56_and_or:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl $384, %ecx # imm = 0x180
+; X86-NEXT: orl (%eax), %ecx
+; X86-NEXT: andl $-128, %ecx
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: retl
+;
+; X64-LABEL: i56_and_or:
+; X64: # BB#0:
+; X64-NEXT: movzwl 4(%rdi), %eax
+; X64-NEXT: movzbl 6(%rdi), %ecx
+; X64-NEXT: movl (%rdi), %edx
+; X64-NEXT: movb %cl, 6(%rdi)
+; X64-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
+; X64-NEXT: shll $16, %ecx
+; X64-NEXT: orl %eax, %ecx
+; X64-NEXT: shlq $32, %rcx
+; X64-NEXT: orq %rcx, %rdx
+; X64-NEXT: orq $384, %rdx # imm = 0x180
+; X64-NEXT: movabsq $72057594037927808, %rax # imm = 0xFFFFFFFFFFFF80
+; X64-NEXT: andq %rdx, %rax
+; X64-NEXT: movl %eax, (%rdi)
+; X64-NEXT: shrq $32, %rax
+; X64-NEXT: movw %ax, 4(%rdi)
+; X64-NEXT: retq
%b = load i56, i56* %a, align 1
%c = and i56 %b, -128
%d = or i56 %c, 384
@@ -110,26 +172,37 @@ define void @i56_and_or(i56* %a) {
}
define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
-; CHECK-LABEL: i56_insert_bit:
-; CHECK: # BB#0:
-; CHECK-NEXT: movzbl %sil, %eax
-; CHECK-NEXT: movzwl 4(%rdi), %ecx
-; CHECK-NEXT: movzbl 6(%rdi), %edx
-; CHECK-NEXT: movl (%rdi), %esi
-; CHECK-NEXT: movb %dl, 6(%rdi)
-; CHECK-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill> %RDX<def>
-; CHECK-NEXT: shll $16, %edx
-; CHECK-NEXT: orl %ecx, %edx
-; CHECK-NEXT: shlq $32, %rdx
-; CHECK-NEXT: orq %rdx, %rsi
-; CHECK-NEXT: shlq $13, %rax
-; CHECK-NEXT: movabsq $72057594037919743, %rcx # imm = 0xFFFFFFFFFFDFFF
-; CHECK-NEXT: andq %rsi, %rcx
-; CHECK-NEXT: orq %rax, %rcx
-; CHECK-NEXT: movl %ecx, (%rdi)
-; CHECK-NEXT: shrq $32, %rcx
-; CHECK-NEXT: movw %cx, 4(%rdi)
-; CHECK-NEXT: retq
+; X86-LABEL: i56_insert_bit:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll $13, %ecx
+; X86-NEXT: movl $-8193, %edx # imm = 0xDFFF
+; X86-NEXT: andl (%eax), %edx
+; X86-NEXT: orl %ecx, %edx
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: retl
+;
+; X64-LABEL: i56_insert_bit:
+; X64: # BB#0:
+; X64-NEXT: movzbl %sil, %eax
+; X64-NEXT: movzwl 4(%rdi), %ecx
+; X64-NEXT: movzbl 6(%rdi), %edx
+; X64-NEXT: movl (%rdi), %esi
+; X64-NEXT: movb %dl, 6(%rdi)
+; X64-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill> %RDX<def>
+; X64-NEXT: shll $16, %edx
+; X64-NEXT: orl %ecx, %edx
+; X64-NEXT: shlq $32, %rdx
+; X64-NEXT: orq %rdx, %rsi
+; X64-NEXT: shlq $13, %rax
+; X64-NEXT: movabsq $72057594037919743, %rcx # imm = 0xFFFFFFFFFFDFFF
+; X64-NEXT: andq %rsi, %rcx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: shrq $32, %rcx
+; X64-NEXT: movw %cx, 4(%rdi)
+; X64-NEXT: retq
%extbit = zext i1 %bit to i56
%b = load i56, i56* %a, align 1
%extbit.shl = shl nuw nsw i56 %extbit, 13
diff --git a/test/CodeGen/X86/optimize-max-1.ll b/test/CodeGen/X86/optimize-max-1.ll
index 11e2f9a93a57..08cb86ab3989 100644
--- a/test/CodeGen/X86/optimize-max-1.ll
+++ b/test/CodeGen/X86/optimize-max-1.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 | not grep cmov
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; LSR should be able to eliminate both smax and umax expressions
; in loop trip counts.
@@ -6,6 +7,18 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
define void @fs(double* nocapture %p, i64 %n) nounwind {
+; CHECK-LABEL: fs:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_1: # %bb
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq $0, (%rdi,%rax,8)
+; CHECK-NEXT: incq %rax
+; CHECK-NEXT: cmpq %rsi, %rax
+; CHECK-NEXT: jl .LBB0_1
+; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: retq
entry:
%tmp = icmp slt i64 %n, 1 ; <i1> [#uses=1]
%smax = select i1 %tmp, i64 1, i64 %n ; <i64> [#uses=1]
@@ -24,6 +37,18 @@ return: ; preds = %bb
}
define void @bs(double* nocapture %p, i64 %n) nounwind {
+; CHECK-LABEL: bs:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB1_1: # %bb
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq $0, (%rdi,%rax,8)
+; CHECK-NEXT: incq %rax
+; CHECK-NEXT: cmpq %rsi, %rax
+; CHECK-NEXT: jl .LBB1_1
+; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: retq
entry:
%tmp = icmp sge i64 %n, 1 ; <i1> [#uses=1]
%smax = select i1 %tmp, i64 %n, i64 1 ; <i64> [#uses=1]
@@ -42,6 +67,18 @@ return: ; preds = %bb
}
define void @fu(double* nocapture %p, i64 %n) nounwind {
+; CHECK-LABEL: fu:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB2_1: # %bb
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq $0, (%rdi,%rax,8)
+; CHECK-NEXT: incq %rax
+; CHECK-NEXT: cmpq %rsi, %rax
+; CHECK-NEXT: jb .LBB2_1
+; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: retq
entry:
%tmp = icmp eq i64 %n, 0 ; <i1> [#uses=1]
%umax = select i1 %tmp, i64 1, i64 %n ; <i64> [#uses=1]
@@ -60,6 +97,18 @@ return: ; preds = %bb
}
define void @bu(double* nocapture %p, i64 %n) nounwind {
+; CHECK-LABEL: bu:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB3_1: # %bb
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq $0, (%rdi,%rax,8)
+; CHECK-NEXT: incq %rax
+; CHECK-NEXT: cmpq %rsi, %rax
+; CHECK-NEXT: jb .LBB3_1
+; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: retq
entry:
%tmp = icmp ne i64 %n, 0 ; <i1> [#uses=1]
%umax = select i1 %tmp, i64 %n, i64 1 ; <i64> [#uses=1]
diff --git a/test/CodeGen/X86/optimize-max-2.ll b/test/CodeGen/X86/optimize-max-2.ll
index 45b542e2267c..37d2a20975a0 100644
--- a/test/CodeGen/X86/optimize-max-2.ll
+++ b/test/CodeGen/X86/optimize-max-2.ll
@@ -1,8 +1,5 @@
-; RUN: llc < %s -march=x86-64 | grep cmov | count 2
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; CHECK: jne
-; CHECK-NOT: jne
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; LSR's OptimizeMax function shouldn't try to eliminate this max, because
; it has three operands.
@@ -10,6 +7,24 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
define void @foo(double* nocapture %p, i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: foo:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: testq %rdx, %rdx
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: cmovneq %rdx, %rax
+; CHECK-NEXT: cmpq %rsi, %rax
+; CHECK-NEXT: cmovbeq %rsi, %rax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_1: # %bb4
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: addsd %xmm0, %xmm0
+; CHECK-NEXT: movsd %xmm0, (%rdi)
+; CHECK-NEXT: addq $8, %rdi
+; CHECK-NEXT: decq %rax
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: retq
entry:
%tmp = icmp eq i64 %y, 0 ; <i1> [#uses=1]
%umax = select i1 %tmp, i64 1, i64 %y ; <i64> [#uses=2]
@@ -30,3 +45,4 @@ bb4: ; preds = %bb4, %entry
return: ; preds = %bb4
ret void
}
+
diff --git a/test/CodeGen/X86/pr15309.ll b/test/CodeGen/X86/pr15309.ll
index e9d9b9e54c13..0301b58def1c 100644
--- a/test/CodeGen/X86/pr15309.ll
+++ b/test/CodeGen/X86/pr15309.ll
@@ -1,15 +1,43 @@
-; RUN: llc < %s -mtriple=i686-linux-pc -mcpu=corei7 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-linux-pc | FileCheck %s
-define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>* nocapture %dest) noinline {
-L.entry:
- %0 = getelementptr <2 x i64>, <2 x i64>* %src, i32 10
- %1 = load <2 x i64>, <2 x i64>* %0, align 16
- %2 = uitofp <2 x i64> %1 to <2 x float>
- %3 = getelementptr <2 x float>, <2 x float>* %dest, i32 10
- store <2 x float> %2, <2 x float>* %3, align 8
+define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>* nocapture %dest) nounwind {
+; CHECK-LABEL: test_convert_float2_ulong2:
+; CHECK: # BB#0:
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl $20, %esp
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl 168(%ecx), %edx
+; CHECK-NEXT: movl 172(%ecx), %esi
+; CHECK-NEXT: movl 160(%ecx), %edi
+; CHECK-NEXT: movl 164(%ecx), %ecx
+; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edi, (%esp)
+; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: testl %ecx, %ecx
+; CHECK-NEXT: setns %dl
+; CHECK-NEXT: fildll (%esp)
+; CHECK-NEXT: fadds {{\.LCPI.*}}(,%edx,4)
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: testl %esi, %esi
+; CHECK-NEXT: setns %cl
+; CHECK-NEXT: fildll {{[0-9]+}}(%esp)
+; CHECK-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; CHECK-NEXT: fstps 84(%eax)
+; CHECK-NEXT: fstps 80(%eax)
+; CHECK-NEXT: addl $20, %esp
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: popl %edi
+; CHECK-NEXT: retl
+ %t0 = getelementptr <2 x i64>, <2 x i64>* %src, i32 10
+ %t1 = load <2 x i64>, <2 x i64>* %t0, align 16
+ %t2 = uitofp <2 x i64> %t1 to <2 x float>
+ %t3 = getelementptr <2 x float>, <2 x float>* %dest, i32 10
+ store <2 x float> %t2, <2 x float>* %t3, align 8
ret void
}
-; CHECK: test_convert_float2_ulong2
-; CHECK-NOT: cvtpd2ps
-; CHECK: ret
diff --git a/test/CodeGen/X86/pr23603.ll b/test/CodeGen/X86/pr23603.ll
index 6f856aedb8d5..315e60768613 100644
--- a/test/CodeGen/X86/pr23603.ll
+++ b/test/CodeGen/X86/pr23603.ll
@@ -1,14 +1,29 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
declare void @free_v()
-define void @f(i32* %x, i32 %c32, i32* %y) {
-; CHECK-LABEL: f
+define void @f(i32* %x, i32 %c32, i32* %y) nounwind {
+; CHECK-LABEL: f:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdx, %r14
+; CHECK-NEXT: movl %esi, %ebp
+; CHECK-NEXT: movl (%rdi), %ebx
+; CHECK-NEXT: callq free_v
+; CHECK-NEXT: testl %ebp, %ebp
+; CHECK-NEXT: je .LBB0_2
+; CHECK-NEXT: # BB#1: # %left
+; CHECK-NEXT: movl %ebx, (%r14)
+; CHECK-NEXT: .LBB0_2: # %merge
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: retq
entry:
%v = load i32, i32* %x, !invariant.load !0
-; CHECK: movl (%rdi), %ebx
-; CHECK: free_v
-; CHECK-NOT: movl (%rdi), %ebx
call void @free_v()
%c = icmp ne i32 %c32, 0
br i1 %c, label %left, label %merge
diff --git a/test/CodeGen/X86/pr33715.ll b/test/CodeGen/X86/pr33715.ll
new file mode 100644
index 000000000000..15432cfdb512
--- /dev/null
+++ b/test/CodeGen/X86/pr33715.ll
@@ -0,0 +1,16 @@
+; Make sure we don't crash with a build vector of integer constants.
+; RUN: llc %s -o /dev/null
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @patatino() {
+ %tmp = insertelement <4 x i32> <i32 1, i32 1, i32 undef, i32 undef>, i32 1, i32 2
+ %tmp1 = insertelement <4 x i32> %tmp, i32 1, i32 3
+ %tmp2 = icmp ne <4 x i32> %tmp1, zeroinitializer
+ %tmp3 = icmp slt <4 x i32> %tmp1, <i32 4, i32 4, i32 4, i32 4>
+ %tmp4 = or <4 x i1> %tmp2, %tmp3
+ %tmp5 = select <4 x i1> %tmp4, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 4, i32 4, i32 4>
+ %tmp6 = extractelement <4 x i32> %tmp5, i32 0
+ ret i32 %tmp6
+}
diff --git a/test/CodeGen/X86/rdrand-x86_64.ll b/test/CodeGen/X86/rdrand-x86_64.ll
new file mode 100644
index 000000000000..06f1136087bb
--- /dev/null
+++ b/test/CodeGen/X86/rdrand-x86_64.ll
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx-i -mattr=+rdrnd | FileCheck %s
+
+declare {i64, i32} @llvm.x86.rdrand.64()
+
+define i32 @_rdrand64_step(i64* %random_val) {
+; CHECK-LABEL: _rdrand64_step:
+; CHECK: # BB#0:
+; CHECK-NEXT: rdrandq %rcx
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: cmovael %ecx, %eax
+; CHECK-NEXT: movq %rcx, (%rdi)
+; CHECK-NEXT: retq
+ %call = call {i64, i32} @llvm.x86.rdrand.64()
+ %randval = extractvalue {i64, i32} %call, 0
+ store i64 %randval, i64* %random_val
+ %isvalid = extractvalue {i64, i32} %call, 1
+ ret i32 %isvalid
+}
diff --git a/test/CodeGen/X86/rdrand.ll b/test/CodeGen/X86/rdrand.ll
index 107cde05a0e6..0638e0095282 100644
--- a/test/CodeGen/X86/rdrand.ll
+++ b/test/CodeGen/X86/rdrand.ll
@@ -1,66 +1,117 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx-i -mattr=+rdrnd | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=core-avx-i -mattr=+rdrnd | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx-i -mattr=+rdrnd | FileCheck %s --check-prefix=X64
+
declare {i16, i32} @llvm.x86.rdrand.16()
declare {i32, i32} @llvm.x86.rdrand.32()
-declare {i64, i32} @llvm.x86.rdrand.64()
define i32 @_rdrand16_step(i16* %random_val) {
+; X86-LABEL: _rdrand16_step:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: rdrandw %ax
+; X86-NEXT: movzwl %ax, %edx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovael %edx, %eax
+; X86-NEXT: movw %dx, (%ecx)
+; X86-NEXT: retl
+;
+; X64-LABEL: _rdrand16_step:
+; X64: # BB#0:
+; X64-NEXT: rdrandw %ax
+; X64-NEXT: movzwl %ax, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovael %ecx, %eax
+; X64-NEXT: movw %cx, (%rdi)
+; X64-NEXT: retq
%call = call {i16, i32} @llvm.x86.rdrand.16()
%randval = extractvalue {i16, i32} %call, 0
store i16 %randval, i16* %random_val
%isvalid = extractvalue {i16, i32} %call, 1
ret i32 %isvalid
-; CHECK-LABEL: _rdrand16_step:
-; CHECK: rdrandw %ax
-; CHECK: movzwl %ax, %ecx
-; CHECK: movl $1, %eax
-; CHECK: cmovael %ecx, %eax
-; CHECK: movw %cx, (%r[[A0:di|cx]])
-; CHECK: ret
}
define i32 @_rdrand32_step(i32* %random_val) {
+; X86-LABEL: _rdrand32_step:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: rdrandl %edx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovael %edx, %eax
+; X86-NEXT: movl %edx, (%ecx)
+; X86-NEXT: retl
+;
+; X64-LABEL: _rdrand32_step:
+; X64: # BB#0:
+; X64-NEXT: rdrandl %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovael %ecx, %eax
+; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: retq
%call = call {i32, i32} @llvm.x86.rdrand.32()
%randval = extractvalue {i32, i32} %call, 0
store i32 %randval, i32* %random_val
%isvalid = extractvalue {i32, i32} %call, 1
ret i32 %isvalid
-; CHECK-LABEL: _rdrand32_step:
-; CHECK: rdrandl %e[[T0:[a-z]+]]
-; CHECK: movl $1, %eax
-; CHECK: cmovael %e[[T0]], %eax
-; CHECK: movl %e[[T0]], (%r[[A0]])
-; CHECK: ret
-}
-
-define i32 @_rdrand64_step(i64* %random_val) {
- %call = call {i64, i32} @llvm.x86.rdrand.64()
- %randval = extractvalue {i64, i32} %call, 0
- store i64 %randval, i64* %random_val
- %isvalid = extractvalue {i64, i32} %call, 1
- ret i32 %isvalid
-; CHECK-LABEL: _rdrand64_step:
-; CHECK: rdrandq %r[[T1:[a-z]+]]
-; CHECK: movl $1, %eax
-; CHECK: cmovael %e[[T1]], %eax
-; CHECK: movq %r[[T1]], (%r[[A0]])
-; CHECK: ret
}
; Check that MachineCSE doesn't eliminate duplicate rdrand instructions.
define i32 @CSE() nounwind {
+; X86-LABEL: CSE:
+; X86: # BB#0:
+; X86-NEXT: rdrandl %ecx
+; X86-NEXT: rdrandl %eax
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: CSE:
+; X64: # BB#0:
+; X64-NEXT: rdrandl %ecx
+; X64-NEXT: rdrandl %eax
+; X64-NEXT: addl %ecx, %eax
+; X64-NEXT: retq
%rand1 = tail call { i32, i32 } @llvm.x86.rdrand.32() nounwind
%v1 = extractvalue { i32, i32 } %rand1, 0
%rand2 = tail call { i32, i32 } @llvm.x86.rdrand.32() nounwind
%v2 = extractvalue { i32, i32 } %rand2, 0
%add = add i32 %v2, %v1
ret i32 %add
-; CHECK-LABEL: CSE:
-; CHECK: rdrandl
-; CHECK: rdrandl
}
; Check that MachineLICM doesn't hoist rdrand instructions.
define void @loop(i32* %p, i32 %n) nounwind {
+; X86-LABEL: loop:
+; X86: # BB#0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB3_3
+; X86-NEXT: # BB#1: # %while.body.preheader
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: .p2align 4, 0x90
+; X86-NEXT: .LBB3_2: # %while.body
+; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: rdrandl %edx
+; X86-NEXT: movl %edx, (%ecx)
+; X86-NEXT: leal 4(%ecx), %ecx
+; X86-NEXT: decl %eax
+; X86-NEXT: jne .LBB3_2
+; X86-NEXT: .LBB3_3: # %while.end
+; X86-NEXT: retl
+;
+; X64-LABEL: loop:
+; X64: # BB#0: # %entry
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB3_2
+; X64-NEXT: .p2align 4, 0x90
+; X64-NEXT: .LBB3_1: # %while.body
+; X64-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-NEXT: rdrandl %eax
+; X64-NEXT: movl %eax, (%rdi)
+; X64-NEXT: leaq 4(%rdi), %rdi
+; X64-NEXT: decl %esi
+; X64-NEXT: jne .LBB3_1
+; X64-NEXT: .LBB3_2: # %while.end
+; X64-NEXT: retq
entry:
%tobool1 = icmp eq i32 %n, 0
br i1 %tobool1, label %while.end, label %while.body
@@ -78,8 +129,4 @@ while.body: ; preds = %entry, %while.body
while.end: ; preds = %while.body, %entry
ret void
-; CHECK-LABEL: loop:
-; CHECK-NOT: rdrandl
-; CHECK: This Inner Loop Header: Depth=1
-; CHECK: rdrandl
}
diff --git a/test/CodeGen/X86/rdseed-x86_64.ll b/test/CodeGen/X86/rdseed-x86_64.ll
new file mode 100644
index 000000000000..b0d9748dd6ae
--- /dev/null
+++ b/test/CodeGen/X86/rdseed-x86_64.ll
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx-i -mattr=+rdseed | FileCheck %s
+
+declare {i64, i32} @llvm.x86.rdseed.64()
+
+define i32 @_rdseed64_step(i64* %random_val) {
+; CHECK-LABEL: _rdseed64_step:
+; CHECK: # BB#0:
+; CHECK-NEXT: rdseedq %rcx
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: cmovael %ecx, %eax
+; CHECK-NEXT: movq %rcx, (%rdi)
+; CHECK-NEXT: retq
+ %call = call {i64, i32} @llvm.x86.rdseed.64()
+ %randval = extractvalue {i64, i32} %call, 0
+ store i64 %randval, i64* %random_val
+ %isvalid = extractvalue {i64, i32} %call, 1
+ ret i32 %isvalid
+}
diff --git a/test/CodeGen/X86/rdseed.ll b/test/CodeGen/X86/rdseed.ll
index c219b4ad27ec..b22e3e7ceac0 100644
--- a/test/CodeGen/X86/rdseed.ll
+++ b/test/CodeGen/X86/rdseed.ll
@@ -1,48 +1,56 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx-i -mattr=+rdseed | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=core-avx-i -mattr=+rdseed | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx-i -mattr=+rdseed | FileCheck %s --check-prefix=X64
declare {i16, i32} @llvm.x86.rdseed.16()
declare {i32, i32} @llvm.x86.rdseed.32()
-declare {i64, i32} @llvm.x86.rdseed.64()
define i32 @_rdseed16_step(i16* %random_val) {
+; X86-LABEL: _rdseed16_step:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: rdseedw %ax
+; X86-NEXT: movzwl %ax, %edx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovael %edx, %eax
+; X86-NEXT: movw %dx, (%ecx)
+; X86-NEXT: retl
+;
+; X64-LABEL: _rdseed16_step:
+; X64: # BB#0:
+; X64-NEXT: rdseedw %ax
+; X64-NEXT: movzwl %ax, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovael %ecx, %eax
+; X64-NEXT: movw %cx, (%rdi)
+; X64-NEXT: retq
%call = call {i16, i32} @llvm.x86.rdseed.16()
%randval = extractvalue {i16, i32} %call, 0
store i16 %randval, i16* %random_val
%isvalid = extractvalue {i16, i32} %call, 1
ret i32 %isvalid
-; CHECK-LABEL: _rdseed16_step:
-; CHECK: rdseedw %ax
-; CHECK: movzwl %ax, %ecx
-; CHECK: movl $1, %eax
-; CHECK: cmovael %ecx, %eax
-; CHECK: movw %cx, (%r[[A0:di|cx]])
-; CHECK: ret
}
define i32 @_rdseed32_step(i32* %random_val) {
+; X86-LABEL: _rdseed32_step:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: rdseedl %edx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovael %edx, %eax
+; X86-NEXT: movl %edx, (%ecx)
+; X86-NEXT: retl
+;
+; X64-LABEL: _rdseed32_step:
+; X64: # BB#0:
+; X64-NEXT: rdseedl %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovael %ecx, %eax
+; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: retq
%call = call {i32, i32} @llvm.x86.rdseed.32()
%randval = extractvalue {i32, i32} %call, 0
store i32 %randval, i32* %random_val
%isvalid = extractvalue {i32, i32} %call, 1
ret i32 %isvalid
-; CHECK-LABEL: _rdseed32_step:
-; CHECK: rdseedl %e[[T0:[a-z]+]]
-; CHECK: movl $1, %eax
-; CHECK: cmovael %e[[T0]], %eax
-; CHECK: movl %e[[T0]], (%r[[A0]])
-; CHECK: ret
-}
-
-define i32 @_rdseed64_step(i64* %random_val) {
- %call = call {i64, i32} @llvm.x86.rdseed.64()
- %randval = extractvalue {i64, i32} %call, 0
- store i64 %randval, i64* %random_val
- %isvalid = extractvalue {i64, i32} %call, 1
- ret i32 %isvalid
-; CHECK-LABEL: _rdseed64_step:
-; CHECK: rdseedq %r[[T1:[a-z]+]]
-; CHECK: movl $1, %eax
-; CHECK: cmovael %e[[T1]], %eax
-; CHECK: movq %r[[T1]], (%r[[A0]])
-; CHECK: ret
}
diff --git a/test/CodeGen/X86/recip-fastmath.ll b/test/CodeGen/X86/recip-fastmath.ll
index 16e261bf3c5e..02a968c6f27d 100644
--- a/test/CodeGen/X86/recip-fastmath.ll
+++ b/test/CodeGen/X86/recip-fastmath.ll
@@ -45,9 +45,9 @@ define float @f32_no_estimate(float %x) #0 {
;
; SANDY-LABEL: f32_no_estimate:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50]
-; SANDY-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
+; SANDY-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [14:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_no_estimate:
; HASWELL: # BB#0:
@@ -113,11 +113,11 @@ define float @f32_one_step(float %x) #1 {
; SANDY: # BB#0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_one_step:
; HASWELL: # BB#0:
@@ -207,7 +207,7 @@ define float @f32_two_step(float %x) #2 {
; SANDY: # BB#0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
-; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vsubss %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
; SANDY-NEXT: vmulss %xmm2, %xmm1, %xmm2 # sched: [5:1.00]
; SANDY-NEXT: vaddss %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
@@ -215,7 +215,7 @@ define float @f32_two_step(float %x) #2 {
; SANDY-NEXT: vsubss %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_two_step:
; HASWELL: # BB#0:
@@ -284,25 +284,25 @@ define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
;
; SANDY-LABEL: v4f32_no_estimate:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
-; SANDY-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
+; SANDY-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [14:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_no_estimate:
; HASWELL: # BB#0:
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm1 # sched: [4:0.50]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1] sched: [4:0.50]
; HASWELL-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
; HASWELL-NEXT: retq # sched: [1:1.00]
;
; HASWELL-NO-FMA-LABEL: v4f32_no_estimate:
; HASWELL-NO-FMA: # BB#0:
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; HASWELL-NO-FMA-NEXT: vdivps %xmm0, %xmm1, %xmm0
; HASWELL-NO-FMA-NEXT: retq
;
; AVX512-LABEL: v4f32_no_estimate:
; AVX512: # BB#0:
-; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm1 # sched: [4:0.50]
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1] sched: [4:0.50]
; AVX512-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
@@ -350,18 +350,18 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
;
; SANDY-LABEL: v4f32_one_step:
; SANDY: # BB#0:
-; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [7:3.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
; SANDY-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_one_step:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
; HASWELL-NEXT: retq # sched: [1:1.00]
@@ -370,7 +370,7 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1]
; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0
@@ -379,7 +379,7 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; KNL-LABEL: v4f32_one_step:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
; KNL-NEXT: retq # sched: [1:1.00]
@@ -453,9 +453,9 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
;
; SANDY-LABEL: v4f32_two_step:
; SANDY: # BB#0:
-; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [7:3.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
; SANDY-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
; SANDY-NEXT: vmulps %xmm2, %xmm1, %xmm2 # sched: [5:1.00]
; SANDY-NEXT: vaddps %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
@@ -463,12 +463,12 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; SANDY-NEXT: vsubps %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_two_step:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
@@ -480,7 +480,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm2
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm3
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
; HASWELL-NO-FMA-NEXT: vsubps %xmm2, %xmm3, %xmm2
; HASWELL-NO-FMA-NEXT: vmulps %xmm2, %xmm1, %xmm2
; HASWELL-NO-FMA-NEXT: vaddps %xmm2, %xmm1, %xmm1
@@ -493,7 +493,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; KNL-LABEL: v4f32_two_step:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; KNL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
; KNL-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
@@ -504,7 +504,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; SKX-LABEL: v4f32_two_step:
; SKX: # BB#0:
; SKX-NEXT: vrcp14ps %xmm0, %xmm1
-; SKX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; SKX-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; SKX-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
; SKX-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
@@ -541,30 +541,30 @@ define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
; BTVER2-LABEL: v8f32_no_estimate:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
-; BTVER2-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [19:19.00]
+; BTVER2-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [38:38.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_no_estimate:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
-; SANDY-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [12:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
+; SANDY-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [29:3.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_no_estimate:
; HASWELL: # BB#0:
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm1 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; HASWELL-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [19:2.00]
; HASWELL-NEXT: retq # sched: [1:1.00]
;
; HASWELL-NO-FMA-LABEL: v8f32_no_estimate:
; HASWELL-NO-FMA: # BB#0:
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; HASWELL-NO-FMA-NEXT: vdivps %ymm0, %ymm1, %ymm0
; HASWELL-NO-FMA-NEXT: retq
;
; AVX512-LABEL: v8f32_no_estimate:
; AVX512: # BB#0:
-; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %ymm1 # sched: [5:1.00]
+; AVX512-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; AVX512-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [19:2.00]
; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
@@ -610,27 +610,27 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; BTVER2-LABEL: v8f32_one_step:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
-; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_one_step:
; SANDY: # BB#0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
; SANDY-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_one_step:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
; HASWELL-NEXT: retq # sched: [1:1.00]
@@ -639,7 +639,7 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm2
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1]
; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm2, %ymm0
; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0
; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0
@@ -648,7 +648,7 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; KNL-LABEL: v8f32_one_step:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
; KNL-NEXT: retq # sched: [1:1.00]
@@ -722,22 +722,22 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; BTVER2-LABEL: v8f32_two_step:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
-; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_two_step:
; SANDY: # BB#0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
; SANDY-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
@@ -745,12 +745,12 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_two_step:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; HASWELL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
@@ -762,7 +762,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm2
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm3
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1]
; HASWELL-NO-FMA-NEXT: vsubps %ymm2, %ymm3, %ymm2
; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm2
; HASWELL-NO-FMA-NEXT: vaddps %ymm2, %ymm1, %ymm1
@@ -775,7 +775,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; KNL-LABEL: v8f32_two_step:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; KNL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
; KNL-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
@@ -786,7 +786,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; SKX-LABEL: v8f32_two_step:
; SKX: # BB#0:
; SKX-NEXT: vrcp14ps %ymm0, %ymm1
-; SKX-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; SKX-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; SKX-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
; SKX-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
diff --git a/test/CodeGen/X86/recip-fastmath2.ll b/test/CodeGen/X86/recip-fastmath2.ll
index 440a6f0bef13..c82eab84757f 100644
--- a/test/CodeGen/X86/recip-fastmath2.ll
+++ b/test/CodeGen/X86/recip-fastmath2.ll
@@ -39,8 +39,8 @@ define float @f32_no_step_2(float %x) #3 {
; SANDY-LABEL: f32_no_step_2:
; SANDY: # BB#0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_no_step_2:
; HASWELL: # BB#0:
@@ -110,12 +110,12 @@ define float @f32_one_step_2(float %x) #1 {
; SANDY: # BB#0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_one_step_2:
; HASWELL: # BB#0:
@@ -198,13 +198,13 @@ define float @f32_one_step_2_divs(float %x) #1 {
; SANDY: # BB#0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:1.00]
+; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_one_step_2_divs:
; HASWELL: # BB#0:
@@ -305,7 +305,7 @@ define float @f32_two_step_2(float %x) #2 {
; SANDY: # BB#0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
-; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vsubss %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
; SANDY-NEXT: vmulss %xmm2, %xmm1, %xmm2 # sched: [5:1.00]
; SANDY-NEXT: vaddss %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
@@ -313,8 +313,8 @@ define float @f32_two_step_2(float %x) #2 {
; SANDY-NEXT: vsubss %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_two_step_2:
; HASWELL: # BB#0:
@@ -403,19 +403,19 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
;
; SANDY-LABEL: v4f32_one_step2:
; SANDY: # BB#0:
-; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [7:3.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
; SANDY-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_one_step2:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
; HASWELL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
@@ -425,7 +425,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
@@ -435,7 +435,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; KNL-LABEL: v4f32_one_step2:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
; KNL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
@@ -501,20 +501,20 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
;
; SANDY-LABEL: v4f32_one_step_2_divs:
; SANDY: # BB#0:
-; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [7:3.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
; SANDY-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_one_step_2_divs:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
; HASWELL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
@@ -525,7 +525,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
@@ -536,7 +536,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; KNL-LABEL: v4f32_one_step_2_divs:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
; KNL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
@@ -619,9 +619,9 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
;
; SANDY-LABEL: v4f32_two_step2:
; SANDY: # BB#0:
-; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [7:3.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
; SANDY-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
; SANDY-NEXT: vmulps %xmm2, %xmm1, %xmm2 # sched: [5:1.00]
; SANDY-NEXT: vaddps %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
@@ -629,13 +629,13 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; SANDY-NEXT: vsubps %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_two_step2:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
@@ -648,7 +648,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [5:0.50]
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm3 # sched: [4:0.50]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1] sched: [4:0.50]
; HASWELL-NO-FMA-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %xmm2, %xmm1, %xmm2 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vaddps %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
@@ -662,7 +662,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; KNL-LABEL: v4f32_two_step2:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; KNL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
; KNL-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
@@ -674,7 +674,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; SKX-LABEL: v4f32_two_step2:
; SKX: # BB#0:
; SKX-NEXT: vrcp14ps %xmm0, %xmm1
-; SKX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; SKX-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [4:0.50]
; SKX-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
; SKX-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
@@ -729,29 +729,29 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; BTVER2-LABEL: v8f32_one_step2:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
-; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_one_step2:
; SANDY: # BB#0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
; SANDY-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_one_step2:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
@@ -761,7 +761,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -771,7 +771,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; KNL-LABEL: v8f32_one_step2:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
; KNL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
@@ -835,31 +835,31 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; BTVER2-LABEL: v8f32_one_step_2_divs:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
-; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [7:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [7:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_one_step_2_divs:
; SANDY: # BB#0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
; SANDY-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [9:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [12:1.00]
; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_one_step_2_divs:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [9:1.00]
@@ -870,7 +870,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -881,7 +881,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; KNL-LABEL: v8f32_one_step_2_divs:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
; KNL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [9:1.00]
@@ -964,23 +964,23 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; BTVER2-LABEL: v8f32_two_step2:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
-; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_two_step2:
; SANDY: # BB#0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [5:1.00]
-; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
; SANDY-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
@@ -988,13 +988,13 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_two_step2:
; HASWELL: # BB#0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; HASWELL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
@@ -1007,7 +1007,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; HASWELL-NO-FMA: # BB#0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [5:1.00]
-; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm3 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
@@ -1021,7 +1021,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; KNL-LABEL: v8f32_two_step2:
; KNL: # BB#0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
-; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; KNL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
; KNL-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
@@ -1033,7 +1033,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; SKX-LABEL: v8f32_two_step2:
; SKX: # BB#0:
; SKX-NEXT: vrcp14ps %ymm0, %ymm1
-; SKX-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; SKX-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [5:1.00]
; SKX-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
; SKX-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
@@ -1064,13 +1064,13 @@ define <8 x float> @v8f32_no_step(<8 x float> %x) #3 {
;
; BTVER2-LABEL: v8f32_no_step:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_no_step:
; SANDY: # BB#0:
; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_no_step:
; HASWELL: # BB#0:
@@ -1118,15 +1118,15 @@ define <8 x float> @v8f32_no_step2(<8 x float> %x) #3 {
;
; BTVER2-LABEL: v8f32_no_step2:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_no_step2:
; SANDY: # BB#0:
; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:1.00]
-; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_no_step2:
; HASWELL: # BB#0:
diff --git a/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll b/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll
index ba8ff1bc1819..3bb14c4b1cd8 100644
--- a/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll
+++ b/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -o - -mtriple=x86_64-apple-macosx | FileCheck %s
+; RUN: llc -lsr-filter-same-scaled-reg=false < %s -o - -mtriple=x86_64-apple-macosx | FileCheck %s
; Test case for the recoloring of broken hints.
; It is tricky to exercise this with something reasonably small, since kicking in this
; optimization requires that splitting and spilling occur.
diff --git a/test/CodeGen/X86/rotate4.ll b/test/CodeGen/X86/rotate4.ll
index 56a7d3285056..c7117be91ab4 100644
--- a/test/CodeGen/X86/rotate4.ll
+++ b/test/CodeGen/X86/rotate4.ll
@@ -1,17 +1,20 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; Check that we recognize this idiom for rotation too:
; a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))
define i32 @rotate_left_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_left_32:
-; CHECK-NOT: and
-; CHECK: roll
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: roll %cl, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
%and = and i32 %b, 31
%shl = shl i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
%shr = lshr i32 %a, %and3
%or = or i32 %shl, %shr
ret i32 %or
@@ -19,13 +22,15 @@ entry:
define i32 @rotate_right_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_right_32:
-; CHECK-NOT: and
-; CHECK: rorl
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorl %cl, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
%and = and i32 %b, 31
%shl = lshr i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
%shr = shl i32 %a, %and3
%or = or i32 %shl, %shr
ret i32 %or
@@ -33,13 +38,15 @@ entry:
define i64 @rotate_left_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_left_64:
-; CHECK-NOT: and
-; CHECK: rolq
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rolq %cl, %rdi
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
%and = and i64 %b, 63
%shl = shl i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
%shr = lshr i64 %a, %and3
%or = or i64 %shl, %shr
ret i64 %or
@@ -47,13 +54,15 @@ entry:
define i64 @rotate_right_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_right_64:
-; CHECK-NOT: and
-; CHECK: rorq
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorq %cl, %rdi
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
%and = and i64 %b, 63
%shl = lshr i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
%shr = shl i64 %a, %and3
%or = or i64 %shl, %shr
ret i64 %or
@@ -63,16 +72,15 @@ entry:
define void @rotate_left_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_left_m32:
-; CHECK-NOT: and
-; CHECK: roll
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: roll %cl, (%rdi)
+; CHECK-NEXT: retq
%a = load i32, i32* %pa, align 16
%and = and i32 %b, 31
%shl = shl i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
%shr = lshr i32 %a, %and3
%or = or i32 %shl, %shr
store i32 %or, i32* %pa, align 32
@@ -81,16 +89,15 @@ entry:
define void @rotate_right_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_right_m32:
-; CHECK-NOT: and
-; CHECK: rorl
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorl %cl, (%rdi)
+; CHECK-NEXT: retq
%a = load i32, i32* %pa, align 16
%and = and i32 %b, 31
%shl = lshr i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
%shr = shl i32 %a, %and3
%or = or i32 %shl, %shr
store i32 %or, i32* %pa, align 32
@@ -99,16 +106,15 @@ entry:
define void @rotate_left_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_left_m64:
-; CHECK-NOT: and
-; CHECK: rolq
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rolq %cl, (%rdi)
+; CHECK-NEXT: retq
%a = load i64, i64* %pa, align 16
%and = and i64 %b, 63
%shl = shl i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
%shr = lshr i64 %a, %and3
%or = or i64 %shl, %shr
store i64 %or, i64* %pa, align 64
@@ -117,18 +123,18 @@ entry:
define void @rotate_right_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_right_m64:
-; CHECK-NOT: and
-; CHECK: rorq
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorq %cl, (%rdi)
+; CHECK-NEXT: retq
%a = load i64, i64* %pa, align 16
%and = and i64 %b, 63
%shl = lshr i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
%shr = shl i64 %a, %and3
%or = or i64 %shl, %shr
store i64 %or, i64* %pa, align 64
ret void
}
+
diff --git a/test/CodeGen/X86/sbb.ll b/test/CodeGen/X86/sbb.ll
index 414780b2d4e6..b6e8ebf6ed06 100644
--- a/test/CodeGen/X86/sbb.ll
+++ b/test/CodeGen/X86/sbb.ll
@@ -146,10 +146,8 @@ define i32 @ugt_select_neg1_or_0(i32 %x, i32 %y) nounwind {
define i32 @uge_select_0_or_neg1(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: uge_select_0_or_neg1:
; CHECK: # BB#0:
-; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl %esi, %edi
-; CHECK-NEXT: setae %al
-; CHECK-NEXT: decl %eax
+; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
%cmp = icmp uge i32 %x, %y
%ext = zext i1 %cmp to i32
@@ -163,10 +161,8 @@ define i32 @uge_select_0_or_neg1(i32 %x, i32 %y) nounwind {
define i32 @ule_select_0_or_neg1(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ule_select_0_or_neg1:
; CHECK: # BB#0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: cmpl %edi, %esi
-; CHECK-NEXT: setbe %al
-; CHECK-NEXT: decl %eax
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
%cmp = icmp ule i32 %y, %x
%ext = zext i1 %cmp to i32
@@ -180,10 +176,8 @@ define i32 @ule_select_0_or_neg1(i32 %x, i32 %y) nounwind {
define i32 @uge_select_0_or_neg1_sub(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: uge_select_0_or_neg1_sub:
; CHECK: # BB#0:
-; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl %esi, %edi
-; CHECK-NEXT: setae %al
-; CHECK-NEXT: decl %eax
+; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
%cmp = icmp uge i32 %x, %y
%ext = zext i1 %cmp to i32
@@ -191,6 +185,38 @@ define i32 @uge_select_0_or_neg1_sub(i32 %x, i32 %y) nounwind {
ret i32 %sub
}
+; Check more sub-from-zero patterns.
+; (X >u Y) ? -1 : 0 --> cmp, sbb
+
+define i64 @ugt_select_neg1_or_0_sub(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: ugt_select_neg1_or_0_sub:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpq %rdi, %rsi
+; CHECK-NEXT: sbbq %rax, %rax
+; CHECK-NEXT: retq
+ %cmp = icmp ugt i64 %x, %y
+ %zext = zext i1 %cmp to i64
+ %sub = sub i64 0, %zext
+ ret i64 %sub
+}
+
+; Swap the predicate and compare operands:
+; (Y <u X) ? -1 : 0 --> cmp, sbb
+
+define i16 @ult_select_neg1_or_0_sub(i16 %x, i16 %y) nounwind {
+; CHECK-LABEL: ult_select_neg1_or_0_sub:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpw %di, %si
+; CHECK-NEXT: sbbw %ax, %ax
+; CHECK-NEXT: retq
+ %cmp = icmp ult i16 %y, %x
+ %zext = zext i1 %cmp to i16
+ %sub = sub i16 0, %zext
+ ret i16 %sub
+}
+
+
+
; Make sure we're creating nodes with the right value types. This would crash.
; https://bugs.llvm.org/show_bug.cgi?id=33560
diff --git a/test/CodeGen/X86/select_const.ll b/test/CodeGen/X86/select_const.ll
index a97e7c299e73..0eb9bf46ffd1 100644
--- a/test/CodeGen/X86/select_const.ll
+++ b/test/CodeGen/X86/select_const.ll
@@ -205,6 +205,111 @@ define i32 @select_C_Cplus1_signext(i1 signext %cond) {
ret i32 %sel
}
+; If the constants differ by a small multiplier, use LEA.
+; select Cond, C1, C2 --> add (mul (zext Cond), C1-C2), C2 --> LEA C2(Cond * (C1-C2))
+
+define i32 @select_lea_2(i1 zeroext %cond) {
+; CHECK-LABEL: select_lea_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: movl $-1, %ecx
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 -1, i32 1
+ ret i32 %sel
+}
+
+define i64 @select_lea_3(i1 zeroext %cond) {
+; CHECK-LABEL: select_lea_3:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: movl $1, %ecx
+; CHECK-NEXT: movq $-2, %rax
+; CHECK-NEXT: cmoveq %rcx, %rax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i64 -2, i64 1
+ ret i64 %sel
+}
+
+define i32 @select_lea_5(i1 zeroext %cond) {
+; CHECK-LABEL: select_lea_5:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: movl $-2, %ecx
+; CHECK-NEXT: movl $3, %eax
+; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 -2, i32 3
+ ret i32 %sel
+}
+
+define i64 @select_lea_9(i1 zeroext %cond) {
+; CHECK-LABEL: select_lea_9:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: movl $2, %ecx
+; CHECK-NEXT: movq $-7, %rax
+; CHECK-NEXT: cmoveq %rcx, %rax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i64 -7, i64 2
+ ret i64 %sel
+}
+
+
+; If the constants differ by a large power-of-2, that can be a shift of the difference plus the smaller constant.
+; select Cond, C1, C2 --> add (mul (zext Cond), C1-C2), C2
+
+define i8 @select_pow2_diff(i1 zeroext %cond) {
+; CHECK-LABEL: select_pow2_diff:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: movb $19, %al
+; CHECK-NEXT: jne .LBB22_2
+; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: movb $3, %al
+; CHECK-NEXT: .LBB22_2:
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i8 19, i8 3
+ ret i8 %sel
+}
+
+define i16 @select_pow2_diff_invert(i1 zeroext %cond) {
+; CHECK-LABEL: select_pow2_diff_invert:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: movw $7, %cx
+; CHECK-NEXT: movw $71, %ax
+; CHECK-NEXT: cmovnew %cx, %ax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i16 7, i16 71
+ ret i16 %sel
+}
+
+define i32 @select_pow2_diff_neg(i1 zeroext %cond) {
+; CHECK-LABEL: select_pow2_diff_neg:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: movl $-9, %ecx
+; CHECK-NEXT: movl $-25, %eax
+; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 -9, i32 -25
+ ret i32 %sel
+}
+
+define i64 @select_pow2_diff_neg_invert(i1 zeroext %cond) {
+; CHECK-LABEL: select_pow2_diff_neg_invert:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: movl $29, %ecx
+; CHECK-NEXT: movq $-99, %rax
+; CHECK-NEXT: cmoveq %rcx, %rax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i64 -99, i64 29
+ ret i64 %sel
+}
+
; In general, select of 2 constants could be:
; select Cond, C1, C2 --> add (mul (zext Cond), C1-C2), C2 --> add (and (sext Cond), C1-C2), C2
@@ -263,11 +368,11 @@ define <4 x i32> @sel_constants_add_constant_vec(i1 %cond) {
; CHECK-LABEL: sel_constants_add_constant_vec:
; CHECK: # BB#0:
; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: jne .LBB22_1
+; CHECK-NEXT: jne .LBB30_1
; CHECK-NEXT: # BB#2:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [12,13,14,15]
; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB22_1:
+; CHECK-NEXT: .LBB30_1:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [4294967293,14,4,4]
; CHECK-NEXT: retq
%sel = select i1 %cond, <4 x i32> <i32 -4, i32 12, i32 1, i32 0>, <4 x i32> <i32 11, i32 11, i32 11, i32 11>
@@ -279,11 +384,11 @@ define <2 x double> @sel_constants_fmul_constant_vec(i1 %cond) {
; CHECK-LABEL: sel_constants_fmul_constant_vec:
; CHECK: # BB#0:
; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: jne .LBB23_1
+; CHECK-NEXT: jne .LBB31_1
; CHECK-NEXT: # BB#2:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [1.188300e+02,3.454000e+01]
; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB23_1:
+; CHECK-NEXT: .LBB31_1:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [-2.040000e+01,3.768000e+01]
; CHECK-NEXT: retq
%sel = select i1 %cond, <2 x double> <double -4.0, double 12.0>, <2 x double> <double 23.3, double 11.0>
diff --git a/test/CodeGen/X86/shift-codegen.ll b/test/CodeGen/X86/shift-codegen.ll
index 7d52bdeb9e3a..295a55d86a00 100644
--- a/test/CodeGen/X86/shift-codegen.ll
+++ b/test/CodeGen/X86/shift-codegen.ll
@@ -1,38 +1,36 @@
-; RUN: llc < %s -relocation-model=static -march=x86 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -relocation-model=static -mtriple=i686-unknown-unknown | FileCheck %s
; This should produce two shll instructions, not any lea's.
target triple = "i686-apple-darwin8"
-@Y = weak global i32 0 ; <i32*> [#uses=1]
-@X = weak global i32 0 ; <i32*> [#uses=2]
-
+@Y = weak global i32 0
+@X = weak global i32 0
define void @fn1() {
; CHECK-LABEL: fn1:
-; CHECK-NOT: ret
-; CHECK-NOT: lea
-; CHECK: shll $3
-; CHECK-NOT: lea
-; CHECK: ret
-
- %tmp = load i32, i32* @Y ; <i32> [#uses=1]
- %tmp1 = shl i32 %tmp, 3 ; <i32> [#uses=1]
- %tmp2 = load i32, i32* @X ; <i32> [#uses=1]
- %tmp3 = or i32 %tmp1, %tmp2 ; <i32> [#uses=1]
+; CHECK: # BB#0:
+; CHECK-NEXT: movl Y, %eax
+; CHECK-NEXT: shll $3, %eax
+; CHECK-NEXT: orl %eax, X
+; CHECK-NEXT: retl
+ %tmp = load i32, i32* @Y
+ %tmp1 = shl i32 %tmp, 3
+ %tmp2 = load i32, i32* @X
+ %tmp3 = or i32 %tmp1, %tmp2
store i32 %tmp3, i32* @X
ret void
}
define i32 @fn2(i32 %X, i32 %Y) {
; CHECK-LABEL: fn2:
-; CHECK-NOT: ret
-; CHECK-NOT: lea
-; CHECK: shll $3
-; CHECK-NOT: lea
-; CHECK: ret
-
- %tmp2 = shl i32 %Y, 3 ; <i32> [#uses=1]
- %tmp4 = or i32 %tmp2, %X ; <i32> [#uses=1]
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: shll $3, %eax
+; CHECK-NEXT: orl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: retl
+ %tmp2 = shl i32 %Y, 3
+ %tmp4 = or i32 %tmp2, %X
ret i32 %tmp4
}
diff --git a/test/CodeGen/X86/shift-folding.ll b/test/CodeGen/X86/shift-folding.ll
index 698878708977..76cf4a41a6cb 100644
--- a/test/CodeGen/X86/shift-folding.ll
+++ b/test/CodeGen/X86/shift-folding.ll
@@ -1,12 +1,13 @@
-; RUN: llc < %s -march=x86 -verify-coalescing | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-coalescing | FileCheck %s
define i32* @test1(i32* %P, i32 %X) {
; CHECK-LABEL: test1:
-; CHECK-NOT: shrl
-; CHECK-NOT: shll
-; CHECK: ret
-
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: andl $-4, %eax
+; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: retl
%Y = lshr i32 %X, 2
%gep.upgrd.1 = zext i32 %Y to i64
%P2 = getelementptr i32, i32* %P, i64 %gep.upgrd.1
@@ -15,11 +16,11 @@ entry:
define i32* @test2(i32* %P, i32 %X) {
; CHECK-LABEL: test2:
-; CHECK: shll $4
-; CHECK-NOT: shll
-; CHECK: ret
-
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: shll $4, %eax
+; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: retl
%Y = shl i32 %X, 2
%gep.upgrd.2 = zext i32 %Y to i64
%P2 = getelementptr i32, i32* %P, i64 %gep.upgrd.2
@@ -28,11 +29,11 @@ entry:
define i32* @test3(i32* %P, i32 %X) {
; CHECK-LABEL: test3:
-; CHECK-NOT: shrl
-; CHECK-NOT: shll
-; CHECK: ret
-
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: andl $-4, %eax
+; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: retl
%Y = ashr i32 %X, 2
%P2 = getelementptr i32, i32* %P, i32 %Y
ret i32* %P2
@@ -40,25 +41,27 @@ entry:
define fastcc i32 @test4(i32* %d) {
; CHECK-LABEL: test4:
-; CHECK-NOT: shrl
-; CHECK: ret
-
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzbl 3(%ecx), %eax
+; CHECK-NEXT: retl
%tmp4 = load i32, i32* %d
%tmp512 = lshr i32 %tmp4, 24
ret i32 %tmp512
}
-define i64 @test5(i16 %i, i32* %arr) {
; Ensure that we don't fold away shifts which have multiple uses, as they are
; just re-introduced for the second use.
-; CHECK-LABEL: test5:
-; CHECK-NOT: shrl
-; CHECK: shrl $11
-; CHECK-NOT: shrl
-; CHECK: ret
-entry:
+define i64 @test5(i16 %i, i32* %arr) {
+; CHECK-LABEL: test5:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: shrl $11, %eax
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: addl (%ecx,%eax,4), %eax
+; CHECK-NEXT: setb %dl
+; CHECK-NEXT: retl
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
%index.zext = zext i32 %index to i64
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-256.ll b/test/CodeGen/X86/shuffle-vs-trunc-256.ll
index ee8921c41a06..c84869433546 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -37,24 +37,16 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
;
; AVX512F-LABEL: shuffle_v32i8_to_v16i8:
; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512F-NEXT: vpmovsxwd (%rdi), %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v16i8:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VL-NEXT: vpmovsxwd (%rdi), %zmm0
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -62,11 +54,7 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BW-LABEL: shuffle_v32i8_to_v16i8:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -74,12 +62,7 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BWVL-LABEL: shuffle_v32i8_to_v16i8:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vmovdqu {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i8>, <32 x i8>* %L
@@ -166,11 +149,8 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX2-LABEL: shuffle_v16i16_to_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vmovdqa %xmm0, (%rsi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -178,11 +158,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-LABEL: shuffle_v16i16_to_v8i16:
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -190,42 +166,22 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512VL-LABEL: shuffle_v16i16_to_v8i16:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512VL-NEXT: vpmovdw %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i16_to_v8i16:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i16_to_v8i16:
; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512BWVL-NEXT: vpmovdw %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <16 x i16>, <16 x i16>* %L
@@ -293,48 +249,50 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
}
define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
-; AVX-LABEL: shuffle_v8i32_to_v4i32:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps (%rdi), %ymm0
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX-NEXT: vmovaps %xmm0, (%rsi)
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i32_to_v4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vmovaps %xmm0, (%rsi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_to_v4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v8i32_to_v4i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovaps (%rdi), %ymm0
-; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX512F-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_to_v4i32:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX512VL-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512VL-NEXT: vpmovqd %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i32_to_v4i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovaps (%rdi), %ymm0
-; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX512BW-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i32_to_v4i32:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX512BWVL-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vpmovqd %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <8 x i32>, <8 x i32>* %L
@@ -413,11 +371,9 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX2-LABEL: shuffle_v32i8_to_v8i8:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vmovq %xmm0, (%rsi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -425,11 +381,8 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v32i8_to_v8i8:
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -437,39 +390,23 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-LABEL: shuffle_v32i8_to_v8i8:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
+; AVX512VL-NEXT: vpmovdb %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v8i8:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8:
; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i8>, <32 x i8>* %L
@@ -542,26 +479,19 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-LABEL: shuffle_v16i16_to_v4i16:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm0
+; AVX1-NEXT: vmovaps (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vmovq %xmm0, (%rsi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_to_v4i16:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa (%rdi), %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT: vmovq %xmm0, (%rsi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -569,12 +499,8 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512F-LABEL: shuffle_v16i16_to_v4i16:
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -582,31 +508,23 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512VL-LABEL: shuffle_v16i16_to_v4i16:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX512VL-NEXT: vpmovdw %xmm0, (%rsi)
+; AVX512VL-NEXT: vpmovqw %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i16_to_v4i16:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16:
; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX512BWVL-NEXT: vpmovdw %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512BWVL-NEXT: vpmovqw %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <16 x i16>, <16 x i16>* %L
@@ -676,24 +594,19 @@ define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v4i8:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm0
+; AVX1-NEXT: vmovaps (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vmovd %xmm0, (%rsi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v4i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa (%rdi), %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vmovd %xmm0, (%rsi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -701,11 +614,8 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v32i8_to_v4i8:
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -713,30 +623,23 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi)
+; AVX512VL-NEXT: vpmovqb %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8:
; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512BWVL-NEXT: vpmovqb %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i8>, <32 x i8>* %L
@@ -802,3 +705,73 @@ define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
store <4 x i8> %strided.vec, <4 x i8>* %S
ret void
}
+
+; In this case not all elements are collected from the same source vector, so
+; the resulting BUILD_VECTOR should not be combined to a truncate.
+define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
+; AVX1-LABEL: negative:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[u,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u],zero,zero,zero,zero,zero,zero,zero,xmm0[0,2,4,6,8,10,12,14]
+; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: negative:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
+; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: negative:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
+; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: negative:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
+; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: negative:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: negative:
+; AVX512BWVL: # BB#0:
+; AVX512BWVL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
+; AVX512BWVL-NEXT: movl $65537, %eax # imm = 0x10001
+; AVX512BWVL-NEXT: kmovd %eax, %k1
+; AVX512BWVL-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
+; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
+; AVX512BWVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %strided.vec = shufflevector <32 x i8> %v, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ %w0 = extractelement <32 x i8> %w, i32 0
+ %merged = insertelement <16 x i8> %strided.vec, i8 %w0, i32 0
+ ret <16 x i8> %merged
+}
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
index a3ba58975800..69155b5cc565 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -11,49 +11,37 @@
define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v32i8:
; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512F-NEXT: vpmovsxwd (%rdi), %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpmovsxwd 32(%rdi), %zmm1
+; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v32i8:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
-; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512VL-NEXT: vpmovsxwd (%rdi), %zmm0
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vpmovsxwd 32(%rdi), %zmm1
+; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v32i8:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, (%rsi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v32i8:
; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512BWVL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
-; AVX512BWVL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512BWVL-NEXT: vmovdqu %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0
+; AVX512BWVL-NEXT: vpmovwb %zmm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <64 x i8>, <64 x i8>* %L
@@ -106,54 +94,12 @@ define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
}
define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
-; AVX512F-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX512F-NEXT: vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX512F-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[0,2],ymm1[4,6],ymm0[4,6]
-; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX512VL-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[0,2],ymm1[4,6],ymm0[4,6]
-; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512VL-NEXT: vmovdqa %ymm0, (%rsi)
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
-;
-; AVX512BW-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX512BW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT: vmovdqu {{.*#+}} ymm2 = [0,2,4,6,16,18,20,22,8,10,12,14,24,26,28,30]
-; AVX512BWVL-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
-; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm2[0,2,1,3]
-; AVX512BWVL-NEXT: vmovdqu %ymm0, (%rsi)
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
+; AVX512-LABEL: shuffle_v32i16_to_v16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %L
%strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
store <16 x i16> %strided.vec, <16 x i16>* %S
@@ -177,11 +123,8 @@ define void @trunc_v16i32_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
define void @shuffle_v16i32_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
; AVX512-LABEL: shuffle_v16i32_to_v8i32:
; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, (%rsi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %L
@@ -205,127 +148,12 @@ define void @trunc_v8i64_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
}
define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
-; AVX512F-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
-;
-; AVX512BW-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BW-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512BW-NEXT: vmovd %ecx, %xmm1
-; AVX512BW-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BW-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BW-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
-; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BWVL-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512BWVL-NEXT: vmovd %ecx, %xmm1
-; AVX512BWVL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
-; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi)
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
+; AVX512-LABEL: shuffle_v64i8_to_v16i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%vec = load <64 x i8>, <64 x i8>* %L
%strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
store <16 x i8> %strided.vec, <16 x i8>* %S
@@ -347,99 +175,12 @@ define void @trunc_v16i32_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
}
define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
-; AVX512F-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
-; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
-;
-; AVX512BW-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
-; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BW-NEXT: vmovd %xmm2, %eax
-; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrw $4, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BW-NEXT: vmovd %xmm2, %eax
-; AVX512BW-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrw $4, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, %eax
-; AVX512BW-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0
-; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; AVX512BWVL-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vmovd %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrw $4, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vmovd %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrw $4, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BWVL-NEXT: vmovd %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi)
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
+; AVX512-LABEL: shuffle_v32i16_to_v8i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT: vpmovqw %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %L
%strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
store <8 x i16> %strided.vec, <8 x i16>* %S
@@ -461,95 +202,12 @@ define void @trunc_v8i64_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
}
define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
-; AVX512F-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512F-NEXT: vmovq %xmm0, (%rsi)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX512VL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
-;
-; AVX512BW-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm1, %r8d
-; AVX512BW-NEXT: vpextrb $0, %xmm1, %r9d
-; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm1, %r10d
-; AVX512BW-NEXT: vpextrb $0, %xmm1, %r11d
-; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512BW-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX512BW-NEXT: vpextrb $8, %xmm0, %edx
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %edi
-; AVX512BW-NEXT: vmovd %edi, %xmm0
-; AVX512BW-NEXT: vpinsrb $1, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $4, %r11d, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $5, %r10d, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $7, %r8d, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512BWVL-NEXT: vmovd %ecx, %xmm1
-; AVX512BWVL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm0
-; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
+; AVX512-LABEL: shuffle_v64i8_to_v8i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT: vpmovqb %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%vec = load <64 x i8>, <64 x i8>* %L
%strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56>
store <8 x i8> %strided.vec, <8 x i8>* %S
diff --git a/test/CodeGen/X86/sink-blockfreq.ll b/test/CodeGen/X86/sink-blockfreq.ll
index 5436cf248bd5..d0b8972cee50 100644
--- a/test/CodeGen/X86/sink-blockfreq.ll
+++ b/test/CodeGen/X86/sink-blockfreq.ll
@@ -2,7 +2,7 @@
; RUN: llc -disable-preheader-prot=true -disable-machine-licm -machine-sink-bfi=false -mtriple=x86_64-apple-darwin < %s | FileCheck %s -check-prefix=MSINK_NOBFI
; Test that by changing BlockFrequencyInfo we change the order in which
-; machine-sink looks for sucessor blocks. By not using BFI, both G and B
+; machine-sink looks for successor blocks. By not using BFI, both G and B
; have the same loop depth and no instructions are sunk - B is selected but
; can't be used so as to avoid breaking a non-profitable critical edge. By using
; BFI, "mul" is sinked into the less frequent block G.
diff --git a/test/CodeGen/X86/sink-gep-before-mem-inst.ll b/test/CodeGen/X86/sink-gep-before-mem-inst.ll
new file mode 100644
index 000000000000..b9c94adda993
--- /dev/null
+++ b/test/CodeGen/X86/sink-gep-before-mem-inst.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -S -codegenprepare -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+define i64 @test.after(i8 addrspace(1)* readonly align 8) {
+; CHECK-LABEL: test.after
+; CHECK: sunkaddr
+entry:
+ %.0 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
+ %addr = bitcast i8 addrspace(1)* %.0 to i32 addrspace(1)*
+ br label %header
+
+header:
+ %addr.in.loop = phi i32 addrspace(1)* [ %addr, %entry ], [ %addr.after, %header ]
+ %local_2_ = phi i64 [ 0, %entry ], [ %.9, %header ]
+ %.7 = load i32, i32 addrspace(1)* %addr.in.loop, align 8
+ fence acquire
+ %.1 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
+ %addr.after = bitcast i8 addrspace(1)* %.1 to i32 addrspace(1)*
+ %.8 = sext i32 %.7 to i64
+ %.9 = add i64 %local_2_, %.8
+ %not. = icmp sgt i64 %.9, 999
+ br i1 %not., label %exit, label %header
+
+exit:
+ ret i64 %.9
+}
diff --git a/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll b/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll
new file mode 100644
index 000000000000..0461ee809efb
--- /dev/null
+++ b/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll
@@ -0,0 +1,55 @@
+; RUN: llc < %s -mtriple=x86_64-linux-android -mattr=+mmx -enable-legalize-types-checking | FileCheck %s
+;
+; D31946
+; Check that we don't end up with the "LLVM ERROR: Cannot select" error.
+; Additionally ensure that the output code actually puts fp128 values in SSE registers.
+
+declare fp128 @llvm.fabs.f128(fp128)
+declare fp128 @llvm.copysign.f128(fp128, fp128)
+
+define fp128 @TestSelect(fp128 %a, fp128 %b) {
+ %cmp = fcmp ogt fp128 %a, %b
+ %sub = fsub fp128 %a, %b
+ %res = select i1 %cmp, fp128 %sub, fp128 0xL00000000000000000000000000000000
+ ret fp128 %res
+; CHECK-LABEL: TestSelect:
+; CHECK movaps 16(%rsp), %xmm1
+; CHECK-NEXT callq __subtf3
+; CHECK-NEXT testl %ebx, %ebx
+; CHECK-NEXT jg .LBB0_2
+; CHECK-NEXT # BB#1:
+; CHECK-NEXT movaps .LCPI0_0(%rip), %xmm0
+; CHECK-NEXT .LBB0_2:
+; CHECK-NEXT addq $32, %rsp
+; CHECK-NEXT popq %rbx
+; CHECK-NEXT retq
+}
+
+define fp128 @TestFabs(fp128 %a) {
+ %res = call fp128 @llvm.fabs.f128(fp128 %a)
+ ret fp128 %res
+; CHECK-LABEL: TestFabs:
+; CHECK andps .LCPI1_0(%rip), %xmm0
+; CHECK-NEXT retq
+}
+
+define fp128 @TestCopysign(fp128 %a, fp128 %b) {
+ %res = call fp128 @llvm.copysign.f128(fp128 %a, fp128 %b)
+ ret fp128 %res
+; CHECK-LABEL: TestCopysign:
+; CHECK andps .LCPI2_1(%rip), %xmm0
+; CHECK-NEXT orps %xmm1, %xmm0
+; CHECK-NEXT retq
+}
+
+define fp128 @TestFneg(fp128 %a) {
+ %mul = fmul fp128 %a, %a
+ %res = fsub fp128 0xL00000000000000008000000000000000, %mul
+ ret fp128 %res
+; CHECK-LABEL: TestFneg:
+; CHECK movaps %xmm0, %xmm1
+; CHECK-NEXT callq __multf3
+; CHECK-NEXT xorps .LCPI3_0(%rip), %xmm0
+; CHECK-NEXT popq %rax
+; CHECK-NEXT retq
+}
diff --git a/test/CodeGen/X86/sse-schedule.ll b/test/CodeGen/X86/sse-schedule.ll
index 52e6b61aedfe..c41acd43b3ab 100644
--- a/test/CodeGen/X86/sse-schedule.ll
+++ b/test/CodeGen/X86/sse-schedule.ll
@@ -31,8 +31,8 @@ define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; SANDY-LABEL: test_addps:
; SANDY: # BB#0:
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addps:
; HASWELL: # BB#0:
@@ -73,8 +73,8 @@ define float @test_addss(float %a0, float %a1, float *%a2) {
; SANDY-LABEL: test_addss:
; SANDY: # BB#0:
; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addss:
; HASWELL: # BB#0:
@@ -122,9 +122,9 @@ define <4 x float> @test_andps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
;
; SANDY-LABEL: test_andps:
; SANDY: # BB#0:
-; SANDY-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andps:
; HASWELL: # BB#0:
@@ -176,9 +176,9 @@ define <4 x float> @test_andnotps(<4 x float> %a0, <4 x float> %a1, <4 x float>
;
; SANDY-LABEL: test_andnotps:
; SANDY: # BB#0:
-; SANDY-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andnotps:
; HASWELL: # BB#0:
@@ -228,9 +228,9 @@ define <4 x float> @test_cmpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; SANDY-LABEL: test_cmpps:
; SANDY: # BB#0:
; SANDY-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
-; SANDY-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmpps:
; HASWELL: # BB#0:
@@ -277,7 +277,7 @@ define float @test_cmpss(float %a0, float %a1, float *%a2) {
; SANDY: # BB#0:
; SANDY-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmpss:
; HASWELL: # BB#0:
@@ -347,16 +347,16 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SANDY-LABEL: test_comiss:
; SANDY: # BB#0:
; SANDY-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: setnp %al # sched: [1:0.33]
-; SANDY-NEXT: sete %cl # sched: [1:0.33]
+; SANDY-NEXT: setnp %al # sched: [1:1.00]
+; SANDY-NEXT: sete %cl # sched: [1:1.00]
; SANDY-NEXT: andb %al, %cl # sched: [1:0.33]
; SANDY-NEXT: vcomiss (%rdi), %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: setnp %al # sched: [1:0.33]
-; SANDY-NEXT: sete %dl # sched: [1:0.33]
+; SANDY-NEXT: setnp %al # sched: [1:1.00]
+; SANDY-NEXT: sete %dl # sched: [1:1.00]
; SANDY-NEXT: andb %al, %dl # sched: [1:0.33]
; SANDY-NEXT: orb %cl, %dl # sched: [1:0.33]
; SANDY-NEXT: movzbl %dl, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_comiss:
; HASWELL: # BB#0:
@@ -417,10 +417,10 @@ define float @test_cvtsi2ss(i32 %a0, i32 *%a1) {
;
; SANDY-LABEL: test_cvtsi2ss:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [4:1.00]
-; SANDY-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
+; SANDY-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:2.00]
+; SANDY-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsi2ss:
; HASWELL: # BB#0:
@@ -466,10 +466,10 @@ define float @test_cvtsi2ssq(i64 %a0, i64 *%a1) {
;
; SANDY-LABEL: test_cvtsi2ssq:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [4:1.00]
-; SANDY-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
+; SANDY-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:2.00]
+; SANDY-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsi2ssq:
; HASWELL: # BB#0:
@@ -515,10 +515,10 @@ define i32 @test_cvtss2si(float %a0, float *%a1) {
;
; SANDY-LABEL: test_cvtss2si:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtss2si %xmm0, %ecx # sched: [3:1.00]
-; SANDY-NEXT: vcvtss2si (%rdi), %eax # sched: [7:1.00]
+; SANDY-NEXT: vcvtss2si %xmm0, %ecx # sched: [5:1.00]
+; SANDY-NEXT: vcvtss2si (%rdi), %eax # sched: [10:1.00]
; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtss2si:
; HASWELL: # BB#0:
@@ -567,10 +567,10 @@ define i64 @test_cvtss2siq(float %a0, float *%a1) {
;
; SANDY-LABEL: test_cvtss2siq:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtss2si %xmm0, %rcx # sched: [3:1.00]
-; SANDY-NEXT: vcvtss2si (%rdi), %rax # sched: [7:1.00]
+; SANDY-NEXT: vcvtss2si %xmm0, %rcx # sched: [5:1.00]
+; SANDY-NEXT: vcvtss2si (%rdi), %rax # sched: [10:1.00]
; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtss2siq:
; HASWELL: # BB#0:
@@ -619,10 +619,10 @@ define i32 @test_cvttss2si(float %a0, float *%a1) {
;
; SANDY-LABEL: test_cvttss2si:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvttss2si %xmm0, %ecx # sched: [3:1.00]
-; SANDY-NEXT: vcvttss2si (%rdi), %eax # sched: [7:1.00]
+; SANDY-NEXT: vcvttss2si %xmm0, %ecx # sched: [5:1.00]
+; SANDY-NEXT: vcvttss2si (%rdi), %eax # sched: [10:1.00]
; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttss2si:
; HASWELL: # BB#0:
@@ -668,10 +668,10 @@ define i64 @test_cvttss2siq(float %a0, float *%a1) {
;
; SANDY-LABEL: test_cvttss2siq:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvttss2si %xmm0, %rcx # sched: [3:1.00]
-; SANDY-NEXT: vcvttss2si (%rdi), %rax # sched: [7:1.00]
+; SANDY-NEXT: vcvttss2si %xmm0, %rcx # sched: [5:1.00]
+; SANDY-NEXT: vcvttss2si (%rdi), %rax # sched: [10:1.00]
; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttss2siq:
; HASWELL: # BB#0:
@@ -714,9 +714,9 @@ define <4 x float> @test_divps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
;
; SANDY-LABEL: test_divps:
; SANDY: # BB#0:
-; SANDY-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [12:1.00]
-; SANDY-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
+; SANDY-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divps:
; HASWELL: # BB#0:
@@ -756,9 +756,9 @@ define float @test_divss(float %a0, float %a1, float *%a2) {
;
; SANDY-LABEL: test_divss:
; SANDY: # BB#0:
-; SANDY-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [12:1.00]
-; SANDY-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
+; SANDY-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divss:
; HASWELL: # BB#0:
@@ -799,8 +799,8 @@ define void @test_ldmxcsr(i32 %a0) {
; SANDY-LABEL: test_ldmxcsr:
; SANDY: # BB#0:
; SANDY-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
-; SANDY-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [4:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ldmxcsr:
; HASWELL: # BB#0:
@@ -843,8 +843,8 @@ define <4 x float> @test_maxps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; SANDY-LABEL: test_maxps:
; SANDY: # BB#0:
; SANDY-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxps:
; HASWELL: # BB#0:
@@ -886,8 +886,8 @@ define <4 x float> @test_maxss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; SANDY-LABEL: test_maxss:
; SANDY: # BB#0:
; SANDY-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxss:
; HASWELL: # BB#0:
@@ -929,8 +929,8 @@ define <4 x float> @test_minps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; SANDY-LABEL: test_minps:
; SANDY: # BB#0:
; SANDY-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minps:
; HASWELL: # BB#0:
@@ -972,8 +972,8 @@ define <4 x float> @test_minss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; SANDY-LABEL: test_minss:
; SANDY: # BB#0:
; SANDY-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minss:
; HASWELL: # BB#0:
@@ -1017,10 +1017,10 @@ define void @test_movaps(<4 x float> *%a0, <4 x float> *%a1) {
;
; SANDY-LABEL: test_movaps:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovaps (%rdi), %xmm0 # sched: [4:0.50]
+; SANDY-NEXT: vmovaps (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovaps %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movaps:
; HASWELL: # BB#0:
@@ -1068,7 +1068,7 @@ define <4 x float> @test_movhlps(<4 x float> %a0, <4 x float> %a1) {
; SANDY-LABEL: test_movhlps:
; SANDY: # BB#0:
; SANDY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movhlps:
; HASWELL: # BB#0:
@@ -1111,10 +1111,10 @@ define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
;
; SANDY-LABEL: test_movhps:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [5:1.00]
+; SANDY-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movhps:
; HASWELL: # BB#0:
@@ -1164,7 +1164,7 @@ define <4 x float> @test_movlhps(<4 x float> %a0, <4 x float> %a1) {
; SANDY: # BB#0:
; SANDY-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movlhps:
; HASWELL: # BB#0:
@@ -1206,10 +1206,10 @@ define void @test_movlps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
;
; SANDY-LABEL: test_movlps:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [5:1.00]
+; SANDY-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [7:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovlps %xmm0, (%rdi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movlps:
; HASWELL: # BB#0:
@@ -1254,8 +1254,8 @@ define i32 @test_movmskps(<4 x float> %a0) {
;
; SANDY-LABEL: test_movmskps:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovmskps %xmm0, %eax # sched: [2:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movmskps:
; HASWELL: # BB#0:
@@ -1295,8 +1295,8 @@ define void @test_movntps(<4 x float> %a0, <4 x float> *%a1) {
;
; SANDY-LABEL: test_movntps:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovntps %xmm0, (%rdi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntps:
; HASWELL: # BB#0:
@@ -1335,10 +1335,10 @@ define void @test_movss_mem(float* %a0, float* %a1) {
;
; SANDY-LABEL: test_movss_mem:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovss %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movss_mem:
; HASWELL: # BB#0:
@@ -1383,8 +1383,8 @@ define <4 x float> @test_movss_reg(<4 x float> %a0, <4 x float> %a1) {
;
; SANDY-LABEL: test_movss_reg:
; SANDY: # BB#0:
-; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movss_reg:
; HASWELL: # BB#0:
@@ -1423,10 +1423,10 @@ define void @test_movups(<4 x float> *%a0, <4 x float> *%a1) {
;
; SANDY-LABEL: test_movups:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [4:0.50]
+; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovups %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movups:
; HASWELL: # BB#0:
@@ -1469,8 +1469,8 @@ define <4 x float> @test_mulps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; SANDY-LABEL: test_mulps:
; SANDY: # BB#0:
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulps:
; HASWELL: # BB#0:
@@ -1511,8 +1511,8 @@ define float @test_mulss(float %a0, float %a1, float *%a2) {
; SANDY-LABEL: test_mulss:
; SANDY: # BB#0:
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulss:
; HASWELL: # BB#0:
@@ -1560,9 +1560,9 @@ define <4 x float> @test_orps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2
;
; SANDY-LABEL: test_orps:
; SANDY: # BB#0:
-; SANDY-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_orps:
; HASWELL: # BB#0:
@@ -1609,8 +1609,8 @@ define void @test_prefetchnta(i8* %a0) {
;
; SANDY-LABEL: test_prefetchnta:
; SANDY: # BB#0:
-; SANDY-NEXT: prefetchnta (%rdi) # sched: [4:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: prefetchnta (%rdi) # sched: [5:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_prefetchnta:
; HASWELL: # BB#0:
@@ -1652,10 +1652,10 @@ define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) {
;
; SANDY-LABEL: test_rcpps:
; SANDY: # BB#0:
-; SANDY-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vrcpps (%rdi), %xmm1 # sched: [9:1.00]
+; SANDY-NEXT: vrcpps %xmm0, %xmm0 # sched: [7:3.00]
+; SANDY-NEXT: vrcpps (%rdi), %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rcpps:
; HASWELL: # BB#0:
@@ -1708,10 +1708,10 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; SANDY-LABEL: test_rcpss:
; SANDY: # BB#0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rcpss:
; HASWELL: # BB#0:
@@ -1765,9 +1765,9 @@ define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) {
; SANDY-LABEL: test_rsqrtps:
; SANDY: # BB#0:
; SANDY-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [9:1.00]
+; SANDY-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rsqrtps:
; HASWELL: # BB#0:
@@ -1819,11 +1819,11 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
;
; SANDY-LABEL: test_rsqrtss:
; SANDY: # BB#0:
-; SANDY-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50]
-; SANDY-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [9:1.00]
+; SANDY-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
+; SANDY-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rsqrtss:
; HASWELL: # BB#0:
@@ -1875,7 +1875,7 @@ define void @test_sfence() {
; SANDY-LABEL: test_sfence:
; SANDY: # BB#0:
; SANDY-NEXT: sfence # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sfence:
; HASWELL: # BB#0:
@@ -1917,8 +1917,8 @@ define <4 x float> @test_shufps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
; SANDY-LABEL: test_shufps:
; SANDY: # BB#0:
; SANDY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
-; SANDY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shufps:
; HASWELL: # BB#0:
@@ -1962,10 +1962,10 @@ define <4 x float> @test_sqrtps(<4 x float> %a0, <4 x float> *%a1) {
;
; SANDY-LABEL: test_sqrtps:
; SANDY: # BB#0:
-; SANDY-NEXT: vsqrtps %xmm0, %xmm0 # sched: [15:1.00]
-; SANDY-NEXT: vsqrtps (%rdi), %xmm1 # sched: [19:1.00]
+; SANDY-NEXT: vsqrtps %xmm0, %xmm0 # sched: [14:1.00]
+; SANDY-NEXT: vsqrtps (%rdi), %xmm1 # sched: [20:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtps:
; HASWELL: # BB#0:
@@ -2017,11 +2017,11 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
;
; SANDY-LABEL: test_sqrtss:
; SANDY: # BB#0:
-; SANDY-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [19:1.00]
-; SANDY-NEXT: vmovaps (%rdi), %xmm1 # sched: [4:0.50]
-; SANDY-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [19:1.00]
+; SANDY-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [114:1.00]
+; SANDY-NEXT: vmovaps (%rdi), %xmm1 # sched: [6:0.50]
+; SANDY-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [114:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtss:
; HASWELL: # BB#0:
@@ -2067,9 +2067,9 @@ define i32 @test_stmxcsr() {
;
; SANDY-LABEL: test_stmxcsr:
; SANDY: # BB#0:
-; SANDY-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00]
-; SANDY-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [4:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
+; SANDY-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_stmxcsr:
; HASWELL: # BB#0:
@@ -2112,8 +2112,8 @@ define <4 x float> @test_subps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; SANDY-LABEL: test_subps:
; SANDY: # BB#0:
; SANDY-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subps:
; HASWELL: # BB#0:
@@ -2154,8 +2154,8 @@ define float @test_subss(float %a0, float %a1, float *%a2) {
; SANDY-LABEL: test_subss:
; SANDY: # BB#0:
; SANDY-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subss:
; HASWELL: # BB#0:
@@ -2220,16 +2220,16 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SANDY-LABEL: test_ucomiss:
; SANDY: # BB#0:
; SANDY-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: setnp %al # sched: [1:0.33]
-; SANDY-NEXT: sete %cl # sched: [1:0.33]
+; SANDY-NEXT: setnp %al # sched: [1:1.00]
+; SANDY-NEXT: sete %cl # sched: [1:1.00]
; SANDY-NEXT: andb %al, %cl # sched: [1:0.33]
; SANDY-NEXT: vucomiss (%rdi), %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: setnp %al # sched: [1:0.33]
-; SANDY-NEXT: sete %dl # sched: [1:0.33]
+; SANDY-NEXT: setnp %al # sched: [1:1.00]
+; SANDY-NEXT: sete %dl # sched: [1:1.00]
; SANDY-NEXT: andb %al, %dl # sched: [1:0.33]
; SANDY-NEXT: orb %cl, %dl # sched: [1:0.33]
; SANDY-NEXT: movzbl %dl, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ucomiss:
; HASWELL: # BB#0:
@@ -2292,8 +2292,8 @@ define <4 x float> @test_unpckhps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; SANDY-LABEL: test_unpckhps:
; SANDY: # BB#0:
; SANDY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
-; SANDY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpckhps:
; HASWELL: # BB#0:
@@ -2338,8 +2338,8 @@ define <4 x float> @test_unpcklps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; SANDY-LABEL: test_unpcklps:
; SANDY: # BB#0:
; SANDY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
-; SANDY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpcklps:
; HASWELL: # BB#0:
@@ -2387,9 +2387,9 @@ define <4 x float> @test_xorps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
;
; SANDY-LABEL: test_xorps:
; SANDY: # BB#0:
-; SANDY-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_xorps:
; HASWELL: # BB#0:
diff --git a/test/CodeGen/X86/sse2-schedule.ll b/test/CodeGen/X86/sse2-schedule.ll
index 14c155c8c6c0..3c36b2138139 100644
--- a/test/CodeGen/X86/sse2-schedule.ll
+++ b/test/CodeGen/X86/sse2-schedule.ll
@@ -31,8 +31,8 @@ define <2 x double> @test_addpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; SANDY-LABEL: test_addpd:
; SANDY: # BB#0:
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addpd:
; HASWELL: # BB#0:
@@ -73,8 +73,8 @@ define double @test_addsd(double %a0, double %a1, double *%a2) {
; SANDY-LABEL: test_addsd:
; SANDY: # BB#0:
; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsd:
; HASWELL: # BB#0:
@@ -117,10 +117,10 @@ define <2 x double> @test_andpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
;
; SANDY-LABEL: test_andpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andpd:
; HASWELL: # BB#0:
@@ -170,10 +170,10 @@ define <2 x double> @test_andnotpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
;
; SANDY-LABEL: test_andnotpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andnotpd:
; HASWELL: # BB#0:
@@ -226,9 +226,9 @@ define <2 x double> @test_cmppd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; SANDY-LABEL: test_cmppd:
; SANDY: # BB#0:
; SANDY-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
-; SANDY-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmppd:
; HASWELL: # BB#0:
@@ -275,7 +275,7 @@ define double @test_cmpsd(double %a0, double %a1, double *%a2) {
; SANDY: # BB#0:
; SANDY-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmpsd:
; HASWELL: # BB#0:
@@ -345,16 +345,16 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; SANDY-LABEL: test_comisd:
; SANDY: # BB#0:
; SANDY-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: setnp %al # sched: [1:0.33]
-; SANDY-NEXT: sete %cl # sched: [1:0.33]
+; SANDY-NEXT: setnp %al # sched: [1:1.00]
+; SANDY-NEXT: sete %cl # sched: [1:1.00]
; SANDY-NEXT: andb %al, %cl # sched: [1:0.33]
; SANDY-NEXT: vcomisd (%rdi), %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: setnp %al # sched: [1:0.33]
-; SANDY-NEXT: sete %dl # sched: [1:0.33]
+; SANDY-NEXT: setnp %al # sched: [1:1.00]
+; SANDY-NEXT: sete %dl # sched: [1:1.00]
; SANDY-NEXT: andb %al, %dl # sched: [1:0.33]
; SANDY-NEXT: orb %cl, %dl # sched: [1:0.33]
; SANDY-NEXT: movzbl %dl, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_comisd:
; HASWELL: # BB#0:
@@ -416,9 +416,9 @@ define <2 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
; SANDY-LABEL: test_cvtdq2pd:
; SANDY: # BB#0:
; SANDY-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [4:1.00]
-; SANDY-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [8:1.00]
+; SANDY-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtdq2pd:
; HASWELL: # BB#0:
@@ -467,10 +467,10 @@ define <4 x float> @test_cvtdq2ps(<4 x i32> %a0, <4 x i32> *%a1) {
;
; SANDY-LABEL: test_cvtdq2ps:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [4:1.00]
-; SANDY-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [8:1.00]
+; SANDY-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtdq2ps:
; HASWELL: # BB#0:
@@ -517,10 +517,10 @@ define <4 x i32> @test_cvtpd2dq(<2 x double> %a0, <2 x double> *%a1) {
;
; SANDY-LABEL: test_cvtpd2dq:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [7:1.00]
+; SANDY-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [4:1.00]
+; SANDY-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpd2dq:
; HASWELL: # BB#0:
@@ -568,10 +568,10 @@ define <4 x float> @test_cvtpd2ps(<2 x double> %a0, <2 x double> *%a1) {
;
; SANDY-LABEL: test_cvtpd2ps:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [7:1.00]
+; SANDY-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [4:1.00]
+; SANDY-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpd2ps:
; HASWELL: # BB#0:
@@ -620,9 +620,9 @@ define <4 x i32> @test_cvtps2dq(<4 x float> %a0, <4 x float> *%a1) {
; SANDY-LABEL: test_cvtps2dq:
; SANDY: # BB#0:
; SANDY-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [7:1.00]
+; SANDY-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtps2dq:
; HASWELL: # BB#0:
@@ -670,10 +670,10 @@ define <2 x double> @test_cvtps2pd(<4 x float> %a0, <4 x float> *%a1) {
;
; SANDY-LABEL: test_cvtps2pd:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtps2pd:
; HASWELL: # BB#0:
@@ -724,7 +724,7 @@ define i32 @test_cvtsd2si(double %a0, double *%a1) {
; SANDY-NEXT: vcvtsd2si %xmm0, %ecx # sched: [3:1.00]
; SANDY-NEXT: vcvtsd2si (%rdi), %eax # sched: [7:1.00]
; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsd2si:
; HASWELL: # BB#0:
@@ -773,10 +773,10 @@ define i64 @test_cvtsd2siq(double %a0, double *%a1) {
;
; SANDY-LABEL: test_cvtsd2siq:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtsd2si %xmm0, %rcx # sched: [3:1.00]
-; SANDY-NEXT: vcvtsd2si (%rdi), %rax # sched: [7:1.00]
+; SANDY-NEXT: vcvtsd2si %xmm0, %rcx # sched: [5:1.00]
+; SANDY-NEXT: vcvtsd2si (%rdi), %rax # sched: [10:1.00]
; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsd2siq:
; HASWELL: # BB#0:
@@ -830,10 +830,10 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; SANDY-LABEL: test_cvtsd2ss:
; SANDY: # BB#0:
; SANDY-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [4:0.50]
+; SANDY-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [6:0.50]
; SANDY-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [3:1.00]
; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsd2ss:
; HASWELL: # BB#0:
@@ -882,9 +882,9 @@ define double @test_cvtsi2sd(i32 %a0, i32 *%a1) {
; SANDY-LABEL: test_cvtsi2sd:
; SANDY: # BB#0:
; SANDY-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [4:1.00]
-; SANDY-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
+; SANDY-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsi2sd:
; HASWELL: # BB#0:
@@ -931,9 +931,9 @@ define double @test_cvtsi2sdq(i64 %a0, i64 *%a1) {
; SANDY-LABEL: test_cvtsi2sdq:
; SANDY: # BB#0:
; SANDY-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [4:1.00]
-; SANDY-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
+; SANDY-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsi2sdq:
; HASWELL: # BB#0:
@@ -985,11 +985,11 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
;
; SANDY-LABEL: test_cvtss2sd:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50]
-; SANDY-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [3:1.00]
+; SANDY-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
+; SANDY-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtss2sd:
; HASWELL: # BB#0:
@@ -1038,10 +1038,10 @@ define <4 x i32> @test_cvttpd2dq(<2 x double> %a0, <2 x double> *%a1) {
;
; SANDY-LABEL: test_cvttpd2dq:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [7:1.00]
+; SANDY-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [4:1.00]
+; SANDY-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttpd2dq:
; HASWELL: # BB#0:
@@ -1091,9 +1091,9 @@ define <4 x i32> @test_cvttps2dq(<4 x float> %a0, <4 x float> *%a1) {
; SANDY-LABEL: test_cvttps2dq:
; SANDY: # BB#0:
; SANDY-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [7:1.00]
+; SANDY-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttps2dq:
; HASWELL: # BB#0:
@@ -1139,10 +1139,10 @@ define i32 @test_cvttsd2si(double %a0, double *%a1) {
;
; SANDY-LABEL: test_cvttsd2si:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvttsd2si %xmm0, %ecx # sched: [3:1.00]
+; SANDY-NEXT: vcvttsd2si %xmm0, %ecx # sched: [5:1.00]
; SANDY-NEXT: vcvttsd2si (%rdi), %eax # sched: [7:1.00]
; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttsd2si:
; HASWELL: # BB#0:
@@ -1188,10 +1188,10 @@ define i64 @test_cvttsd2siq(double %a0, double *%a1) {
;
; SANDY-LABEL: test_cvttsd2siq:
; SANDY: # BB#0:
-; SANDY-NEXT: vcvttsd2si %xmm0, %rcx # sched: [3:1.00]
-; SANDY-NEXT: vcvttsd2si (%rdi), %rax # sched: [7:1.00]
+; SANDY-NEXT: vcvttsd2si %xmm0, %rcx # sched: [5:1.00]
+; SANDY-NEXT: vcvttsd2si (%rdi), %rax # sched: [10:1.00]
; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttsd2siq:
; HASWELL: # BB#0:
@@ -1234,9 +1234,9 @@ define <2 x double> @test_divpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
;
; SANDY-LABEL: test_divpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [12:1.00]
-; SANDY-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [22:1.00]
+; SANDY-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [28:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divpd:
; HASWELL: # BB#0:
@@ -1276,9 +1276,9 @@ define double @test_divsd(double %a0, double %a1, double *%a2) {
;
; SANDY-LABEL: test_divsd:
; SANDY: # BB#0:
-; SANDY-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [12:1.00]
-; SANDY-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [22:1.00]
+; SANDY-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [28:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divsd:
; HASWELL: # BB#0:
@@ -1322,7 +1322,7 @@ define void @test_lfence() {
; SANDY-LABEL: test_lfence:
; SANDY: # BB#0:
; SANDY-NEXT: lfence # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lfence:
; HASWELL: # BB#0:
@@ -1363,7 +1363,7 @@ define void @test_mfence() {
; SANDY-LABEL: test_mfence:
; SANDY: # BB#0:
; SANDY-NEXT: mfence # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mfence:
; HASWELL: # BB#0:
@@ -1402,7 +1402,7 @@ define void @test_maskmovdqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2) {
; SANDY-LABEL: test_maskmovdqu:
; SANDY: # BB#0:
; SANDY-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovdqu:
; HASWELL: # BB#0:
@@ -1440,8 +1440,8 @@ define <2 x double> @test_maxpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; SANDY-LABEL: test_maxpd:
; SANDY: # BB#0:
; SANDY-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxpd:
; HASWELL: # BB#0:
@@ -1483,8 +1483,8 @@ define <2 x double> @test_maxsd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; SANDY-LABEL: test_maxsd:
; SANDY: # BB#0:
; SANDY-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxsd:
; HASWELL: # BB#0:
@@ -1526,8 +1526,8 @@ define <2 x double> @test_minpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; SANDY-LABEL: test_minpd:
; SANDY: # BB#0:
; SANDY-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minpd:
; HASWELL: # BB#0:
@@ -1569,8 +1569,8 @@ define <2 x double> @test_minsd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; SANDY-LABEL: test_minsd:
; SANDY: # BB#0:
; SANDY-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minsd:
; HASWELL: # BB#0:
@@ -1614,10 +1614,10 @@ define void @test_movapd(<2 x double> *%a0, <2 x double> *%a1) {
;
; SANDY-LABEL: test_movapd:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovapd (%rdi), %xmm0 # sched: [4:0.50]
+; SANDY-NEXT: vmovapd (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovapd %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movapd:
; HASWELL: # BB#0:
@@ -1662,10 +1662,10 @@ define void @test_movdqa(<2 x i64> *%a0, <2 x i64> *%a1) {
;
; SANDY-LABEL: test_movdqa:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovdqa (%rdi), %xmm0 # sched: [4:0.50]
+; SANDY-NEXT: vmovdqa (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovdqa %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movdqa:
; HASWELL: # BB#0:
@@ -1710,10 +1710,10 @@ define void @test_movdqu(<2 x i64> *%a0, <2 x i64> *%a1) {
;
; SANDY-LABEL: test_movdqu:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovdqu (%rdi), %xmm0 # sched: [4:0.50]
+; SANDY-NEXT: vmovdqu (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovdqu %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movdqu:
; HASWELL: # BB#0:
@@ -1768,12 +1768,12 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; SANDY-LABEL: test_movd:
; SANDY: # BB#0:
; SANDY-NEXT: vmovd %edi, %xmm1 # sched: [1:0.33]
-; SANDY-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SANDY-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vmovd %xmm0, %eax # sched: [1:0.33]
-; SANDY-NEXT: vmovd %xmm1, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovd %xmm0, %eax # sched: [2:1.00]
+; SANDY-NEXT: vmovd %xmm1, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movd:
; HASWELL: # BB#0:
@@ -1838,13 +1838,13 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
;
; SANDY-LABEL: test_movd_64:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovq %rdi, %xmm1 # sched: [1:0.33]
-; SANDY-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [4:0.50]
+; SANDY-NEXT: vmovq %rdi, %xmm1 # sched: [1:1.00]
+; SANDY-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [6:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SANDY-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vmovq %xmm0, %rax # sched: [1:0.33]
-; SANDY-NEXT: vmovq %xmm1, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovq %xmm0, %rax # sched: [2:1.00]
+; SANDY-NEXT: vmovq %xmm1, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movd_64:
; HASWELL: # BB#0:
@@ -1900,10 +1900,10 @@ define void @test_movhpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) {
;
; SANDY-LABEL: test_movhpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [5:1.00]
+; SANDY-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovhpd %xmm0, (%rdi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movhpd:
; HASWELL: # BB#0:
@@ -1951,10 +1951,10 @@ define void @test_movlpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) {
;
; SANDY-LABEL: test_movlpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [5:1.00]
+; SANDY-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovlpd %xmm0, (%rdi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movlpd:
; HASWELL: # BB#0:
@@ -1998,8 +1998,8 @@ define i32 @test_movmskpd(<2 x double> %a0) {
;
; SANDY-LABEL: test_movmskpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovmskpd %xmm0, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovmskpd %xmm0, %eax # sched: [2:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movmskpd:
; HASWELL: # BB#0:
@@ -2039,8 +2039,8 @@ define void @test_movntdqa(<2 x i64> %a0, <2 x i64> *%a1) {
; SANDY-LABEL: test_movntdqa:
; SANDY: # BB#0:
; SANDY-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovntdq %xmm0, (%rdi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntdqa:
; HASWELL: # BB#0:
@@ -2080,8 +2080,8 @@ define void @test_movntpd(<2 x double> %a0, <2 x double> *%a1) {
; SANDY-LABEL: test_movntpd:
; SANDY: # BB#0:
; SANDY-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovntpd %xmm0, (%rdi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntpd:
; HASWELL: # BB#0:
@@ -2123,10 +2123,10 @@ define <2 x i64> @test_movq_mem(<2 x i64> %a0, i64 *%a1) {
;
; SANDY-LABEL: test_movq_mem:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [4:0.50]
+; SANDY-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [6:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovq %xmm0, (%rdi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movq_mem:
; HASWELL: # BB#0:
@@ -2174,7 +2174,7 @@ define <2 x i64> @test_movq_reg(<2 x i64> %a0, <2 x i64> %a1) {
; SANDY: # BB#0:
; SANDY-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.33]
; SANDY-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movq_reg:
; HASWELL: # BB#0:
@@ -2216,10 +2216,10 @@ define void @test_movsd_mem(double* %a0, double* %a1) {
;
; SANDY-LABEL: test_movsd_mem:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [4:0.50]
+; SANDY-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [6:0.50]
; SANDY-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovsd %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movsd_mem:
; HASWELL: # BB#0:
@@ -2266,7 +2266,7 @@ define <2 x double> @test_movsd_reg(<2 x double> %a0, <2 x double> %a1) {
; SANDY-LABEL: test_movsd_reg:
; SANDY: # BB#0:
; SANDY-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movsd_reg:
; HASWELL: # BB#0:
@@ -2305,10 +2305,10 @@ define void @test_movupd(<2 x double> *%a0, <2 x double> *%a1) {
;
; SANDY-LABEL: test_movupd:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovupd (%rdi), %xmm0 # sched: [4:0.50]
+; SANDY-NEXT: vmovupd (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovupd %xmm0, (%rsi) # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movupd:
; HASWELL: # BB#0:
@@ -2351,8 +2351,8 @@ define <2 x double> @test_mulpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; SANDY-LABEL: test_mulpd:
; SANDY: # BB#0:
; SANDY-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulpd:
; HASWELL: # BB#0:
@@ -2393,8 +2393,8 @@ define double @test_mulsd(double %a0, double %a1, double *%a2) {
; SANDY-LABEL: test_mulsd:
; SANDY: # BB#0:
; SANDY-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulsd:
; HASWELL: # BB#0:
@@ -2437,10 +2437,10 @@ define <2 x double> @test_orpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
;
; SANDY-LABEL: test_orpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_orpd:
; HASWELL: # BB#0:
@@ -2496,8 +2496,8 @@ define <8 x i16> @test_packssdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_packssdw:
; SANDY: # BB#0:
; SANDY-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packssdw:
; HASWELL: # BB#0:
@@ -2548,8 +2548,8 @@ define <16 x i8> @test_packsswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_packsswb:
; SANDY: # BB#0:
; SANDY-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packsswb:
; HASWELL: # BB#0:
@@ -2600,8 +2600,8 @@ define <16 x i8> @test_packuswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_packuswb:
; SANDY: # BB#0:
; SANDY-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packuswb:
; HASWELL: # BB#0:
@@ -2648,8 +2648,8 @@ define <16 x i8> @test_paddb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_paddb:
; SANDY: # BB#0:
; SANDY-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddb:
; HASWELL: # BB#0:
@@ -2694,8 +2694,8 @@ define <4 x i32> @test_paddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_paddd:
; SANDY: # BB#0:
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddd:
; HASWELL: # BB#0:
@@ -2736,8 +2736,8 @@ define <2 x i64> @test_paddq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SANDY-LABEL: test_paddq:
; SANDY: # BB#0:
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddq:
; HASWELL: # BB#0:
@@ -2781,9 +2781,9 @@ define <16 x i8> @test_paddsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
;
; SANDY-LABEL: test_paddsb:
; SANDY: # BB#0:
-; SANDY-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsb:
; HASWELL: # BB#0:
@@ -2828,9 +2828,9 @@ define <8 x i16> @test_paddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_paddsw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsw:
; HASWELL: # BB#0:
@@ -2876,8 +2876,8 @@ define <16 x i8> @test_paddusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_paddusb:
; SANDY: # BB#0:
; SANDY-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusb:
; HASWELL: # BB#0:
@@ -2923,8 +2923,8 @@ define <8 x i16> @test_paddusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_paddusw:
; SANDY: # BB#0:
; SANDY-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusw:
; HASWELL: # BB#0:
@@ -2969,9 +2969,9 @@ define <8 x i16> @test_paddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_paddw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddw:
; HASWELL: # BB#0:
@@ -3015,9 +3015,9 @@ define <2 x i64> @test_pand(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SANDY-LABEL: test_pand:
; SANDY: # BB#0:
; SANDY-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pand:
; HASWELL: # BB#0:
@@ -3070,9 +3070,9 @@ define <2 x i64> @test_pandn(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SANDY-LABEL: test_pandn:
; SANDY: # BB#0:
; SANDY-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [5:0.50]
+; SANDY-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pandn:
; HASWELL: # BB#0:
@@ -3122,8 +3122,8 @@ define <16 x i8> @test_pavgb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_pavgb:
; SANDY: # BB#0:
; SANDY-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgb:
; HASWELL: # BB#0:
@@ -3169,8 +3169,8 @@ define <8 x i16> @test_pavgw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_pavgw:
; SANDY: # BB#0:
; SANDY-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgw:
; HASWELL: # BB#0:
@@ -3217,9 +3217,9 @@ define <16 x i8> @test_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_pcmpeqb:
; SANDY: # BB#0:
; SANDY-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
-; SANDY-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqb:
; HASWELL: # BB#0:
@@ -3269,9 +3269,9 @@ define <4 x i32> @test_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_pcmpeqd:
; SANDY: # BB#0:
; SANDY-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
-; SANDY-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqd:
; HASWELL: # BB#0:
@@ -3321,9 +3321,9 @@ define <8 x i16> @test_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_pcmpeqw:
; SANDY: # BB#0:
; SANDY-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
-; SANDY-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqw:
; HASWELL: # BB#0:
@@ -3374,9 +3374,9 @@ define <16 x i8> @test_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_pcmpgtb:
; SANDY: # BB#0:
; SANDY-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
-; SANDY-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtb:
; HASWELL: # BB#0:
@@ -3427,9 +3427,9 @@ define <4 x i32> @test_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_pcmpgtd:
; SANDY: # BB#0:
; SANDY-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
-; SANDY-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtd:
; HASWELL: # BB#0:
@@ -3480,9 +3480,9 @@ define <8 x i16> @test_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_pcmpgtw:
; SANDY: # BB#0:
; SANDY-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
-; SANDY-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtw:
; HASWELL: # BB#0:
@@ -3526,9 +3526,9 @@ define i16 @test_pextrw(<8 x i16> %a0) {
;
; SANDY-LABEL: test_pextrw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.50]
+; SANDY-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
; SANDY-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrw:
; HASWELL: # BB#0:
@@ -3570,9 +3570,9 @@ define <8 x i16> @test_pinsrw(<8 x i16> %a0, i16 %a1, i16 *%a2) {
;
; SANDY-LABEL: test_pinsrw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pinsrw:
; HASWELL: # BB#0:
@@ -3620,9 +3620,9 @@ define <4 x i32> @test_pmaddwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_pmaddwd:
; SANDY: # BB#0:
-; SANDY-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddwd:
; HASWELL: # BB#0:
@@ -3669,8 +3669,8 @@ define <8 x i16> @test_pmaxsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_pmaxsw:
; SANDY: # BB#0:
; SANDY-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsw:
; HASWELL: # BB#0:
@@ -3716,8 +3716,8 @@ define <16 x i8> @test_pmaxub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_pmaxub:
; SANDY: # BB#0:
; SANDY-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxub:
; HASWELL: # BB#0:
@@ -3763,8 +3763,8 @@ define <8 x i16> @test_pminsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_pminsw:
; SANDY: # BB#0:
; SANDY-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsw:
; HASWELL: # BB#0:
@@ -3810,8 +3810,8 @@ define <16 x i8> @test_pminub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_pminub:
; SANDY: # BB#0:
; SANDY-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminub:
; HASWELL: # BB#0:
@@ -3851,8 +3851,8 @@ define i32 @test_pmovmskb(<16 x i8> %a0) {
;
; SANDY-LABEL: test_pmovmskb:
; SANDY: # BB#0:
-; SANDY-NEXT: vpmovmskb %xmm0, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmovmskb %xmm0, %eax # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovmskb:
; HASWELL: # BB#0:
@@ -3891,7 +3891,7 @@ define <8 x i16> @test_pmulhuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY: # BB#0:
; SANDY-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhuw:
; HASWELL: # BB#0:
@@ -3932,9 +3932,9 @@ define <8 x i16> @test_pmulhw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_pmulhw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhw:
; HASWELL: # BB#0:
@@ -3975,9 +3975,9 @@ define <8 x i16> @test_pmullw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_pmullw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmullw:
; HASWELL: # BB#0:
@@ -4027,7 +4027,7 @@ define <2 x i64> @test_pmuludq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY: # BB#0:
; SANDY-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmuludq:
; HASWELL: # BB#0:
@@ -4073,9 +4073,9 @@ define <2 x i64> @test_por(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SANDY-LABEL: test_por:
; SANDY: # BB#0:
; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_por:
; HASWELL: # BB#0:
@@ -4126,9 +4126,9 @@ define <2 x i64> @test_psadbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
;
; SANDY-LABEL: test_psadbw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psadbw:
; HASWELL: # BB#0:
@@ -4176,9 +4176,9 @@ define <4 x i32> @test_pshufd(<4 x i32> %a0, <4 x i32> *%a1) {
; SANDY-LABEL: test_pshufd:
; SANDY: # BB#0:
; SANDY-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.50]
-; SANDY-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [5:0.50]
+; SANDY-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufd:
; HASWELL: # BB#0:
@@ -4226,10 +4226,10 @@ define <8 x i16> @test_pshufhw(<8 x i16> %a0, <8 x i16> *%a1) {
;
; SANDY-LABEL: test_pshufhw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.50]
-; SANDY-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [5:0.50]
-; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:1.00]
+; SANDY-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [7:0.50]
+; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufhw:
; HASWELL: # BB#0:
@@ -4278,9 +4278,9 @@ define <8 x i16> @test_pshuflw(<8 x i16> %a0, <8 x i16> *%a1) {
; SANDY-LABEL: test_pshuflw:
; SANDY: # BB#0:
; SANDY-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.50]
-; SANDY-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [5:0.50]
-; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [7:0.50]
+; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshuflw:
; HASWELL: # BB#0:
@@ -4326,10 +4326,10 @@ define <4 x i32> @test_pslld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; SANDY-LABEL: test_pslld:
; SANDY: # BB#0:
-; SANDY-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pslld:
; HASWELL: # BB#0:
@@ -4378,7 +4378,7 @@ define <4 x i32> @test_pslldq(<4 x i32> %a0) {
; SANDY-LABEL: test_pslldq:
; SANDY: # BB#0:
; SANDY-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pslldq:
; HASWELL: # BB#0:
@@ -4417,10 +4417,10 @@ define <2 x i64> @test_psllq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
;
; SANDY-LABEL: test_psllq:
; SANDY: # BB#0:
-; SANDY-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllq:
; HASWELL: # BB#0:
@@ -4468,10 +4468,10 @@ define <8 x i16> @test_psllw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_psllw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllw:
; HASWELL: # BB#0:
@@ -4519,10 +4519,10 @@ define <4 x i32> @test_psrad(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; SANDY-LABEL: test_psrad:
; SANDY: # BB#0:
-; SANDY-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
+; SANDY-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrad:
; HASWELL: # BB#0:
@@ -4570,10 +4570,10 @@ define <8 x i16> @test_psraw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_psraw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
+; SANDY-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psraw:
; HASWELL: # BB#0:
@@ -4621,10 +4621,10 @@ define <4 x i32> @test_psrld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; SANDY-LABEL: test_psrld:
; SANDY: # BB#0:
-; SANDY-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
+; SANDY-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrld:
; HASWELL: # BB#0:
@@ -4673,7 +4673,7 @@ define <4 x i32> @test_psrldq(<4 x i32> %a0) {
; SANDY-LABEL: test_psrldq:
; SANDY: # BB#0:
; SANDY-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrldq:
; HASWELL: # BB#0:
@@ -4712,10 +4712,10 @@ define <2 x i64> @test_psrlq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
;
; SANDY-LABEL: test_psrlq:
; SANDY: # BB#0:
-; SANDY-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
+; SANDY-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlq:
; HASWELL: # BB#0:
@@ -4763,10 +4763,10 @@ define <8 x i16> @test_psrlw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_psrlw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
+; SANDY-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlw:
; HASWELL: # BB#0:
@@ -4816,8 +4816,8 @@ define <16 x i8> @test_psubb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_psubb:
; SANDY: # BB#0:
; SANDY-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubb:
; HASWELL: # BB#0:
@@ -4862,8 +4862,8 @@ define <4 x i32> @test_psubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_psubd:
; SANDY: # BB#0:
; SANDY-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubd:
; HASWELL: # BB#0:
@@ -4904,8 +4904,8 @@ define <2 x i64> @test_psubq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SANDY-LABEL: test_psubq:
; SANDY: # BB#0:
; SANDY-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubq:
; HASWELL: # BB#0:
@@ -4950,8 +4950,8 @@ define <16 x i8> @test_psubsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_psubsb:
; SANDY: # BB#0:
; SANDY-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsb:
; HASWELL: # BB#0:
@@ -4997,8 +4997,8 @@ define <8 x i16> @test_psubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_psubsw:
; SANDY: # BB#0:
; SANDY-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsw:
; HASWELL: # BB#0:
@@ -5044,8 +5044,8 @@ define <16 x i8> @test_psubusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_psubusb:
; SANDY: # BB#0:
; SANDY-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusb:
; HASWELL: # BB#0:
@@ -5091,8 +5091,8 @@ define <8 x i16> @test_psubusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_psubusw:
; SANDY: # BB#0:
; SANDY-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusw:
; HASWELL: # BB#0:
@@ -5138,8 +5138,8 @@ define <8 x i16> @test_psubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_psubw:
; SANDY: # BB#0:
; SANDY-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubw:
; HASWELL: # BB#0:
@@ -5184,8 +5184,8 @@ define <16 x i8> @test_punpckhbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_punpckhbw:
; SANDY: # BB#0:
; SANDY-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.50]
-; SANDY-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhbw:
; HASWELL: # BB#0:
@@ -5231,9 +5231,9 @@ define <4 x i32> @test_punpckhdq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_punpckhdq:
; SANDY: # BB#0:
; SANDY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
-; SANDY-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [5:0.50]
+; SANDY-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhdq:
; HASWELL: # BB#0:
@@ -5279,10 +5279,10 @@ define <2 x i64> @test_punpckhqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2)
;
; SANDY-LABEL: test_punpckhqdq:
; SANDY: # BB#0:
-; SANDY-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
-; SANDY-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [5:0.50]
+; SANDY-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
+; SANDY-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhqdq:
; HASWELL: # BB#0:
@@ -5330,8 +5330,8 @@ define <8 x i16> @test_punpckhwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_punpckhwd:
; SANDY: # BB#0:
; SANDY-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
-; SANDY-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhwd:
; HASWELL: # BB#0:
@@ -5375,9 +5375,9 @@ define <16 x i8> @test_punpcklbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
;
; SANDY-LABEL: test_punpcklbw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
-; SANDY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
+; SANDY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklbw:
; HASWELL: # BB#0:
@@ -5423,9 +5423,9 @@ define <4 x i32> @test_punpckldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_punpckldq:
; SANDY: # BB#0:
; SANDY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
-; SANDY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [5:0.50]
+; SANDY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckldq:
; HASWELL: # BB#0:
@@ -5472,9 +5472,9 @@ define <2 x i64> @test_punpcklqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2)
; SANDY-LABEL: test_punpcklqdq:
; SANDY: # BB#0:
; SANDY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
-; SANDY-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [5:0.50]
+; SANDY-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklqdq:
; HASWELL: # BB#0:
@@ -5522,8 +5522,8 @@ define <8 x i16> @test_punpcklwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_punpcklwd:
; SANDY: # BB#0:
; SANDY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
-; SANDY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklwd:
; HASWELL: # BB#0:
@@ -5567,9 +5567,9 @@ define <2 x i64> @test_pxor(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SANDY-LABEL: test_pxor:
; SANDY: # BB#0:
; SANDY-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pxor:
; HASWELL: # BB#0:
@@ -5616,9 +5616,9 @@ define <2 x double> @test_shufpd(<2 x double> %a0, <2 x double> %a1, <2 x double
; SANDY-LABEL: test_shufpd:
; SANDY: # BB#0:
; SANDY-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00]
-; SANDY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [5:1.00]
+; SANDY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shufpd:
; HASWELL: # BB#0:
@@ -5665,10 +5665,10 @@ define <2 x double> @test_sqrtpd(<2 x double> %a0, <2 x double> *%a1) {
;
; SANDY-LABEL: test_sqrtpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [15:1.00]
-; SANDY-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [19:1.00]
+; SANDY-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [22:1.00]
+; SANDY-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [28:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtpd:
; HASWELL: # BB#0:
@@ -5720,11 +5720,11 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
;
; SANDY-LABEL: test_sqrtsd:
; SANDY: # BB#0:
-; SANDY-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [19:1.00]
-; SANDY-NEXT: vmovapd (%rdi), %xmm1 # sched: [4:0.50]
-; SANDY-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [19:1.00]
+; SANDY-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [21:1.00]
+; SANDY-NEXT: vmovapd (%rdi), %xmm1 # sched: [6:0.50]
+; SANDY-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [21:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtsd:
; HASWELL: # BB#0:
@@ -5771,8 +5771,8 @@ define <2 x double> @test_subpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; SANDY-LABEL: test_subpd:
; SANDY: # BB#0:
; SANDY-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subpd:
; HASWELL: # BB#0:
@@ -5813,8 +5813,8 @@ define double @test_subsd(double %a0, double %a1, double *%a2) {
; SANDY-LABEL: test_subsd:
; SANDY: # BB#0:
; SANDY-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subsd:
; HASWELL: # BB#0:
@@ -5879,16 +5879,16 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; SANDY-LABEL: test_ucomisd:
; SANDY: # BB#0:
; SANDY-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: setnp %al # sched: [1:0.33]
-; SANDY-NEXT: sete %cl # sched: [1:0.33]
+; SANDY-NEXT: setnp %al # sched: [1:1.00]
+; SANDY-NEXT: sete %cl # sched: [1:1.00]
; SANDY-NEXT: andb %al, %cl # sched: [1:0.33]
; SANDY-NEXT: vucomisd (%rdi), %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: setnp %al # sched: [1:0.33]
-; SANDY-NEXT: sete %dl # sched: [1:0.33]
+; SANDY-NEXT: setnp %al # sched: [1:1.00]
+; SANDY-NEXT: sete %dl # sched: [1:1.00]
; SANDY-NEXT: andb %al, %dl # sched: [1:0.33]
; SANDY-NEXT: orb %cl, %dl # sched: [1:0.33]
; SANDY-NEXT: movzbl %dl, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ucomisd:
; HASWELL: # BB#0:
@@ -5950,9 +5950,9 @@ define <2 x double> @test_unpckhpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; SANDY-LABEL: test_unpckhpd:
; SANDY: # BB#0:
; SANDY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
-; SANDY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [5:1.00]
+; SANDY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpckhpd:
; HASWELL: # BB#0:
@@ -6005,9 +6005,9 @@ define <2 x double> @test_unpcklpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; SANDY-LABEL: test_unpcklpd:
; SANDY: # BB#0:
; SANDY-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
-; SANDY-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [5:1.00]
+; SANDY-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpcklpd:
; HASWELL: # BB#0:
@@ -6053,10 +6053,10 @@ define <2 x double> @test_xorpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
;
; SANDY-LABEL: test_xorpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
+; SANDY-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SANDY-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_xorpd:
; HASWELL: # BB#0:
diff --git a/test/CodeGen/X86/sse3-schedule.ll b/test/CodeGen/X86/sse3-schedule.ll
index 482b2fcab642..ef1ddae4532d 100644
--- a/test/CodeGen/X86/sse3-schedule.ll
+++ b/test/CodeGen/X86/sse3-schedule.ll
@@ -31,8 +31,8 @@ define <2 x double> @test_addsubpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; SANDY-LABEL: test_addsubpd:
; SANDY: # BB#0:
; SANDY-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsubpd:
; HASWELL: # BB#0:
@@ -74,8 +74,8 @@ define <4 x float> @test_addsubps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; SANDY-LABEL: test_addsubps:
; SANDY: # BB#0:
; SANDY-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsubps:
; HASWELL: # BB#0:
@@ -116,9 +116,9 @@ define <2 x double> @test_haddpd(<2 x double> %a0, <2 x double> %a1, <2 x double
;
; SANDY-LABEL: test_haddpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
+; SANDY-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_haddpd:
; HASWELL: # BB#0:
@@ -159,9 +159,9 @@ define <4 x float> @test_haddps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
;
; SANDY-LABEL: test_haddps:
; SANDY: # BB#0:
-; SANDY-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
+; SANDY-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_haddps:
; HASWELL: # BB#0:
@@ -202,9 +202,9 @@ define <2 x double> @test_hsubpd(<2 x double> %a0, <2 x double> %a1, <2 x double
;
; SANDY-LABEL: test_hsubpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
+; SANDY-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_hsubpd:
; HASWELL: # BB#0:
@@ -245,9 +245,9 @@ define <4 x float> @test_hsubps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
;
; SANDY-LABEL: test_hsubps:
; SANDY: # BB#0:
-; SANDY-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
+; SANDY-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_hsubps:
; HASWELL: # BB#0:
@@ -287,8 +287,8 @@ define <16 x i8> @test_lddqu(i8* %a0) {
;
; SANDY-LABEL: test_lddqu:
; SANDY: # BB#0:
-; SANDY-NEXT: vlddqu (%rdi), %xmm0 # sched: [4:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vlddqu (%rdi), %xmm0 # sched: [6:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lddqu:
; HASWELL: # BB#0:
@@ -330,9 +330,9 @@ define <2 x double> @test_movddup(<2 x double> %a0, <2 x double> *%a1) {
; SANDY-LABEL: test_movddup:
; SANDY: # BB#0:
; SANDY-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
-; SANDY-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [4:0.50]
+; SANDY-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [6:0.50]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movddup:
; HASWELL: # BB#0:
@@ -380,9 +380,9 @@ define <4 x float> @test_movshdup(<4 x float> %a0, <4 x float> *%a1) {
; SANDY-LABEL: test_movshdup:
; SANDY: # BB#0:
; SANDY-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:1.00]
-; SANDY-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [4:0.50]
+; SANDY-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [6:0.50]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movshdup:
; HASWELL: # BB#0:
@@ -430,9 +430,9 @@ define <4 x float> @test_movsldup(<4 x float> %a0, <4 x float> *%a1) {
; SANDY-LABEL: test_movsldup:
; SANDY: # BB#0:
; SANDY-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:1.00]
-; SANDY-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [4:0.50]
+; SANDY-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [6:0.50]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movsldup:
; HASWELL: # BB#0:
diff --git a/test/CodeGen/X86/sse41-schedule.ll b/test/CodeGen/X86/sse41-schedule.ll
index 340b9abe8879..1ab1598fcab7 100644
--- a/test/CodeGen/X86/sse41-schedule.ll
+++ b/test/CodeGen/X86/sse41-schedule.ll
@@ -25,10 +25,10 @@ define <2 x double> @test_blendpd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
;
; SANDY-LABEL: test_blendpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50]
+; SANDY-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendpd:
; HASWELL: # BB#0:
@@ -65,9 +65,9 @@ define <4 x float> @test_blendps(<4 x float> %a0, <4 x float> %a1, <4 x float> *
;
; SANDY-LABEL: test_blendps:
; SANDY: # BB#0:
-; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
-; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:1.00]
+; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendps:
; HASWELL: # BB#0:
@@ -107,9 +107,9 @@ define <2 x double> @test_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
;
; SANDY-LABEL: test_blendvpd:
; SANDY: # BB#0:
-; SANDY-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
-; SANDY-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
+; SANDY-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendvpd:
; HASWELL: # BB#0:
@@ -150,9 +150,9 @@ define <4 x float> @test_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float>
;
; SANDY-LABEL: test_blendvps:
; SANDY: # BB#0:
-; SANDY-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
-; SANDY-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
+; SANDY-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendvps:
; HASWELL: # BB#0:
@@ -187,9 +187,9 @@ define <2 x double> @test_dppd(<2 x double> %a0, <2 x double> %a1, <2 x double>
;
; SANDY-LABEL: test_dppd:
; SANDY: # BB#0:
-; SANDY-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [15:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_dppd:
; HASWELL: # BB#0:
@@ -224,9 +224,9 @@ define <4 x float> @test_dpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2
;
; SANDY-LABEL: test_dpps:
; SANDY: # BB#0:
-; SANDY-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [12:2.00]
; SANDY-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_dpps:
; HASWELL: # BB#0:
@@ -262,8 +262,8 @@ define <4 x float> @test_insertps(<4 x float> %a0, <4 x float> %a1, float *%a2)
; SANDY-LABEL: test_insertps:
; SANDY: # BB#0:
; SANDY-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:1.00]
-; SANDY-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [7:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_insertps:
; HASWELL: # BB#0:
@@ -296,8 +296,8 @@ define <2 x i64> @test_movntdqa(i8* %a0) {
;
; SANDY-LABEL: test_movntdqa:
; SANDY: # BB#0:
-; SANDY-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [4:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [6:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntdqa:
; HASWELL: # BB#0:
@@ -328,9 +328,9 @@ define <8 x i16> @test_mpsadbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
;
; SANDY-LABEL: test_mpsadbw:
; SANDY: # BB#0:
-; SANDY-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [6:1.00]
-; SANDY-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mpsadbw:
; HASWELL: # BB#0:
@@ -367,8 +367,8 @@ define <8 x i16> @test_packusdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_packusdw:
; SANDY: # BB#0:
; SANDY-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packusdw:
; HASWELL: # BB#0:
@@ -411,8 +411,8 @@ define <16 x i8> @test_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2, <16
; SANDY-LABEL: test_pblendvb:
; SANDY: # BB#0:
; SANDY-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
-; SANDY-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendvb:
; HASWELL: # BB#0:
@@ -448,8 +448,8 @@ define <8 x i16> @test_pblendw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_pblendw:
; SANDY: # BB#0:
; SANDY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50]
-; SANDY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendw:
; HASWELL: # BB#0:
@@ -483,9 +483,9 @@ define <2 x i64> @test_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
;
; SANDY-LABEL: test_pcmpeqq:
; SANDY: # BB#0:
-; SANDY-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqq:
; HASWELL: # BB#0:
@@ -521,9 +521,9 @@ define i32 @test_pextrb(<16 x i8> %a0, i8 *%a1) {
;
; SANDY-LABEL: test_pextrb:
; SANDY: # BB#0:
-; SANDY-NEXT: vpextrb $3, %xmm0, %eax # sched: [1:0.50]
+; SANDY-NEXT: vpextrb $3, %xmm0, %eax # sched: [3:1.00]
; SANDY-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrb:
; HASWELL: # BB#0:
@@ -558,9 +558,9 @@ define i32 @test_pextrd(<4 x i32> %a0, i32 *%a1) {
;
; SANDY-LABEL: test_pextrd:
; SANDY: # BB#0:
-; SANDY-NEXT: vpextrd $3, %xmm0, %eax # sched: [1:0.50]
+; SANDY-NEXT: vpextrd $3, %xmm0, %eax # sched: [3:1.00]
; SANDY-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrd:
; HASWELL: # BB#0:
@@ -594,9 +594,9 @@ define i64 @test_pextrq(<2 x i64> %a0, <2 x i64> %a1, i64 *%a2) {
;
; SANDY-LABEL: test_pextrq:
; SANDY: # BB#0:
-; SANDY-NEXT: vpextrq $1, %xmm0, %rax # sched: [1:0.50]
+; SANDY-NEXT: vpextrq $1, %xmm0, %rax # sched: [3:1.00]
; SANDY-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrq:
; HASWELL: # BB#0:
@@ -630,9 +630,9 @@ define i32 @test_pextrw(<8 x i16> %a0, i16 *%a1) {
;
; SANDY-LABEL: test_pextrw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpextrw $3, %xmm0, %eax # sched: [1:0.50]
+; SANDY-NEXT: vpextrw $3, %xmm0, %eax # sched: [3:1.00]
; SANDY-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrw:
; HASWELL: # BB#0:
@@ -667,9 +667,9 @@ define <8 x i16> @test_phminposuw(<8 x i16> *%a0) {
;
; SANDY-LABEL: test_phminposuw:
; SANDY: # BB#0:
-; SANDY-NEXT: vphminposuw (%rdi), %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: vphminposuw (%rdi), %xmm0 # sched: [11:1.00]
; SANDY-NEXT: vphminposuw %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phminposuw:
; HASWELL: # BB#0:
@@ -704,9 +704,9 @@ define <16 x i8> @test_pinsrb(<16 x i8> %a0, i8 %a1, i8 *%a2) {
;
; SANDY-LABEL: test_pinsrb:
; SANDY: # BB#0:
-; SANDY-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pinsrb:
; HASWELL: # BB#0:
@@ -740,9 +740,9 @@ define <4 x i32> @test_pinsrd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
;
; SANDY-LABEL: test_pinsrd:
; SANDY: # BB#0:
-; SANDY-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pinsrd:
; HASWELL: # BB#0:
@@ -778,10 +778,10 @@ define <2 x i64> @test_pinsrq(<2 x i64> %a0, <2 x i64> %a1, i64 %a2, i64 *%a3) {
;
; SANDY-LABEL: test_pinsrq:
; SANDY: # BB#0:
-; SANDY-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [5:0.50]
+; SANDY-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pinsrq:
; HASWELL: # BB#0:
@@ -819,8 +819,8 @@ define <16 x i8> @test_pmaxsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_pmaxsb:
; SANDY: # BB#0:
; SANDY-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsb:
; HASWELL: # BB#0:
@@ -856,8 +856,8 @@ define <4 x i32> @test_pmaxsd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_pmaxsd:
; SANDY: # BB#0:
; SANDY-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsd:
; HASWELL: # BB#0:
@@ -893,8 +893,8 @@ define <4 x i32> @test_pmaxud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_pmaxud:
; SANDY: # BB#0:
; SANDY-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxud:
; HASWELL: # BB#0:
@@ -930,8 +930,8 @@ define <8 x i16> @test_pmaxuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_pmaxuw:
; SANDY: # BB#0:
; SANDY-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxuw:
; HASWELL: # BB#0:
@@ -967,8 +967,8 @@ define <16 x i8> @test_pminsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_pminsb:
; SANDY: # BB#0:
; SANDY-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsb:
; HASWELL: # BB#0:
@@ -1004,8 +1004,8 @@ define <4 x i32> @test_pminsd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_pminsd:
; SANDY: # BB#0:
; SANDY-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsd:
; HASWELL: # BB#0:
@@ -1041,8 +1041,8 @@ define <4 x i32> @test_pminud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_pminud:
; SANDY: # BB#0:
; SANDY-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminud:
; HASWELL: # BB#0:
@@ -1078,8 +1078,8 @@ define <8 x i16> @test_pminuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_pminuw:
; SANDY: # BB#0:
; SANDY-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminuw:
; HASWELL: # BB#0:
@@ -1118,9 +1118,9 @@ define <8 x i16> @test_pmovsxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; SANDY-LABEL: test_pmovsxbw:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [5:0.50]
-; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [7:0.50]
+; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbw:
; HASWELL: # BB#0:
@@ -1162,9 +1162,9 @@ define <4 x i32> @test_pmovsxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; SANDY-LABEL: test_pmovsxbd:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [5:0.50]
+; SANDY-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbd:
; HASWELL: # BB#0:
@@ -1206,9 +1206,9 @@ define <2 x i64> @test_pmovsxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; SANDY-LABEL: test_pmovsxbq:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [5:0.50]
+; SANDY-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbq:
; HASWELL: # BB#0:
@@ -1250,9 +1250,9 @@ define <2 x i64> @test_pmovsxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; SANDY-LABEL: test_pmovsxdq:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [5:0.50]
+; SANDY-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxdq:
; HASWELL: # BB#0:
@@ -1294,9 +1294,9 @@ define <4 x i32> @test_pmovsxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; SANDY-LABEL: test_pmovsxwd:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [5:0.50]
+; SANDY-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxwd:
; HASWELL: # BB#0:
@@ -1338,9 +1338,9 @@ define <2 x i64> @test_pmovsxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; SANDY-LABEL: test_pmovsxwq:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [5:0.50]
+; SANDY-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxwq:
; HASWELL: # BB#0:
@@ -1382,9 +1382,9 @@ define <8 x i16> @test_pmovzxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; SANDY-LABEL: test_pmovzxbw:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.50]
-; SANDY-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [5:0.50]
-; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [7:0.50]
+; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbw:
; HASWELL: # BB#0:
@@ -1426,9 +1426,9 @@ define <4 x i32> @test_pmovzxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; SANDY-LABEL: test_pmovzxbd:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.50]
-; SANDY-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [5:0.50]
+; SANDY-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbd:
; HASWELL: # BB#0:
@@ -1470,9 +1470,9 @@ define <2 x i64> @test_pmovzxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; SANDY-LABEL: test_pmovzxbq:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.50]
-; SANDY-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [5:0.50]
+; SANDY-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbq:
; HASWELL: # BB#0:
@@ -1514,9 +1514,9 @@ define <2 x i64> @test_pmovzxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; SANDY-LABEL: test_pmovzxdq:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.50]
-; SANDY-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [5:0.50]
+; SANDY-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxdq:
; HASWELL: # BB#0:
@@ -1558,9 +1558,9 @@ define <4 x i32> @test_pmovzxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; SANDY-LABEL: test_pmovzxwd:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.50]
-; SANDY-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [5:0.50]
+; SANDY-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxwd:
; HASWELL: # BB#0:
@@ -1602,9 +1602,9 @@ define <2 x i64> @test_pmovzxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; SANDY-LABEL: test_pmovzxwq:
; SANDY: # BB#0:
; SANDY-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.50]
-; SANDY-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [5:0.50]
+; SANDY-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxwq:
; HASWELL: # BB#0:
@@ -1642,9 +1642,9 @@ define <2 x i64> @test_pmuldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; SANDY-LABEL: test_pmuldq:
; SANDY: # BB#0:
-; SANDY-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmuldq:
; HASWELL: # BB#0:
@@ -1680,9 +1680,9 @@ define <4 x i32> @test_pmulld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; SANDY-LABEL: test_pmulld:
; SANDY: # BB#0:
-; SANDY-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulld:
; HASWELL: # BB#0:
@@ -1724,13 +1724,13 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
;
; SANDY-LABEL: test_ptest:
; SANDY: # BB#0:
-; SANDY-NEXT: vptest %xmm1, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: setb %al # sched: [1:0.33]
-; SANDY-NEXT: vptest (%rdi), %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: setb %cl # sched: [1:0.33]
+; SANDY-NEXT: vptest %xmm1, %xmm0 # sched: [2:1.00]
+; SANDY-NEXT: setb %al # sched: [1:1.00]
+; SANDY-NEXT: vptest (%rdi), %xmm0 # sched: [8:1.00]
+; SANDY-NEXT: setb %cl # sched: [1:1.00]
; SANDY-NEXT: andb %al, %cl # sched: [1:0.33]
; SANDY-NEXT: movzbl %cl, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ptest:
; HASWELL: # BB#0:
@@ -1778,9 +1778,9 @@ define <2 x double> @test_roundpd(<2 x double> %a0, <2 x double> *%a1) {
; SANDY-LABEL: test_roundpd:
; SANDY: # BB#0:
; SANDY-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [7:1.00]
+; SANDY-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundpd:
; HASWELL: # BB#0:
@@ -1822,9 +1822,9 @@ define <4 x float> @test_roundps(<4 x float> %a0, <4 x float> *%a1) {
; SANDY-LABEL: test_roundps:
; SANDY: # BB#0:
; SANDY-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [7:1.00]
+; SANDY-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundps:
; HASWELL: # BB#0:
@@ -1867,9 +1867,9 @@ define <2 x double> @test_roundsd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
; SANDY-LABEL: test_roundsd:
; SANDY: # BB#0:
; SANDY-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
-; SANDY-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
+; SANDY-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundsd:
; HASWELL: # BB#0:
@@ -1912,9 +1912,9 @@ define <4 x float> @test_roundss(<4 x float> %a0, <4 x float> %a1, <4 x float> *
; SANDY-LABEL: test_roundss:
; SANDY: # BB#0:
; SANDY-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
-; SANDY-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
+; SANDY-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundss:
; HASWELL: # BB#0:
diff --git a/test/CodeGen/X86/sse42-schedule.ll b/test/CodeGen/X86/sse42-schedule.ll
index afc48bc57ee7..7ce9ffdbd0ea 100644
--- a/test/CodeGen/X86/sse42-schedule.ll
+++ b/test/CodeGen/X86/sse42-schedule.ll
@@ -26,9 +26,9 @@ define i32 @crc32_32_8(i32 %a0, i8 %a1, i8 *%a2) {
; SANDY-LABEL: crc32_32_8:
; SANDY: # BB#0:
; SANDY-NEXT: crc32b %sil, %edi # sched: [3:1.00]
-; SANDY-NEXT: crc32b (%rdx), %edi # sched: [7:1.00]
+; SANDY-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; SANDY-NEXT: movl %edi, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_32_8:
; HASWELL: # BB#0:
@@ -68,9 +68,9 @@ define i32 @crc32_32_16(i32 %a0, i16 %a1, i16 *%a2) {
; SANDY-LABEL: crc32_32_16:
; SANDY: # BB#0:
; SANDY-NEXT: crc32w %si, %edi # sched: [3:1.00]
-; SANDY-NEXT: crc32w (%rdx), %edi # sched: [7:1.00]
+; SANDY-NEXT: crc32w (%rdx), %edi # sched: [8:1.00]
; SANDY-NEXT: movl %edi, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_32_16:
; HASWELL: # BB#0:
@@ -112,7 +112,7 @@ define i32 @crc32_32_32(i32 %a0, i32 %a1, i32 *%a2) {
; SANDY-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; SANDY-NEXT: crc32l (%rdx), %edi # sched: [7:1.00]
; SANDY-NEXT: movl %edi, %eax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_32_32:
; HASWELL: # BB#0:
@@ -152,9 +152,9 @@ define i64 @crc32_64_8(i64 %a0, i8 %a1, i8 *%a2) nounwind {
; SANDY-LABEL: crc32_64_8:
; SANDY: # BB#0:
; SANDY-NEXT: crc32b %sil, %edi # sched: [3:1.00]
-; SANDY-NEXT: crc32b (%rdx), %edi # sched: [7:1.00]
+; SANDY-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; SANDY-NEXT: movq %rdi, %rax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_64_8:
; HASWELL: # BB#0:
@@ -196,7 +196,7 @@ define i64 @crc32_64_64(i64 %a0, i64 %a1, i64 *%a2) {
; SANDY-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; SANDY-NEXT: crc32q (%rdx), %rdi # sched: [7:1.00]
; SANDY-NEXT: movq %rdi, %rax # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_64_64:
; HASWELL: # BB#0:
@@ -256,7 +256,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [4:2.33]
; SANDY-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SANDY-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpestri:
; HASWELL: # BB#0:
@@ -320,7 +320,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-NEXT: movl $7, %eax # sched: [1:0.33]
; SANDY-NEXT: movl $7, %edx # sched: [1:0.33]
; SANDY-NEXT: vpcmpestrm $7, (%rdi), %xmm0 # sched: [11:2.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpestrm:
; HASWELL: # BB#0:
@@ -369,12 +369,12 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
;
; SANDY-LABEL: test_pcmpistri:
; SANDY: # BB#0:
-; SANDY-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; SANDY-NEXT: movl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
; SANDY-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SANDY-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpistri:
; HASWELL: # BB#0:
@@ -416,9 +416,9 @@ define <16 x i8> @test_pcmpistrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
;
; SANDY-LABEL: test_pcmpistrm:
; SANDY: # BB#0:
-; SANDY-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [11:1.00]
-; SANDY-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [11:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [11:3.00]
+; SANDY-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [17:3.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpistrm:
; HASWELL: # BB#0:
@@ -453,9 +453,9 @@ define <2 x i64> @test_pcmpgtq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
;
; SANDY-LABEL: test_pcmpgtq:
; SANDY: # BB#0:
-; SANDY-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtq:
; HASWELL: # BB#0:
diff --git a/test/CodeGen/X86/sse4a-schedule.ll b/test/CodeGen/X86/sse4a-schedule.ll
new file mode 100644
index 000000000000..11afdb7989f1
--- /dev/null
+++ b/test/CodeGen/X86/sse4a-schedule.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mattr=+sse4a | FileCheck %s --check-prefix=GENERIC
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=BTVER2
+
+define <2 x i64> @test_extrq(<2 x i64> %a0, <16 x i8> %a1) {
+; GENERIC-LABEL: test_extrq:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: extrq %xmm1, %xmm0
+; GENERIC-NEXT: retq
+;
+; BTVER2-LABEL: test_extrq:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: extrq %xmm1, %xmm0
+; BTVER2-NEXT: retq # sched: [4:1.00]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %a0, <16 x i8> %a1)
+ ret <2 x i64> %1
+}
+declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>)
+
+define <2 x i64> @test_extrqi(<2 x i64> %a0) {
+; GENERIC-LABEL: test_extrqi:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: extrq $2, $3, %xmm0
+; GENERIC-NEXT: retq
+;
+; BTVER2-LABEL: test_extrqi:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: extrq $2, $3, %xmm0
+; BTVER2-NEXT: retq # sched: [4:1.00]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a0, i8 3, i8 2)
+ ret <2 x i64> %1
+}
+declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8)
+
+define <2 x i64> @test_insertq(<2 x i64> %a0, <2 x i64> %a1) {
+; GENERIC-LABEL: test_insertq:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: insertq %xmm1, %xmm0
+; GENERIC-NEXT: retq
+;
+; BTVER2-LABEL: test_insertq:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: insertq %xmm1, %xmm0
+; BTVER2-NEXT: retq # sched: [4:1.00]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %1
+}
+declare <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @test_insertqi(<2 x i64> %a0, <2 x i64> %a1) {
+; GENERIC-LABEL: test_insertqi:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: insertq $6, $5, %xmm1, %xmm0
+; GENERIC-NEXT: retq
+;
+; BTVER2-LABEL: test_insertqi:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: insertq $6, $5, %xmm1, %xmm0
+; BTVER2-NEXT: retq # sched: [4:1.00]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 6)
+ ret <2 x i64> %1
+}
+declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8)
+
+define void @test_movntsd(i8* %p, <2 x double> %a) {
+; GENERIC-LABEL: test_movntsd:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: movntsd %xmm0, (%rdi)
+; GENERIC-NEXT: retq
+;
+; BTVER2-LABEL: test_movntsd:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: movntsd %xmm0, (%rdi) # sched: [1:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+ tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a)
+ ret void
+}
+declare void @llvm.x86.sse4a.movnt.sd(i8*, <2 x double>)
+
+define void @test_movntss(i8* %p, <4 x float> %a) {
+; GENERIC-LABEL: test_movntss:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: movntss %xmm0, (%rdi)
+; GENERIC-NEXT: retq
+;
+; BTVER2-LABEL: test_movntss:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: movntss %xmm0, (%rdi) # sched: [1:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+ tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a)
+ ret void
+}
+declare void @llvm.x86.sse4a.movnt.ss(i8*, <4 x float>)
+
diff --git a/test/CodeGen/X86/ssse3-schedule.ll b/test/CodeGen/X86/ssse3-schedule.ll
index 8b7a0c0ec02b..f24969a30c33 100644
--- a/test/CodeGen/X86/ssse3-schedule.ll
+++ b/test/CodeGen/X86/ssse3-schedule.ll
@@ -35,9 +35,9 @@ define <16 x i8> @test_pabsb(<16 x i8> %a0, <16 x i8> *%a1) {
; SANDY-LABEL: test_pabsb:
; SANDY: # BB#0:
; SANDY-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpabsb (%rdi), %xmm1 # sched: [5:0.50]
+; SANDY-NEXT: vpabsb (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsb:
; HASWELL: # BB#0:
@@ -86,9 +86,9 @@ define <4 x i32> @test_pabsd(<4 x i32> %a0, <4 x i32> *%a1) {
; SANDY-LABEL: test_pabsd:
; SANDY: # BB#0:
; SANDY-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpabsd (%rdi), %xmm1 # sched: [5:0.50]
+; SANDY-NEXT: vpabsd (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsd:
; HASWELL: # BB#0:
@@ -136,7 +136,7 @@ define <8 x i16> @test_pabsw(<8 x i16> %a0, <8 x i16> *%a1) {
; SANDY-LABEL: test_pabsw:
; SANDY: # BB#0:
; SANDY-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsw:
; HASWELL: # BB#0:
@@ -182,8 +182,8 @@ define <8 x i16> @test_palignr(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_palignr:
; SANDY: # BB#0:
; SANDY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.50]
-; SANDY-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_palignr:
; HASWELL: # BB#0:
@@ -223,9 +223,9 @@ define <4 x i32> @test_phaddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; SANDY-LABEL: test_phaddd:
; SANDY: # BB#0:
-; SANDY-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
+; SANDY-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddd:
; HASWELL: # BB#0:
@@ -274,9 +274,9 @@ define <8 x i16> @test_phaddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_phaddsw:
; SANDY: # BB#0:
-; SANDY-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
+; SANDY-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddsw:
; HASWELL: # BB#0:
@@ -317,9 +317,9 @@ define <8 x i16> @test_phaddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_phaddw:
; SANDY: # BB#0:
-; SANDY-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
+; SANDY-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddw:
; HASWELL: # BB#0:
@@ -360,9 +360,9 @@ define <4 x i32> @test_phsubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; SANDY-LABEL: test_phsubd:
; SANDY: # BB#0:
-; SANDY-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
+; SANDY-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubd:
; HASWELL: # BB#0:
@@ -411,9 +411,9 @@ define <8 x i16> @test_phsubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_phsubsw:
; SANDY: # BB#0:
-; SANDY-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
+; SANDY-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubsw:
; HASWELL: # BB#0:
@@ -454,9 +454,9 @@ define <8 x i16> @test_phsubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_phsubw:
; SANDY: # BB#0:
-; SANDY-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
+; SANDY-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubw:
; HASWELL: # BB#0:
@@ -497,9 +497,9 @@ define <8 x i16> @test_pmaddubsw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
;
; SANDY-LABEL: test_pmaddubsw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddubsw:
; HASWELL: # BB#0:
@@ -538,8 +538,8 @@ define <8 x i16> @test_pmulhrsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; SANDY-LABEL: test_pmulhrsw:
; SANDY: # BB#0:
-; SANDY-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhrsw:
; HASWELL: # BB#0:
@@ -579,8 +579,8 @@ define <16 x i8> @test_pshufb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_pshufb:
; SANDY: # BB#0:
; SANDY-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufb:
; HASWELL: # BB#0:
@@ -630,8 +630,8 @@ define <16 x i8> @test_psignb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-LABEL: test_psignb:
; SANDY: # BB#0:
; SANDY-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignb:
; HASWELL: # BB#0:
@@ -681,8 +681,8 @@ define <4 x i32> @test_psignd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SANDY-LABEL: test_psignd:
; SANDY: # BB#0:
; SANDY-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignd:
; HASWELL: # BB#0:
@@ -732,8 +732,8 @@ define <8 x i16> @test_psignw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SANDY-LABEL: test_psignw:
; SANDY: # BB#0:
; SANDY-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; SANDY-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
-; SANDY-NEXT: retq # sched: [5:1.00]
+; SANDY-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignw:
; HASWELL: # BB#0:
diff --git a/test/CodeGen/X86/swizzle-avx2.ll b/test/CodeGen/X86/swizzle-avx2.ll
index 29dfa6c2dcc1..6ca9126eb09d 100644
--- a/test/CodeGen/X86/swizzle-avx2.ll
+++ b/test/CodeGen/X86/swizzle-avx2.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=avx2 | FileCheck %s
; Test that we correctly fold a shuffle that performs a swizzle of another
; shuffle node according to the rule
@@ -11,81 +12,77 @@
; Check that we produce a single vector permute / shuffle in all cases.
define <8 x i32> @swizzle_1(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [1,3,2,0,4,5,6,7]
+; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
%1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 7, i32 5, i32 6, i32 4>
%2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
ret <8 x i32> %2
}
-; CHECK-LABEL: swizzle_1
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
define <8 x i32> @swizzle_2(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; CHECK-NEXT: retq
%1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
%2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
ret <8 x i32> %2
}
-; CHECK-LABEL: swizzle_2
-; CHECK: vpshufd $78
-; CHECK-NOT: vpermd
-; CHECK-NOT: vpshufd
-; CHECK: ret
-
define <8 x i32> @swizzle_3(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_3:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; CHECK-NEXT: retq
%1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
%2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
ret <8 x i32> %2
}
-; CHECK-LABEL: swizzle_3
-; CHECK: vpshufd $78
-; CHECK-NOT: vpermd
-; CHECK-NOT: vpshufd
-; CHECK: ret
-
define <8 x i32> @swizzle_4(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_4:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,1,2,0,6,5,4,7]
+; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
%1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
%2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
ret <8 x i32> %2
}
-; CHECK-LABEL: swizzle_4
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
define <8 x i32> @swizzle_5(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_5:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,0,1,2,7,6,4,5]
+; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
%1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
%2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
ret <8 x i32> %2
}
-; CHECK-LABEL: swizzle_5
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
define <8 x i32> @swizzle_6(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_6:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,1,0,2,4,5,6,7]
+; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
%1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
%2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
ret <8 x i32> %2
}
-; CHECK-LABEL: swizzle_6
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
define <8 x i32> @swizzle_7(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_7:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,3,1,4,5,6,7]
+; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
%1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
%2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
ret <8 x i32> %2
}
-; CHECK-LABEL: swizzle_7
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
diff --git a/test/CodeGen/X86/tbm_patterns.ll b/test/CodeGen/X86/tbm_patterns.ll
index 80d36d5af4d2..5ce6bbd4b49e 100644
--- a/test/CodeGen/X86/tbm_patterns.ll
+++ b/test/CodeGen/X86/tbm_patterns.ll
@@ -1,253 +1,255 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+tbm < %s | FileCheck %s
-define i32 @test_x86_tbm_bextri_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_bextri_u32:
- ; CHECK-NOT: mov
- ; CHECK: bextr $
- %0 = lshr i32 %a, 4
- %1 = and i32 %0, 4095
- ret i32 %1
-}
-
-define i32 @test_x86_tbm_bextri_u32_m(i32* nocapture %a) nounwind readonly {
-entry:
- ; CHECK-LABEL: test_x86_tbm_bextri_u32_m:
- ; CHECK-NOT: mov
- ; CHECK: bextr $
- %0 = load i32, i32* %a
- %1 = lshr i32 %0, 4
- %2 = and i32 %1, 4095
- ret i32 %2
-}
-
-define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_bextri_u64:
- ; CHECK-NOT: mov
- ; CHECK: bextr $
- %0 = lshr i64 %a, 4
- %1 = and i64 %0, 4095
- ret i64 %1
-}
-
-define i64 @test_x86_tbm_bextri_u64_m(i64* nocapture %a) nounwind readonly {
-entry:
- ; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
- ; CHECK-NOT: mov
- ; CHECK: bextr $
- %0 = load i64, i64* %a
- %1 = lshr i64 %0, 4
- %2 = and i64 %1, 4095
- ret i64 %2
-}
-
-define i32 @test_x86_tbm_blcfill_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blcfill_u32:
- ; CHECK-NOT: mov
- ; CHECK: blcfill %
- %0 = add i32 %a, 1
- %1 = and i32 %0, %a
- ret i32 %1
-}
-
-define i64 @test_x86_tbm_blcfill_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blcfill_u64:
- ; CHECK-NOT: mov
- ; CHECK: blcfill %
- %0 = add i64 %a, 1
- %1 = and i64 %0, %a
- ret i64 %1
-}
-
-define i32 @test_x86_tbm_blci_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blci_u32:
- ; CHECK-NOT: mov
- ; CHECK: blci %
- %0 = add i32 1, %a
- %1 = xor i32 %0, -1
- %2 = or i32 %1, %a
- ret i32 %2
-}
-
-define i64 @test_x86_tbm_blci_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blci_u64:
- ; CHECK-NOT: mov
- ; CHECK: blci %
- %0 = add i64 1, %a
- %1 = xor i64 %0, -1
- %2 = or i64 %1, %a
- ret i64 %2
-}
-
-define i32 @test_x86_tbm_blci_u32_b(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blci_u32_b:
- ; CHECK-NOT: mov
- ; CHECK: blci %
- %0 = sub i32 -2, %a
- %1 = or i32 %0, %a
- ret i32 %1
-}
-
-define i64 @test_x86_tbm_blci_u64_b(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blci_u64_b:
- ; CHECK-NOT: mov
- ; CHECK: blci %
- %0 = sub i64 -2, %a
- %1 = or i64 %0, %a
- ret i64 %1
-}
-
-define i32 @test_x86_tbm_blcic_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blcic_u32:
- ; CHECK-NOT: mov
- ; CHECK: blcic %
- %0 = xor i32 %a, -1
- %1 = add i32 %a, 1
- %2 = and i32 %1, %0
- ret i32 %2
-}
-
-define i64 @test_x86_tbm_blcic_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blcic_u64:
- ; CHECK-NOT: mov
- ; CHECK: blcic %
- %0 = xor i64 %a, -1
- %1 = add i64 %a, 1
- %2 = and i64 %1, %0
- ret i64 %2
-}
-
-define i32 @test_x86_tbm_blcmsk_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blcmsk_u32:
- ; CHECK-NOT: mov
- ; CHECK: blcmsk %
- %0 = add i32 %a, 1
- %1 = xor i32 %0, %a
- ret i32 %1
-}
-
-define i64 @test_x86_tbm_blcmsk_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blcmsk_u64:
- ; CHECK-NOT: mov
- ; CHECK: blcmsk %
- %0 = add i64 %a, 1
- %1 = xor i64 %0, %a
- ret i64 %1
-}
-
-define i32 @test_x86_tbm_blcs_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blcs_u32:
- ; CHECK-NOT: mov
- ; CHECK: blcs %
- %0 = add i32 %a, 1
- %1 = or i32 %0, %a
- ret i32 %1
-}
-
-define i64 @test_x86_tbm_blcs_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blcs_u64:
- ; CHECK-NOT: mov
- ; CHECK: blcs %
- %0 = add i64 %a, 1
- %1 = or i64 %0, %a
- ret i64 %1
-}
-
-define i32 @test_x86_tbm_blsfill_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blsfill_u32:
- ; CHECK-NOT: mov
- ; CHECK: blsfill %
- %0 = add i32 %a, -1
- %1 = or i32 %0, %a
- ret i32 %1
-}
-
-define i64 @test_x86_tbm_blsfill_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blsfill_u64:
- ; CHECK-NOT: mov
- ; CHECK: blsfill %
- %0 = add i64 %a, -1
- %1 = or i64 %0, %a
- ret i64 %1
-}
-
-define i32 @test_x86_tbm_blsic_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blsic_u32:
- ; CHECK-NOT: mov
- ; CHECK: blsic %
- %0 = xor i32 %a, -1
- %1 = add i32 %a, -1
- %2 = or i32 %0, %1
- ret i32 %2
-}
-
-define i64 @test_x86_tbm_blsic_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_blsic_u64:
- ; CHECK-NOT: mov
- ; CHECK: blsic %
- %0 = xor i64 %a, -1
- %1 = add i64 %a, -1
- %2 = or i64 %0, %1
- ret i64 %2
-}
-
-define i32 @test_x86_tbm_t1mskc_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_t1mskc_u32:
- ; CHECK-NOT: mov
- ; CHECK: t1mskc %
- %0 = xor i32 %a, -1
- %1 = add i32 %a, 1
- %2 = or i32 %0, %1
- ret i32 %2
-}
-
-define i64 @Ttest_x86_tbm_t1mskc_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_t1mskc_u64:
- ; CHECK-NOT: mov
- ; CHECK: t1mskc %
- %0 = xor i64 %a, -1
- %1 = add i64 %a, 1
- %2 = or i64 %0, %1
- ret i64 %2
-}
-
-define i32 @test_x86_tbm_tzmsk_u32(i32 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_tzmsk_u32:
- ; CHECK-NOT: mov
- ; CHECK: tzmsk %
- %0 = xor i32 %a, -1
- %1 = add i32 %a, -1
- %2 = and i32 %0, %1
- ret i32 %2
-}
-
-define i64 @test_x86_tbm_tzmsk_u64(i64 %a) nounwind readnone {
-entry:
- ; CHECK-LABEL: test_x86_tbm_tzmsk_u64:
- ; CHECK-NOT: mov
- ; CHECK: tzmsk %
- %0 = xor i64 %a, -1
- %1 = add i64 %a, -1
- %2 = and i64 %0, %1
- ret i64 %2
+define i32 @test_x86_tbm_bextri_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
+; CHECK-NEXT: retq
+ %t0 = lshr i32 %a, 4
+ %t1 = and i32 %t0, 4095
+ ret i32 %t1
+}
+
+define i32 @test_x86_tbm_bextri_u32_m(i32* nocapture %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u32_m:
+; CHECK: # BB#0:
+; CHECK-NEXT: bextr $3076, (%rdi), %eax # imm = 0xC04
+; CHECK-NEXT: retq
+ %t0 = load i32, i32* %a
+ %t1 = lshr i32 %t0, 4
+ %t2 = and i32 %t1, 4095
+ ret i32 %t2
+}
+
+define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
+; CHECK-NEXT: retq
+ %t0 = lshr i64 %a, 4
+ %t1 = and i64 %t0, 4095
+ ret i64 %t1
+}
+
+define i64 @test_x86_tbm_bextri_u64_m(i64* nocapture %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
+; CHECK: # BB#0:
+; CHECK-NEXT: bextr $3076, (%rdi), %eax # imm = 0xC04
+; CHECK-NEXT: retq
+ %t0 = load i64, i64* %a
+ %t1 = lshr i64 %t0, 4
+ %t2 = and i64 %t1, 4095
+ ret i64 %t2
+}
+
+define i32 @test_x86_tbm_blcfill_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcfill_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: blcfill %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = add i32 %a, 1
+ %t1 = and i32 %t0, %a
+ ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blcfill_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcfill_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: blcfill %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = add i64 %a, 1
+ %t1 = and i64 %t0, %a
+ ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blci_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: blci %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = add i32 1, %a
+ %t1 = xor i32 %t0, -1
+ %t2 = or i32 %t1, %a
+ ret i32 %t2
+}
+
+define i64 @test_x86_tbm_blci_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: blci %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = add i64 1, %a
+ %t1 = xor i64 %t0, -1
+ %t2 = or i64 %t1, %a
+ ret i64 %t2
+}
+
+define i32 @test_x86_tbm_blci_u32_b(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u32_b:
+; CHECK: # BB#0:
+; CHECK-NEXT: blci %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = sub i32 -2, %a
+ %t1 = or i32 %t0, %a
+ ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blci_u64_b(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u64_b:
+; CHECK: # BB#0:
+; CHECK-NEXT: blci %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = sub i64 -2, %a
+ %t1 = or i64 %t0, %a
+ ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blcic_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcic_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: blcic %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = xor i32 %a, -1
+ %t1 = add i32 %a, 1
+ %t2 = and i32 %t1, %t0
+ ret i32 %t2
+}
+
+define i64 @test_x86_tbm_blcic_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcic_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: blcic %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = xor i64 %a, -1
+ %t1 = add i64 %a, 1
+ %t2 = and i64 %t1, %t0
+ ret i64 %t2
+}
+
+define i32 @test_x86_tbm_blcmsk_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcmsk_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: blcmsk %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = add i32 %a, 1
+ %t1 = xor i32 %t0, %a
+ ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blcmsk_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcmsk_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: blcmsk %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = add i64 %a, 1
+ %t1 = xor i64 %t0, %a
+ ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blcs_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcs_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: blcs %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = add i32 %a, 1
+ %t1 = or i32 %t0, %a
+ ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blcs_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcs_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: blcs %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = add i64 %a, 1
+ %t1 = or i64 %t0, %a
+ ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blsfill_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsfill_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: blsfill %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = add i32 %a, -1
+ %t1 = or i32 %t0, %a
+ ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blsfill_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsfill_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: blsfill %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = add i64 %a, -1
+ %t1 = or i64 %t0, %a
+ ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blsic_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsic_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: blsic %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = xor i32 %a, -1
+ %t1 = add i32 %a, -1
+ %t2 = or i32 %t0, %t1
+ ret i32 %t2
+}
+
+define i64 @test_x86_tbm_blsic_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsic_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: blsic %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = xor i64 %a, -1
+ %t1 = add i64 %a, -1
+ %t2 = or i64 %t0, %t1
+ ret i64 %t2
+}
+
+define i32 @test_x86_tbm_t1mskc_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_t1mskc_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: t1mskc %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = xor i32 %a, -1
+ %t1 = add i32 %a, 1
+ %t2 = or i32 %t0, %t1
+ ret i32 %t2
+}
+
+define i64 @test_x86_tbm_t1mskc_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_t1mskc_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: t1mskc %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = xor i64 %a, -1
+ %t1 = add i64 %a, 1
+ %t2 = or i64 %t0, %t1
+ ret i64 %t2
+}
+
+define i32 @test_x86_tbm_tzmsk_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_tzmsk_u32:
+; CHECK: # BB#0:
+; CHECK-NEXT: tzmsk %edi, %eax
+; CHECK-NEXT: retq
+ %t0 = xor i32 %a, -1
+ %t1 = add i32 %a, -1
+ %t2 = and i32 %t0, %t1
+ ret i32 %t2
+}
+
+define i64 @test_x86_tbm_tzmsk_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_tzmsk_u64:
+; CHECK: # BB#0:
+; CHECK-NEXT: tzmsk %rdi, %rax
+; CHECK-NEXT: retq
+ %t0 = xor i64 %a, -1
+ %t1 = add i64 %a, -1
+ %t2 = and i64 %t0, %t1
+ ret i64 %t2
}
+
diff --git a/test/CodeGen/X86/vec-copysign.ll b/test/CodeGen/X86/vec-copysign.ll
index d363dbdaef81..1ebd7ceafced 100644
--- a/test/CodeGen/X86/vec-copysign.ll
+++ b/test/CodeGen/X86/vec-copysign.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse2 | FileCheck %s --check-prefix=SSE2 --check-prefix=CHECK
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=CHECK
-; Assertions have been enhanced from utils/update_test_checks.py to show the constant pool values.
+; Assertions have been enhanced from utils/update_llc_test_checks.py to show the constant pool values.
; Use a macosx triple to make sure the format of those constant strings is exact.
; CHECK: [[SIGNMASK1:L.+]]:
diff --git a/test/CodeGen/X86/vec_return.ll b/test/CodeGen/X86/vec_return.ll
index f7fcd032cab3..556e32d0c87b 100644
--- a/test/CodeGen/X86/vec_return.ll
+++ b/test/CodeGen/X86/vec_return.ll
@@ -1,16 +1,21 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s
; Without any typed operations, always use the smaller xorps.
-; CHECK: test
-; CHECK: xorps
define <2 x double> @test() {
+; CHECK-LABEL: test:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: retl
ret <2 x double> zeroinitializer
}
; Prefer a constant pool load here.
-; CHECK: test2
-; CHECK-NOT: shuf
-; CHECK: movaps {{.*}}{{CPI|__xmm@}}
define <4 x i32> @test2() nounwind {
+; CHECK-LABEL: test2:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = [0,0,1,0]
+; CHECK-NEXT: retl
ret <4 x i32> < i32 0, i32 0, i32 1, i32 0 >
}
+
diff --git a/test/CodeGen/X86/vec_shift6.ll b/test/CodeGen/X86/vec_shift6.ll
index b4a58deff2f8..731760a4ea55 100644
--- a/test/CodeGen/X86/vec_shift6.ll
+++ b/test/CodeGen/X86/vec_shift6.ll
@@ -153,14 +153,16 @@ define <32 x i16> @test7(<32 x i16> %a) {
;
; AVX2-LABEL: test7:
; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [2,2,4,8,128,1,512,2048,2,2,4,8,128,1,512,2048]
+; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test7:
; AVX512: # BB#0:
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [2,2,4,8,128,1,512,2048,2,2,4,8,128,1,512,2048]
+; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512-NEXT: retq
@@ -183,7 +185,8 @@ define <16 x i32> @test8(<16 x i32> %a) {
;
; AVX2-LABEL: test8:
; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,1,2,3,1,1,2,3]
+; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsllvd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/vec_unsafe-fp-math.ll b/test/CodeGen/X86/vec_unsafe-fp-math.ll
index 1c352782fca4..745316effc98 100644
--- a/test/CodeGen/X86/vec_unsafe-fp-math.ll
+++ b/test/CodeGen/X86/vec_unsafe-fp-math.ll
@@ -1,13 +1,13 @@
-; RUN: llc < %s -enable-unsafe-fp-math -enable-no-signed-zeros-fp-math -mtriple=x86_64-unknown-unknown -mcpu=corei7 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -enable-unsafe-fp-math -enable-no-signed-zeros-fp-math -mtriple=x86_64-unknown-unknown | FileCheck %s
; Make sure that vectors get the same benefits as scalars when using unsafe-fp-math.
; Subtracting zero is free.
define <4 x float> @vec_fsub_zero(<4 x float> %x) {
; CHECK-LABEL: vec_fsub_zero:
-; CHECK-NOT: subps
-; CHECK-NOT: xorps
-; CHECK: retq
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%sub = fsub <4 x float> %x, zeroinitializer
ret <4 x float> %sub
}
@@ -15,9 +15,10 @@ define <4 x float> @vec_fsub_zero(<4 x float> %x) {
; Negating doesn't require subtraction.
define <4 x float> @vec_fneg(<4 x float> %x) {
; CHECK-LABEL: vec_fneg:
-; CHECK: xorps {{.*}}LCP{{.*}}, %xmm0
-; CHECK-NOT: subps
-; CHECK-NEXT: retq
+; CHECK: # BB#0:
+; CHECK-NEXT: xorps {{.*}}(%rip), %xmm0
+; CHECK-NEXT: retq
%sub = fsub <4 x float> zeroinitializer, %x
ret <4 x float> %sub
}
+
diff --git a/test/CodeGen/X86/vector-popcnt-128.ll b/test/CodeGen/X86/vector-popcnt-128.ll
index adda108bdc77..d2f33785530b 100644
--- a/test/CodeGen/X86/vector-popcnt-128.ll
+++ b/test/CodeGen/X86/vector-popcnt-128.ll
@@ -344,20 +344,43 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: testv8i16:
-; AVX: # BB#0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsllw $8, %xmm0, %xmm1
-; AVX-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: testv8i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv8i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512VPOPCNTDQ-LABEL: testv8i16:
+; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vzeroupper
+; AVX512VPOPCNTDQ-NEXT: retq
%out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %in)
ret <8 x i16> %out
}
@@ -431,17 +454,37 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: testv16i8:
-; AVX: # BB#0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: testv16i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv16i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512VPOPCNTDQ-LABEL: testv16i8:
+; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vzeroupper
+; AVX512VPOPCNTDQ-NEXT: retq
%out = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %in)
ret <16 x i8> %out
}
diff --git a/test/CodeGen/X86/vector-popcnt-256.ll b/test/CodeGen/X86/vector-popcnt-256.ll
index accbad35e9d7..4c5de2fed385 100644
--- a/test/CodeGen/X86/vector-popcnt-256.ll
+++ b/test/CodeGen/X86/vector-popcnt-256.ll
@@ -155,17 +155,9 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv16i16:
; AVX512VPOPCNTDQ: # BB#0:
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %ymm0, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: retq
%out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %in)
ret <16 x i16> %out
diff --git a/test/CodeGen/X86/vector-popcnt-512.ll b/test/CodeGen/X86/vector-popcnt-512.ll
index aa50206e7a5e..a6f4e3342897 100644
--- a/test/CodeGen/X86/vector-popcnt-512.ll
+++ b/test/CodeGen/X86/vector-popcnt-512.ll
@@ -1,11 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512vpopcntdq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VPOPCNTDQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vpopcntdq | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512VPOPCNTDQ --check-prefix=AVX512VPOPCNTDQ-NOBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vpopcntdq,+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512VPOPCNTDQ --check-prefix=AVX512VPOPCNTDQ-BW
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512F-LABEL: testv8i64:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm3
@@ -28,7 +29,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: testv8i64:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -42,7 +43,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i64:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
%out = call <8 x i64> @llvm.ctpop.v8i64(<8 x i64> %in)
@@ -51,7 +52,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512F-LABEL: testv16i32:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm3
@@ -82,7 +83,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: testv16i32:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -100,7 +101,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i32:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
%out = call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> %in)
@@ -109,7 +110,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512F-LABEL: testv32i16:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -133,7 +134,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: testv32i16:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -147,36 +148,37 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
-; AVX512VPOPCNTDQ-LABEL: testv32i16:
-; AVX512VPOPCNTDQ: ## BB#0:
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm0, %ymm3
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %ymm0, %ymm3
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm0, %ymm3, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm3
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %ymm1, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: retq
+; AVX512VPOPCNTDQ-NOBW-LABEL: testv32i16:
+; AVX512VPOPCNTDQ-NOBW: # BB#0:
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpopcntd %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512VPOPCNTDQ-NOBW-NEXT: retq
+;
+; AVX512VPOPCNTDQ-BW-LABEL: testv32i16:
+; AVX512VPOPCNTDQ-BW: # BB#0:
+; AVX512VPOPCNTDQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VPOPCNTDQ-BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512VPOPCNTDQ-BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VPOPCNTDQ-BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512VPOPCNTDQ-BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: vpsllw $8, %zmm0, %zmm1
+; AVX512VPOPCNTDQ-BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: vpsrlw $8, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: retq
%out = call <32 x i16> @llvm.ctpop.v32i16(<32 x i16> %in)
ret <32 x i16> %out
}
define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512F-LABEL: testv64i8:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -194,7 +196,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: testv64i8:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -205,23 +207,35 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
-; AVX512VPOPCNTDQ-LABEL: testv64i8:
-; AVX512VPOPCNTDQ: ## BB#0:
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm0, %ymm3
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm3
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: retq
+; AVX512VPOPCNTDQ-NOBW-LABEL: testv64i8:
+; AVX512VPOPCNTDQ-NOBW: # BB#0:
+; AVX512VPOPCNTDQ-NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpand %ymm2, %ymm0, %ymm3
+; AVX512VPOPCNTDQ-NOBW-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX512VPOPCNTDQ-NOBW-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NOBW-NEXT: retq
+;
+; AVX512VPOPCNTDQ-BW-LABEL: testv64i8:
+; AVX512VPOPCNTDQ-BW: # BB#0:
+; AVX512VPOPCNTDQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VPOPCNTDQ-BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512VPOPCNTDQ-BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VPOPCNTDQ-BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512VPOPCNTDQ-BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-BW-NEXT: retq
%out = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %in)
ret <64 x i8> %out
}
diff --git a/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll b/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll
new file mode 100644
index 000000000000..af69a5ac2283
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll
@@ -0,0 +1,86 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+;
+; Combine tests involving SSE4A target shuffles (EXTRQI, INSERTQI)
+
+declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @combine_extrqi_pshufb_16i8(<16 x i8> %a0) {
+; ALL-LABEL: combine_extrqi_pshufb_16i8:
+; ALL: # BB#0:
+; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[1,2],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; ALL-NEXT: retq
+ %1 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 2, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 255, i8 255, i8 255, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @combine_extrqi_pshufb_8i16(<8 x i16> %a0) {
+; ALL-LABEL: combine_extrqi_pshufb_8i16:
+; ALL: # BB#0:
+; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; ALL-NEXT: retq
+ %1 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 1, i32 2, i32 8, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = bitcast <8 x i16> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
+ %4 = bitcast <16 x i8> %3 to <8 x i16>
+ ret <8 x i16> %4
+}
+
+define <16 x i8> @combine_insertqi_pshufb_16i8(<16 x i8> %a0, <16 x i8> %a1) {
+; SSSE3-LABEL: combine_insertqi_pshufb_16i8:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: extrq {{.*#+}} xmm1 = xmm1[0,1],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE42-LABEL: combine_insertqi_pshufb_16i8:
+; SSE42: # BB#0:
+; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; SSE42-NEXT: retq
+;
+; AVX-LABEL: combine_insertqi_pshufb_16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX-NEXT: retq
+ %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @combine_insertqi_pshufb_8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSSE3-LABEL: combine_insertqi_pshufb_8i16:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: extrq {{.*#+}} xmm1 = xmm1[0,1],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE42-LABEL: combine_insertqi_pshufb_8i16:
+; SSE42: # BB#0:
+; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; SSE42-NEXT: retq
+;
+; AVX-LABEL: combine_insertqi_pshufb_8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX-NEXT: retq
+ %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = bitcast <8 x i16> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
+ %4 = bitcast <16 x i8> %3 to <8 x i16>
+ ret <8 x i16> %4
+}
+
+define <16 x i8> @combine_pshufb_insertqi_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
+; ALL-LABEL: combine_pshufb_insertqi_pshufb:
+; ALL: # BB#0:
+; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0],xmm1[0,1],xmm0[3,4,5,6,7,u,u,u,u,u,u,u,u]
+; ALL-NEXT: retq
+ %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 17, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 7, i8 1, i8 2, i8 4, i8 3, i8 undef, i8 undef, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
+ ret <16 x i8> %3
+}
diff --git a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
index 546b73126039..02314857c6d7 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -445,6 +445,21 @@ define <16 x i8> @combine_pshufb_not_as_pshufw(<16 x i8> %a0) {
ret <16 x i8> %res1
}
+define <16 x i8> @combine_vpshufb_as_pshuflw_not_pslld(<16 x i8> *%a0) {
+; SSE-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
+; SSE: # BB#0:
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
+; AVX: # BB#0:
+; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
+; AVX-NEXT: retq
+ %res0 = load <16 x i8>, <16 x i8> *%a0, align 16
+ %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
+ ret <16 x i8> %res1
+}
+
define <16 x i8> @combine_pshufb_as_unary_unpcklbw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpcklbw:
; SSE: # BB#0:
diff --git a/test/CodeGen/X86/vector-shuffle-sse4a.ll b/test/CodeGen/X86/vector-shuffle-sse4a.ll
index 138c421215f4..e458bb6fa52f 100644
--- a/test/CodeGen/X86/vector-shuffle-sse4a.ll
+++ b/test/CodeGen/X86/vector-shuffle-sse4a.ll
@@ -1,4 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=AMD10H
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=BTVER1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=BTVER2
@@ -10,7 +11,6 @@
define <2 x i64> @extrqi_len0_idx0(<2 x i64> %a) {
; ALL-LABEL: extrqi_len0_idx0:
; ALL: # BB#0:
-; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a, i8 0, i8 0)
ret <2 x i64> %1
@@ -36,6 +36,11 @@ define <2 x i64> @extrqi_len32_idx48(<2 x i64> %a) {
}
define <16 x i8> @shuf_0zzzuuuuuuuuuuuu(<16 x i8> %a0) {
+; AMD10H-LABEL: shuf_0zzzuuuuuuuuuuuu:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuf_0zzzuuuuuuuuuuuu:
; BTVER1: # BB#0:
; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -50,12 +55,17 @@ define <16 x i8> @shuf_0zzzuuuuuuuuuuuu(<16 x i8> %a0) {
}
define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(<16 x i8> %a0) {
+; AMD10H-LABEL: shuf_0zzzzzzz1zzzzzzz:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: movdqa %xmm0, %xmm1
+; AMD10H-NEXT: extrq {{.*#+}} xmm1 = xmm1[1],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuf_0zzzzzzz1zzzzzzz:
; BTVER1: # BB#0:
-; BTVER1-NEXT: movdqa %xmm0, %xmm1
-; BTVER1-NEXT: extrq {{.*#+}} xmm1 = xmm1[1],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
-; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; BTVER1-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_0zzzzzzz1zzzzzzz:
@@ -67,12 +77,17 @@ define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(<16 x i8> %a0) {
}
define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(<16 x i8> %a0) {
+; AMD10H-LABEL: shuf_2zzzzzzz3zzzzzzz:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: movdqa %xmm0, %xmm1
+; AMD10H-NEXT: extrq {{.*#+}} xmm1 = xmm1[3],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuf_2zzzzzzz3zzzzzzz:
; BTVER1: # BB#0:
-; BTVER1-NEXT: movdqa %xmm0, %xmm1
-; BTVER1-NEXT: extrq {{.*#+}} xmm1 = xmm1[3],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
-; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; BTVER1-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_2zzzzzzz3zzzzzzz:
@@ -85,6 +100,11 @@ define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(<16 x i8> %a0) {
}
define <16 x i8> @shuf_01zzuuuuuuuuuuuu(<16 x i8> %a0) {
+; AMD10H-LABEL: shuf_01zzuuuuuuuuuuuu:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuf_01zzuuuuuuuuuuuu:
; BTVER1: # BB#0:
; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -99,12 +119,17 @@ define <16 x i8> @shuf_01zzuuuuuuuuuuuu(<16 x i8> %a0) {
}
define <16 x i8> @shuf_01zzzzzz23zzzzzz(<16 x i8> %a0) {
+; AMD10H-LABEL: shuf_01zzzzzz23zzzzzz:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: movdqa %xmm0, %xmm1
+; AMD10H-NEXT: extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuf_01zzzzzz23zzzzzz:
; BTVER1: # BB#0:
-; BTVER1-NEXT: movdqa %xmm0, %xmm1
-; BTVER1-NEXT: extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
-; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; BTVER1-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[2,3],zero,zero,zero,zero,zero,zero
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_01zzzzzz23zzzzzz:
@@ -143,21 +168,37 @@ define <8 x i16> @shuf_12zzuuuu(<8 x i16> %a0) {
}
define <8 x i16> @shuf_012zuuuu(<8 x i16> %a0) {
-; ALL-LABEL: shuf_012zuuuu:
-; ALL: # BB#0:
-; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; ALL-NEXT: retq
+; AMD10H-LABEL: shuf_012zuuuu:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: retq
+;
+; BTVER1-LABEL: shuf_012zuuuu:
+; BTVER1: # BB#0:
+; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; BTVER1-NEXT: retq
+;
+; BTVER2-LABEL: shuf_012zuuuu:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; BTVER2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
+; BTVER2-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x i16> %s
}
define <8 x i16> @shuf_0zzz1zzz(<8 x i16> %a0) {
+; AMD10H-LABEL: shuf_0zzz1zzz:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: movdqa %xmm0, %xmm1
+; AMD10H-NEXT: extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AMD10H-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuf_0zzz1zzz:
; BTVER1: # BB#0:
-; BTVER1-NEXT: movdqa %xmm0, %xmm1
-; BTVER1-NEXT: extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
-; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; BTVER1-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[2,3],zero,zero,zero,zero,zero,zero
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_0zzz1zzz:
@@ -169,6 +210,12 @@ define <8 x i16> @shuf_0zzz1zzz(<8 x i16> %a0) {
}
define <4 x i32> @shuf_0z1z(<4 x i32> %a0) {
+; AMD10H-LABEL: shuf_0z1z:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: pxor %xmm1, %xmm1
+; AMD10H-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuf_0z1z:
; BTVER1: # BB#0:
; BTVER1-NEXT: pxor %xmm1, %xmm1
@@ -189,10 +236,20 @@ define <4 x i32> @shuf_0z1z(<4 x i32> %a0) {
; A length of zero is equivalent to a bit length of 64.
define <2 x i64> @insertqi_len0_idx0(<2 x i64> %a, <2 x i64> %b) {
-; ALL-LABEL: insertqi_len0_idx0:
-; ALL: # BB#0:
-; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6,7],xmm0[u,u,u,u,u,u,u,u]
-; ALL-NEXT: retq
+; AMD10H-LABEL: insertqi_len0_idx0:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: movaps %xmm1, %xmm0
+; AMD10H-NEXT: retq
+;
+; BTVER1-LABEL: insertqi_len0_idx0:
+; BTVER1: # BB#0:
+; BTVER1-NEXT: movaps %xmm1, %xmm0
+; BTVER1-NEXT: retq
+;
+; BTVER2-LABEL: insertqi_len0_idx0:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps %xmm1, %xmm0
+; BTVER2-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a, <2 x i64> %b, i8 0, i8 0)
ret <2 x i64> %1
}
@@ -303,6 +360,15 @@ define <8 x i16> @shuf_089uuuuu(<8 x i16> %a0, <8 x i16> %a1) {
; Out of range.
define <16 x i8> @shuffle_8_18_uuuuuuuuuuuuuu(<16 x i8> %a, <16 x i8> %b) {
+; AMD10H-LABEL: shuffle_8_18_uuuuuuuuuuuuuu:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AMD10H-NEXT: andpd {{.*}}(%rip), %xmm0
+; AMD10H-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AMD10H-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; AMD10H-NEXT: packuswb %xmm0, %xmm0
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuffle_8_18_uuuuuuuuuuuuuu:
; BTVER1: # BB#0:
; BTVER1-NEXT: psrld $16, %xmm1
@@ -321,6 +387,13 @@ define <16 x i8> @shuffle_8_18_uuuuuuuuuuuuuu(<16 x i8> %a, <16 x i8> %b) {
}
define <16 x i8> @shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8> %v) {
+; AMD10H-LABEL: shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AMD10H-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AMD10H-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
; BTVER1: # BB#0:
; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,5,5,4,4,5,5,4,4,5,5,6,6,7,7]
@@ -335,6 +408,12 @@ define <16 x i8> @shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8
}
define <16 x i8> @shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8> %v) {
+; AMD10H-LABEL: shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
+; AMD10H: # BB#0:
+; AMD10H-NEXT: psrlq $16, %xmm0
+; AMD10H-NEXT: pand {{.*}}(%rip), %xmm0
+; AMD10H-NEXT: retq
+;
; BTVER1-LABEL: shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
; BTVER1: # BB#0:
; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u],zero,xmm0[4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
diff --git a/test/CodeGen/X86/vector-truncate-combine.ll b/test/CodeGen/X86/vector-truncate-combine.ll
index 1a6dac8fa6e4..61808b802517 100644
--- a/test/CodeGen/X86/vector-truncate-combine.ll
+++ b/test/CodeGen/X86/vector-truncate-combine.ll
@@ -11,14 +11,14 @@
; preservation of the extend/truncate operations mentioned above (2 extend and
; 3 truncate instructions).
;
-; NOTE: This operation could be collapsed in to a single truncate. Once that is done
-; this test will have to be adjusted.
+; NOTE: This operation is collapsed to a single truncate, so this test no longer covers
+; what it was originally intended to cover.
-; CHECK: PUNPCKLBWrr
-; CHECK: PUNPCKLWDrr
-; CHECK: PACKUSWBrr
+; CHECK: MOVLHPSrr
+; CHECK: PSHUFHWri
; CHECK: PACKUSWBrr
; CHECK: PACKUSWBrr
+; CHECK: MOVPDI2DIrr
define void @test(double %vec.coerce) local_unnamed_addr {
entry:
diff --git a/test/CodeGen/X86/vector-tzcnt-128.ll b/test/CodeGen/X86/vector-tzcnt-128.ll
index 4b5a00a30d09..820178d2d992 100644
--- a/test/CodeGen/X86/vector-tzcnt-128.ll
+++ b/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -928,17 +928,10 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %xmm0, %xmm1
-; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
; X32-SSE-LABEL: testv8i16:
@@ -1095,17 +1088,10 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %xmm0, %xmm1
-; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
; X32-SSE-LABEL: testv8i16u:
@@ -1243,14 +1229,10 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
; X32-SSE-LABEL: testv16i8:
@@ -1384,14 +1366,10 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
; X32-SSE-LABEL: testv16i8u:
diff --git a/test/CodeGen/X86/vector-tzcnt-256.ll b/test/CodeGen/X86/vector-tzcnt-256.ll
index 16192ec61a55..30e5661d5485 100644
--- a/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -584,17 +584,9 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %ymm0, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; X32-AVX-LABEL: testv16i16:
@@ -722,17 +714,9 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %ymm0, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; X32-AVX-LABEL: testv16i16u:
diff --git a/test/CodeGen/X86/vector-tzcnt-512.ll b/test/CodeGen/X86/vector-tzcnt-512.ll
index 760216d561c4..3bf677aadf19 100644
--- a/test/CodeGen/X86/vector-tzcnt-512.ll
+++ b/test/CodeGen/X86/vector-tzcnt-512.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd,-avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CD
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CDBW
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=-avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512vpopcntdq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VPOPCNTDQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CD
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CDBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vpopcntdq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VPOPCNTDQ
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -34,7 +34,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv8i64:
-; AVX512CDBW: ## BB#0:
+; AVX512CDBW: # BB#0:
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -52,7 +52,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv8i64:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -70,7 +70,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i64:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -84,7 +84,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64u:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -94,7 +94,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv8i64u:
-; AVX512CDBW: ## BB#0:
+; AVX512CDBW: # BB#0:
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -104,7 +104,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv8i64u:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -122,7 +122,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i64u:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -136,7 +136,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandd %zmm1, %zmm0, %zmm0
@@ -172,7 +172,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv16i32:
-; AVX512CDBW: ## BB#0:
+; AVX512CDBW: # BB#0:
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; AVX512CDBW-NEXT: vpandd %zmm2, %zmm0, %zmm0
@@ -194,7 +194,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv16i32:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0
@@ -216,7 +216,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i32:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandd %zmm1, %zmm0, %zmm0
@@ -230,7 +230,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32u:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandd %zmm1, %zmm0, %zmm0
@@ -240,7 +240,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv16i32u:
-; AVX512CDBW: ## BB#0:
+; AVX512CDBW: # BB#0:
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandd %zmm1, %zmm0, %zmm0
@@ -250,7 +250,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv16i32u:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0
@@ -272,7 +272,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i32u:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandd %zmm1, %zmm0, %zmm0
@@ -286,7 +286,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CD-LABEL: testv32i16:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512CD-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -318,7 +318,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv32i16:
-; AVX512CDBW: ## BB#0:
+; AVX512CDBW: # BB#0:
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -338,7 +338,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv32i16:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -358,35 +358,21 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv32i16:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm5, %ymm6, %ymm5
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm0, %ymm6, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %ymm0, %ymm5
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm0, %ymm5, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm1, %ymm6, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %ymm1, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: retq
%out = call <32 x i16> @llvm.cttz.v32i16(<32 x i16> %in, i1 0)
ret <32 x i16> %out
@@ -394,7 +380,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CD-LABEL: testv32i16u:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512CD-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -426,7 +412,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv32i16u:
-; AVX512CDBW: ## BB#0:
+; AVX512CDBW: # BB#0:
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -446,7 +432,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv32i16u:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -466,35 +452,21 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv32i16u:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm5, %ymm6, %ymm5
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm0, %ymm6, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %ymm0, %ymm5
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm0, %ymm5, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm1, %ymm6, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsllw $8, %ymm1, %ymm2
-; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: retq
%out = call <32 x i16> @llvm.cttz.v32i16(<32 x i16> %in, i1 -1)
ret <32 x i16> %out
@@ -502,7 +474,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CD-LABEL: testv64i8:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512CD-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -528,7 +500,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv64i8:
-; AVX512CDBW: ## BB#0:
+; AVX512CDBW: # BB#0:
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -545,7 +517,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv64i8:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -562,7 +534,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv64i8:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -592,7 +564,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CD-LABEL: testv64i8u:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512CD-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -618,7 +590,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv64i8u:
-; AVX512CDBW: ## BB#0:
+; AVX512CDBW: # BB#0:
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -635,7 +607,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv64i8u:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -652,7 +624,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv64i8u:
-; AVX512VPOPCNTDQ: ## BB#0:
+; AVX512VPOPCNTDQ: # BB#0:
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/wide-integer-cmp.ll b/test/CodeGen/X86/wide-integer-cmp.ll
index b5c7f86567a1..182d7cc73c9a 100644
--- a/test/CodeGen/X86/wide-integer-cmp.ll
+++ b/test/CodeGen/X86/wide-integer-cmp.ll
@@ -101,8 +101,8 @@ define i32 @test_wide(i128 %a, i128 %b) {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: jge .LBB4_2
; CHECK-NEXT: # BB#1: # %bb1
; CHECK-NEXT: movl $1, %eax
diff --git a/test/CodeGen/X86/x32-lea-1.ll b/test/CodeGen/X86/x32-lea-1.ll
index 2f7d71e2baf1..afe3581a85bc 100644
--- a/test/CodeGen/X86/x32-lea-1.ll
+++ b/test/CodeGen/X86/x32-lea-1.ll
@@ -1,10 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -O0 | FileCheck %s
-; CHECK: leal {{[-0-9]*}}(%r{{s|b}}p),
-; CHECK-NOT: leal {{[-0-9]*}}(%e{{s|b}}p),
define void @foo(i32** %p) {
+; CHECK-LABEL: foo:
+; CHECK: # BB#0:
+; CHECK-NEXT: leal -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: addl $16, %eax
+; CHECK-NEXT: movl %eax, (%edi)
+; CHECK-NEXT: retq
%a = alloca i32, i32 10
%addr = getelementptr i32, i32* %a, i32 4
store i32* %addr, i32** %p
ret void
}
+
diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll
index 1263605a6dc0..5f85975fdb5c 100644
--- a/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/test/CodeGen/X86/x86-interleaved-access.ll
@@ -1,9 +1,26 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc -mtriple=x86_64-pc-linux -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX3
define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
+; AVX1-LABEL: load_factorf64_4:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovupd (%rdi), %ymm0
+; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
+; AVX1-NEXT: vmovupd 64(%rdi), %ymm2
+; AVX1-NEXT: vmovupd 96(%rdi), %ymm3
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
+; AVX1-NEXT: vhaddpd %ymm5, %ymm4, %ymm4
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT: vaddpd %ymm2, %ymm4, %ymm2
+; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT: vaddpd %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
; AVX-LABEL: load_factorf64_4:
; AVX: # BB#0:
; AVX-NEXT: vmovupd (%rdi), %ymm0
@@ -32,6 +49,21 @@ define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
}
define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
+; AVX1-LABEL: load_factorf64_2:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovupd (%rdi), %ymm0
+; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
+; AVX1-NEXT: vmovupd 64(%rdi), %ymm2
+; AVX1-NEXT: vmovupd 96(%rdi), %ymm3
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT: vmulpd %ymm0, %ymm4, %ymm0
+; AVX1-NEXT: retq
+;
; AVX-LABEL: load_factorf64_2:
; AVX: # BB#0:
; AVX-NEXT: vmovupd (%rdi), %ymm0
@@ -54,6 +86,16 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
}
define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
+; AVX1-LABEL: load_factorf64_1:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovupd (%rdi), %ymm0
+; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],mem[0,1]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],mem[0,1]
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT: vmulpd %ymm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
; AVX-LABEL: load_factorf64_1:
; AVX: # BB#0:
; AVX-NEXT: vmovupd (%rdi), %ymm0
@@ -98,24 +140,24 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: load_factori64_4:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
-; AVX2-NEXT: vmovdqu 64(%rdi), %ymm2
-; AVX2-NEXT: vmovdqu 96(%rdi), %ymm3
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT: vpaddq %ymm0, %ymm3, %ymm0
-; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: retq
+; AVX-LABEL: load_factori64_4:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqu (%rdi), %ymm0
+; AVX-NEXT: vmovdqu 32(%rdi), %ymm1
+; AVX-NEXT: vmovdqu 64(%rdi), %ymm2
+; AVX-NEXT: vmovdqu 96(%rdi), %ymm3
+; AVX-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
+; AVX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX-NEXT: vpaddq %ymm3, %ymm4, %ymm3
+; AVX-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX-NEXT: vpaddq %ymm0, %ymm3, %ymm0
+; AVX-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX-NEXT: retq
%wide.vec = load <16 x i64>, <16 x i64>* %ptr, align 16
%strided.v0 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
%strided.v1 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
@@ -128,6 +170,23 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
}
define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x double> %v3) {
+; AVX1-LABEL: store_factorf64_4:
+; AVX1: # BB#0:
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT: vmovupd %ymm0, 96(%rdi)
+; AVX1-NEXT: vmovupd %ymm3, 64(%rdi)
+; AVX1-NEXT: vmovupd %ymm4, 32(%rdi)
+; AVX1-NEXT: vmovupd %ymm2, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX-LABEL: store_factorf64_4:
; AVX: # BB#0:
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
@@ -169,22 +228,22 @@ define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
-; AVX2-LABEL: store_factori64_4:
-; AVX2: # BB#0:
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm4
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm5
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT: vmovdqu %ymm0, 96(%rdi)
-; AVX2-NEXT: vmovdqu %ymm3, 64(%rdi)
-; AVX2-NEXT: vmovdqu %ymm4, 32(%rdi)
-; AVX2-NEXT: vmovdqu %ymm2, (%rdi)
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; AVX-LABEL: store_factori64_4:
+; AVX: # BB#0:
+; AVX-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm4
+; AVX-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm5
+; AVX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX-NEXT: vmovdqu %ymm0, 96(%rdi)
+; AVX-NEXT: vmovdqu %ymm3, 64(%rdi)
+; AVX-NEXT: vmovdqu %ymm4, 32(%rdi)
+; AVX-NEXT: vmovdqu %ymm2, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
%s0 = shufflevector <4 x i64> %v0, <4 x i64> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%s1 = shufflevector <4 x i64> %v2, <4 x i64> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%interleaved.vec = shufflevector <8 x i64> %s0, <8 x i64> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
@@ -252,54 +311,54 @@ define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
-; AVX2-LABEL: interleaved_store_vf32_i8_stride4:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm4
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm5
-; AVX2-NEXT: vpblendw {{.*#+}} ymm8 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4],ymm4[5],ymm5[6],ymm4[7],ymm5[8],ymm4[9],ymm5[10],ymm4[11],ymm5[12],ymm4[13],ymm5[14],ymm4[15]
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm5
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm6
-; AVX2-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4],ymm5[5],ymm6[6],ymm5[7],ymm6[8],ymm5[9],ymm6[10],ymm5[11],ymm6[12],ymm5[13],ymm6[14],ymm5[15]
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm6
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm7, %ymm4
-; AVX2-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2],ymm6[3],ymm4[4],ymm6[5],ymm4[6],ymm6[7],ymm4[8],ymm6[9],ymm4[10],ymm6[11],ymm4[12],ymm6[13],ymm4[14],ymm6[15]
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
-; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
-; AVX2-NEXT: vmovdqa %ymm0, 96(%rdi)
-; AVX2-NEXT: vmovdqa %ymm4, 64(%rdi)
-; AVX2-NEXT: vmovdqa %ymm5, 32(%rdi)
-; AVX2-NEXT: vmovdqa %ymm8, (%rdi)
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; AVX-LABEL: interleaved_store_vf32_i8_stride4:
+; AVX: # BB#0:
+; AVX-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm4
+; AVX-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; AVX-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; AVX-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm5
+; AVX-NEXT: vpblendw {{.*#+}} ymm8 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4],ymm4[5],ymm5[6],ymm4[7],ymm5[8],ymm4[9],ymm5[10],ymm4[11],ymm5[12],ymm4[13],ymm5[14],ymm4[15]
+; AVX-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; AVX-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm5
+; AVX-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; AVX-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm6
+; AVX-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4],ymm5[5],ymm6[6],ymm5[7],ymm6[8],ymm5[9],ymm6[10],ymm5[11],ymm6[12],ymm5[13],ymm6[14],ymm5[15]
+; AVX-NEXT: vextracti128 $1, %ymm3, %xmm3
+; AVX-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
+; AVX-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm6
+; AVX-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX-NEXT: vpmovzxwd {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; AVX-NEXT: vinserti128 $1, %xmm4, %ymm7, %ymm4
+; AVX-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2],ymm6[3],ymm4[4],ymm6[5],ymm4[6],ymm6[7],ymm4[8],ymm6[9],ymm4[10],ymm6[11],ymm4[12],ymm6[13],ymm4[14],ymm6[15]
+; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
+; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX-NEXT: vmovdqa %ymm0, 96(%rdi)
+; AVX-NEXT: vmovdqa %ymm4, 64(%rdi)
+; AVX-NEXT: vmovdqa %ymm5, 32(%rdi)
+; AVX-NEXT: vmovdqa %ymm8, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
%v1 = shufflevector <32 x i8> %x1, <32 x i8> %x2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
%v2 = shufflevector <32 x i8> %x3, <32 x i8> %x4, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
%interleaved.vec = shufflevector <64 x i8> %v1, <64 x i8> %v2, <128 x i32> <i32 0, i32 32, i32 64, i32 96, i32 1, i32 33, i32 65, i32 97, i32 2, i32 34, i32 66, i32 98, i32 3, i32 35, i32 67, i32 99, i32 4, i32 36, i32 68, i32 100, i32 5, i32 37, i32 69, i32 101, i32 6, i32 38, i32 70, i32 102, i32 7, i32 39, i32 71, i32 103, i32 8, i32 40, i32 72, i32 104, i32 9, i32 41, i32 73, i32 105, i32 10, i32 42, i32 74, i32 106, i32 11, i32 43, i32 75, i32 107, i32 12, i32 44, i32 76, i32 108, i32 13, i32 45, i32 77, i32 109, i32 14, i32 46, i32 78, i32 110, i32 15, i32 47, i32 79, i32 111, i32 16, i32 48, i32 80, i32 112, i32 17, i32 49, i32 81, i32 113, i32 18, i32 50, i32 82, i32 114, i32 19, i32 51, i32 83, i32 115, i32 20, i32 52, i32 84, i32 116, i32 21, i32 53, i32 85, i32 117, i32 22, i32 54, i32 86, i32 118, i32 23, i32 55, i32 87, i32 119, i32 24, i32 56, i32 88, i32 120, i32 25, i32 57, i32 89, i32 121, i32 26, i32 58, i32 90, i32 122, i32 27, i32 59, i32 91, i32 123, i32 28, i32 60, i32 92, i32 124, i32 29, i32 61, i32 93, i32 125, i32 30, i32 62, i32 94, i32 126, i32 31, i32 63, i32 95, i32 127>
diff --git a/test/CodeGen/X86/zext-shl.ll b/test/CodeGen/X86/zext-shl.ll
index ac3ecc85f2d9..7722f46d753a 100644
--- a/test/CodeGen/X86/zext-shl.ll
+++ b/test/CodeGen/X86/zext-shl.ll
@@ -1,25 +1,26 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s
-define i32 @t1(i8 zeroext %x) nounwind readnone ssp {
-entry:
+define i32 @t1(i8 zeroext %x) nounwind {
; CHECK-LABEL: t1:
-; CHECK: shll
-; CHECK-NOT: movzwl
-; CHECK: ret
- %0 = zext i8 %x to i16
- %1 = shl i16 %0, 5
- %2 = zext i16 %1 to i32
- ret i32 %2
+; CHECK: # BB#0:
+; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: shll $5, %eax
+; CHECK-NEXT: retl
+ %t0 = zext i8 %x to i16
+ %t1 = shl i16 %t0, 5
+ %t2 = zext i16 %t1 to i32
+ ret i32 %t2
}
-define i32 @t2(i8 zeroext %x) nounwind readnone ssp {
-entry:
+define i32 @t2(i8 zeroext %x) nounwind {
; CHECK-LABEL: t2:
-; CHECK: shrl
-; CHECK-NOT: movzwl
-; CHECK: ret
- %0 = zext i8 %x to i16
- %1 = lshr i16 %0, 3
- %2 = zext i16 %1 to i32
- ret i32 %2
+; CHECK: # BB#0:
+; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: shrl $3, %eax
+; CHECK-NEXT: retl
+ %t0 = zext i8 %x to i16
+ %t1 = lshr i16 %t0, 3
+ %t2 = zext i16 %t1 to i32
+ ret i32 %t2
}
diff --git a/test/CodeGen/X86/zext-trunc.ll b/test/CodeGen/X86/zext-trunc.ll
index 32afd6b96a8b..e51a77abc92e 100644
--- a/test/CodeGen/X86/zext-trunc.ll
+++ b/test/CodeGen/X86/zext-trunc.ll
@@ -1,11 +1,12 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; rdar://7570931
define i64 @foo(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: foo:
-; CHECK: leal
-; CHECK-NOT: movl
-; CHECK: ret
+; CHECK: # BB#0:
+; CHECK-NEXT: leal (%rdi,%rsi), %eax
+; CHECK-NEXT: retq
%c = add i64 %a, %b
%d = trunc i64 %c to i32
%e = zext i32 %d to i64
diff --git a/test/DebugInfo/COFF/asm.ll b/test/DebugInfo/COFF/asm.ll
index 3d245e9d396d..a55eec2782a6 100644
--- a/test/DebugInfo/COFF/asm.ll
+++ b/test/DebugInfo/COFF/asm.ll
@@ -35,7 +35,7 @@
; OBJ32: CodeViewDebugInfo [
; OBJ32: Subsection [
; OBJ32-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ32: ProcStart {
+; OBJ32: {{.*}}Proc{{.*}}Sym {
; OBJ32: CodeSize: 0x6
; OBJ32: DisplayName: f
; OBJ32: LinkageName: _f
@@ -94,13 +94,13 @@
; OBJ64: ]
; OBJ64: Subsection [
; OBJ64-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ64: ProcStart {
+; OBJ64: {{.*}}Proc{{.*}}Sym {
; OBJ64: CodeSize: 0xE
; OBJ64: DisplayName: f
; OBJ64: LinkageName: f
; OBJ64: }
; OBJ64-NEXT: ProcEnd {
-; OBJ64-NEXT: }
+; OBJ64: }
; OBJ64-NEXT: ]
; OBJ64: FunctionLineTable [
; OBJ64-NEXT: Name: f
diff --git a/test/DebugInfo/COFF/cpp-mangling.ll b/test/DebugInfo/COFF/cpp-mangling.ll
index 8d1a136ec5fc..6f8b5a21ffba 100644
--- a/test/DebugInfo/COFF/cpp-mangling.ll
+++ b/test/DebugInfo/COFF/cpp-mangling.ll
@@ -12,12 +12,12 @@
; fn_tmpl<int, foo::bar>();
; }
-; CHECK: ProcStart {
+; CHECK: {{.*}}Proc{{.*}}Sym {
; CHECK: FunctionType: bar ({{.*}})
; CHECK: DisplayName: foo::bar{{$}}
; CHECK-NEXT: LinkageName: ?bar@foo@@YAHH@Z
-; CHECK: ProcStart {
+; CHECK: {{.*}}Proc{{.*}}Sym {
; CHECK: FunctionType: fn_tmpl ({{.*}})
; CHECK: DisplayName: foo::fn_tmpl<int,&foo::bar>
; CHECK-NEXT: LinkageName: ??$fn_tmpl@H$1?bar@foo@@YAHH@Z@foo@@YAXXZ
diff --git a/test/DebugInfo/COFF/fp-stack.ll b/test/DebugInfo/COFF/fp-stack.ll
index 4a30a49a3768..8061e2ee23d1 100644
--- a/test/DebugInfo/COFF/fp-stack.ll
+++ b/test/DebugInfo/COFF/fp-stack.ll
@@ -11,7 +11,7 @@ entry:
}
; ASM: .cv_def_range Lfunc_begin0 Lfunc_end0, "A\021\200\000\000\000"
-; OBJ: DefRangeRegister {
+; OBJ: DefRangeRegisterSym {
; OBJ: Register: 128
; OBJ: MayHaveNoName: 0
; OBJ: LocalVariableAddrRange {
diff --git a/test/DebugInfo/COFF/globals.ll b/test/DebugInfo/COFF/globals.ll
index 0d1b9413e3d8..f5d6906e181e 100644
--- a/test/DebugInfo/COFF/globals.ll
+++ b/test/DebugInfo/COFF/globals.ll
@@ -81,13 +81,13 @@
; OBJ: DisplayName: first
; OBJ: LinkageName: ?first@@3HA
; OBJ: }
-; OBJ: ThreadLocalDataSym {
+; OBJ: GlobalTLS {
; OBJ: DataOffset: ?middle@@3PEBHEB+0x0
; OBJ: Type: const int* (0x1001)
; OBJ: DisplayName: middle
; OBJ: LinkageName: ?middle@@3PEBHEB
; OBJ: }
-; OBJ: DataSym {
+; OBJ: GlobalData {
; OBJ: Kind: S_GDATA32 (0x110D)
; OBJ: DataOffset: ?last@@3HA+0x0
; OBJ: Type: int (0x74)
@@ -101,7 +101,7 @@
; OBJ: Magic: 0x4
; OBJ: Subsection [
; OBJ: SubSectionType: Symbols (0xF1)
-; OBJ: DataSym {
+; OBJ: GlobalData {
; OBJ: DataOffset: ?comdat@?$A@X@@2HB+0x0
; OBJ: Type: const int (0x1000)
; OBJ: DisplayName: comdat
diff --git a/test/DebugInfo/COFF/inlining-files.ll b/test/DebugInfo/COFF/inlining-files.ll
index a6f5d281eb09..e3e616b618da 100644
--- a/test/DebugInfo/COFF/inlining-files.ll
+++ b/test/DebugInfo/COFF/inlining-files.ll
@@ -18,10 +18,10 @@
; OBJ: Subsection [
; OBJ: SubSectionType: Symbols (0xF1)
-; OBJ: ProcStart {
+; OBJ: {{.*}}Proc{{.*}}Sym {
; OBJ: DisplayName: f
; OBJ: }
-; OBJ: InlineSite {
+; OBJ: InlineSiteSym {
; OBJ: PtrParent: 0x0
; OBJ: PtrEnd: 0x0
; OBJ: Inlinee: file_change (0x1002)
diff --git a/test/DebugInfo/COFF/inlining-header.ll b/test/DebugInfo/COFF/inlining-header.ll
index 0981825e0d3b..7e19f14716f0 100644
--- a/test/DebugInfo/COFF/inlining-header.ll
+++ b/test/DebugInfo/COFF/inlining-header.ll
@@ -63,7 +63,7 @@
; OBJ: Subsection [
; OBJ: SubSectionType: Symbols (0xF1)
-; OBJ: ProcStart {
+; OBJ: {{.*}}Proc{{.*}}Sym {
; OBJ: Kind: S_GPROC32_ID (0x1147)
; OBJ: FunctionType: main (0x1005)
; OBJ: CodeOffset: _main+0x0
@@ -74,8 +74,8 @@
; OBJ: LinkageName: _main
; OBJ: }
-; Previously, g's InlineSite referenced t.h, which was wasteful.
-; OBJ: InlineSite {
+; Previously, g's InlineSiteSym referenced t.h, which was wasteful.
+; OBJ: InlineSiteSym {
; OBJ: Inlinee: g (0x1002)
; OBJ: BinaryAnnotations [
; OBJ-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0x6, LineOffset: 1}
@@ -85,7 +85,7 @@
; OBJ-NEXT: ]
; OBJ: }
-; OBJ: InlineSite {
+; OBJ: InlineSiteSym {
; OBJ: Inlinee: f (0x1003)
; OBJ: BinaryAnnotations [
; OBJ-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0xD, LineOffset: 1}
diff --git a/test/DebugInfo/COFF/inlining-levels.ll b/test/DebugInfo/COFF/inlining-levels.ll
index 0c5c73c8fdbe..7f93dbb850a2 100644
--- a/test/DebugInfo/COFF/inlining-levels.ll
+++ b/test/DebugInfo/COFF/inlining-levels.ll
@@ -18,14 +18,14 @@
; OBJ: Subsection [
; OBJ: SubSectionType: Symbols (0xF1)
-; OBJ: ProcStart {
-; OBJ: InlineSite {
+; OBJ: {{.*}}Proc{{.*}}Sym {
+; OBJ: InlineSiteSym {
; OBJ: Inlinee: h (0x1002)
; OBJ: }
-; OBJ: InlineSite {
+; OBJ: InlineSiteSym {
; OBJ: Inlinee: g (0x1003)
; OBJ: }
-; OBJ: InlineSite {
+; OBJ: InlineSiteSym {
; OBJ: Inlinee: f (0x1004)
; OBJ: }
; OBJ: InlineSiteEnd {
diff --git a/test/DebugInfo/COFF/inlining-same-name.ll b/test/DebugInfo/COFF/inlining-same-name.ll
index 4a9c9924135d..3700b7060a7a 100644
--- a/test/DebugInfo/COFF/inlining-same-name.ll
+++ b/test/DebugInfo/COFF/inlining-same-name.ll
@@ -14,15 +14,15 @@
; CHECK: CodeViewDebugInfo [
; CHECK: Section: .debug$S
; CHECK: Subsection [
-; CHECK: ProcStart {
+; CHECK: {{.*}}Proc{{.*}}Sym {
; CHECK: DisplayName: main
; CHECK: }
-; CHECK: InlineSite {
+; CHECK: InlineSiteSym {
; CHECK: Inlinee: same_name (0x1002)
; CHECK: }
; CHECK: InlineSiteEnd {
; CHECK: }
-; CHECK: InlineSite {
+; CHECK: InlineSiteSym {
; CHECK: Inlinee: same_name (0x1002)
; CHECK: }
; CHECK: InlineSiteEnd {
diff --git a/test/DebugInfo/COFF/inlining.ll b/test/DebugInfo/COFF/inlining.ll
index 76b8f8c88ee2..ddfd5e056a1b 100644
--- a/test/DebugInfo/COFF/inlining.ll
+++ b/test/DebugInfo/COFF/inlining.ll
@@ -166,7 +166,7 @@
; OBJ: ]
; OBJ: Subsection [
; OBJ: SubSectionType: Symbols (0xF1)
-; OBJ: ProcStart {
+; OBJ: {{.*}}Proc{{.*}}Sym {
; OBJ: PtrParent: 0x0
; OBJ: PtrEnd: 0x0
; OBJ: PtrNext: 0x0
@@ -181,7 +181,7 @@
; OBJ: DisplayName: baz
; OBJ: LinkageName: ?baz@@YAXXZ
; OBJ: }
-; OBJ: InlineSite {
+; OBJ: InlineSiteSym {
; OBJ: PtrParent: 0x0
; OBJ: PtrEnd: 0x0
; OBJ: Inlinee: bar (0x1002)
@@ -193,7 +193,7 @@
; OBJ-NEXT: ChangeCodeLength: 0x7
; OBJ: ]
; OBJ: }
-; OBJ: InlineSite {
+; OBJ: InlineSiteSym {
; OBJ: PtrParent: 0x0
; OBJ: PtrEnd: 0x0
; OBJ: Inlinee: foo (0x1003)
diff --git a/test/DebugInfo/COFF/int8-char-type.ll b/test/DebugInfo/COFF/int8-char-type.ll
index 82972a452819..2e4395b4a599 100644
--- a/test/DebugInfo/COFF/int8-char-type.ll
+++ b/test/DebugInfo/COFF/int8-char-type.ll
@@ -5,7 +5,7 @@
; DW_ATE_[un]signed encoding for all integer types if they don't have distinct
; integer types for characters types. This was PR30552.
-; CHECK-LABEL: DataSym {
+; CHECK-LABEL: GlobalData {
; CHECK-NEXT: Kind: S_GDATA32 (0x110D)
; CHECK-NEXT: DataOffset:
; CHECK-NEXT: Type: signed char (0x10)
@@ -13,7 +13,7 @@
; CHECK-NEXT: LinkageName: x
; CHECK-NEXT: }
-; CHECK-LABEL: DataSym {
+; CHECK-LABEL: GlobalData {
; CHECK-NEXT: Kind: S_GDATA32 (0x110D)
; CHECK-NEXT: DataOffset:
; CHECK-NEXT: Type: unsigned char (0x20)
diff --git a/test/DebugInfo/COFF/local-constant.ll b/test/DebugInfo/COFF/local-constant.ll
index bf8ba8446a6d..c99dd32e22e4 100644
--- a/test/DebugInfo/COFF/local-constant.ll
+++ b/test/DebugInfo/COFF/local-constant.ll
@@ -11,10 +11,11 @@
; FIXME: Find a way to describe variables optimized to constants.
-; OBJ: ProcStart {
+; OBJ: {{.*}}Proc{{.*}}Sym {
; OBJ: DisplayName: constant_var
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
+; OBJ-NEXT: Kind:
; OBJ-NEXT: Type: int (0x74)
; OBJ-NEXT: Flags [ (0x100)
; OBJ-NEXT: IsOptimizedOut (0x100)
diff --git a/test/DebugInfo/COFF/local-variable-gap.ll b/test/DebugInfo/COFF/local-variable-gap.ll
index a2d05eaa03e4..ab38bbd8c13f 100644
--- a/test/DebugInfo/COFF/local-variable-gap.ll
+++ b/test/DebugInfo/COFF/local-variable-gap.ll
@@ -66,12 +66,13 @@
; ASM: .short 2 # Record length
; ASM: .short 4431 # Record kind: S_PROC_ID_END
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: VarName: p
; OBJ: }
-; OBJ-NOT: Local {
-; OBJ: DefRangeRegister {
+; OBJ-NOT: LocalSym {
+; OBJ: DefRangeRegisterSym {
+; OBJ-NEXT: Kind:
; OBJ-NEXT: Register: 23
; OBJ-NEXT: MayHaveNoName: 0
; OBJ-NEXT: LocalVariableAddrRange {
diff --git a/test/DebugInfo/COFF/local-variables.ll b/test/DebugInfo/COFF/local-variables.ll
index 249b6e1103db..f7087f76f4c1 100644
--- a/test/DebugInfo/COFF/local-variables.ll
+++ b/test/DebugInfo/COFF/local-variables.ll
@@ -99,18 +99,18 @@
; OBJ: Subsection [
; OBJ: SubSectionType: Symbols (0xF1)
-; OBJ: ProcStart {
+; OBJ: {{.*}}Proc{{.*}}Sym {
; OBJ: DisplayName: f
; OBJ: LinkageName: f
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x1)
; OBJ: IsParameter (0x1)
; OBJ: ]
; OBJ: VarName: param
; OBJ: }
-; OBJ: DefRangeRegisterRel {
+; OBJ: DefRangeRegisterRelSym {
; OBJ: BaseRegister: 335
; OBJ: HasSpilledUDTMember: No
; OBJ: OffsetInParent: 0
@@ -121,13 +121,13 @@
; OBJ: Range: 0x4F
; OBJ: }
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x0)
; OBJ: ]
; OBJ: VarName: a
; OBJ: }
-; OBJ: DefRangeRegisterRel {
+; OBJ: DefRangeRegisterRelSym {
; OBJ: BaseRegister: 335
; OBJ: HasSpilledUDTMember: No
; OBJ: OffsetInParent: 0
@@ -138,13 +138,13 @@
; OBJ: Range: 0x21
; OBJ: }
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x0)
; OBJ: ]
; OBJ: VarName: b
; OBJ: }
-; OBJ: DefRangeRegisterRel {
+; OBJ: DefRangeRegisterRelSym {
; OBJ: BaseRegister: 335
; OBJ: HasSpilledUDTMember: No
; OBJ: OffsetInParent: 0
@@ -155,7 +155,7 @@
; OBJ: Range: 0x1F
; OBJ: }
; OBJ: }
-; OBJ: InlineSite {
+; OBJ: InlineSiteSym {
; OBJ: PtrParent: 0x0
; OBJ: PtrEnd: 0x0
; OBJ: Inlinee: will_be_inlined (0x1002)
@@ -166,13 +166,13 @@
; OBJ: ChangeCodeLength: 0xC
; OBJ: ]
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x0)
; OBJ: ]
; OBJ: VarName: v
; OBJ: }
-; OBJ: DefRangeRegisterRel {
+; OBJ: DefRangeRegisterRelSym {
; OBJ: BaseRegister: 335
; OBJ: HasSpilledUDTMember: No
; OBJ: OffsetInParent: 0
@@ -185,7 +185,7 @@
; OBJ: }
; OBJ: InlineSiteEnd {
; OBJ: }
-; OBJ: InlineSite {
+; OBJ: InlineSiteSym {
; OBJ: PtrParent: 0x0
; OBJ: PtrEnd: 0x0
; OBJ: Inlinee: will_be_inlined (0x1002)
@@ -196,13 +196,13 @@
; OBJ: ChangeCodeLength: 0xA
; OBJ: ]
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x0)
; OBJ: ]
; OBJ: VarName: v
; OBJ: }
-; OBJ: DefRangeRegisterRel {
+; OBJ: DefRangeRegisterRelSym {
; OBJ: BaseRegister: 335
; OBJ: HasSpilledUDTMember: No
; OBJ: OffsetInParent: 0
diff --git a/test/DebugInfo/COFF/long-name.ll b/test/DebugInfo/COFF/long-name.ll
index 998d77f7ca06..65bd4c16f750 100644
--- a/test/DebugInfo/COFF/long-name.ll
+++ b/test/DebugInfo/COFF/long-name.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -filetype=obj | llvm-readobj -codeview | FileCheck %s
-; CHECK: ProcStart {
+; CHECK: {{.*}}Proc{{.*}}Sym {
; CHECK: Kind: S_GPROC32_ID (0x1147)
; CHECK: FunctionType: {{A+}} (0x1002)
; CHECK: CodeOffset: f+0x0
diff --git a/test/DebugInfo/COFF/multifile.ll b/test/DebugInfo/COFF/multifile.ll
index 5e53fa57acc4..8af99a6063e6 100644
--- a/test/DebugInfo/COFF/multifile.ll
+++ b/test/DebugInfo/COFF/multifile.ll
@@ -43,13 +43,13 @@
; OBJ32: ]
; OBJ32: Subsection [
; OBJ32-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ32: ProcStart {
+; OBJ32: {{.*}}Proc{{.*}}Sym {
; OBJ32: CodeSize: 0x10
; OBJ32: DisplayName: f
; OBJ32: LinkageName: _f
; OBJ32: }
; OBJ32-NEXT: ProcEnd {
-; OBJ32-NEXT: }
+; OBJ32: }
; OBJ32-NEXT: ]
; OBJ32: FunctionLineTable [
; OBJ32-NEXT: Name: _f
@@ -115,13 +115,13 @@
; OBJ64: ]
; OBJ64: Subsection [
; OBJ64-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ64: ProcStart {
+; OBJ64: {{.*}}Proc{{.*}}Sym {
; OBJ64: CodeSize: 0x18
; OBJ64: DisplayName: f
; OBJ64: LinkageName: f
; OBJ64: }
; OBJ64-NEXT: ProcEnd {
-; OBJ64-NEXT: }
+; OBJ64: }
; OBJ64-NEXT: ]
; OBJ64: FunctionLineTable [
; OBJ64-NEXT: Name: f
diff --git a/test/DebugInfo/COFF/multifunction.ll b/test/DebugInfo/COFF/multifunction.ll
index a6290e8f021d..87db2a20eaa6 100644
--- a/test/DebugInfo/COFF/multifunction.ll
+++ b/test/DebugInfo/COFF/multifunction.ll
@@ -145,7 +145,7 @@
; OBJ32: ]
; OBJ32: Subsection [
; OBJ32-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ32: ProcStart {
+; OBJ32: {{.*}}Proc{{.*}}Sym {
; OBJ32: Kind: S_LPROC32_ID (0x1146)
; OBJ32: CodeSize: 0x6
; OBJ32: DisplayName: x
@@ -159,7 +159,7 @@
; OBJ32: ]
; OBJ32: Subsection [
; OBJ32-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ32: ProcStart {
+; OBJ32: {{.*}}Proc{{.*}}Sym {
; OBJ32: Kind: S_GPROC32_ID (0x1147)
; OBJ32: CodeSize: 0x6
; OBJ32: DisplayName: y
@@ -173,7 +173,7 @@
; OBJ32: ]
; OBJ32: Subsection [
; OBJ32-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ32: ProcStart {
+; OBJ32: {{.*}}Proc{{.*}}Sym {
; OBJ32: Kind: S_GPROC32_ID (0x1147)
; OBJ32: CodeSize: 0x10
; OBJ32: DisplayName: f
@@ -419,7 +419,7 @@
; OBJ64-NEXT: ]
; OBJ64: Subsection [
; OBJ64-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ64: ProcStart {
+; OBJ64: {{.*}}Proc{{.*}}Sym {
; OBJ64: Kind: S_LPROC32_ID (0x1146)
; OBJ64: CodeSize: 0xE
; OBJ64: DisplayName: x
@@ -433,7 +433,7 @@
; OBJ64: ]
; OBJ64: Subsection [
; OBJ64-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ64: ProcStart {
+; OBJ64: {{.*}}Proc{{.*}}Sym {
; OBJ64: Kind: S_GPROC32_ID (0x1147)
; OBJ64: CodeSize: 0xE
; OBJ64: DisplayName: y
@@ -447,7 +447,7 @@
; OBJ64: ]
; OBJ64: Subsection [
; OBJ64-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ64: ProcStart {
+; OBJ64: {{.*}}Proc{{.*}}Sym {
; OBJ64: Kind: S_GPROC32_ID (0x1147)
; OBJ64: CodeSize: 0x18
; OBJ64: DisplayName: f
diff --git a/test/DebugInfo/COFF/pieces.ll b/test/DebugInfo/COFF/pieces.ll
index 60330e057726..098f2ae62f0b 100644
--- a/test/DebugInfo/COFF/pieces.ll
+++ b/test/DebugInfo/COFF/pieces.ll
@@ -105,21 +105,21 @@
; ASM: .cv_def_range [[oy_start]] [[oy_end]], "C\021\027\000\000\000\004\000\000\000"
-; OBJ-LABEL: ProcStart {
+; OBJ-LABEL: {{.*}}Proc{{.*}}Sym {
; OBJ: Kind: S_GPROC32_ID (0x1147)
; OBJ: DisplayName: loop_csr
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: VarName: o
; OBJ: }
-; OBJ: DefRangeSubfieldRegister {
+; OBJ: DefRangeSubfieldRegisterSym {
; OBJ: Register: 24
; OBJ: MayHaveNoName: 0
; OBJ: OffsetInParent: 0
; OBJ: LocalVariableAddrRange {
; OBJ: }
; OBJ: }
-; OBJ: DefRangeSubfieldRegister {
+; OBJ: DefRangeSubfieldRegisterSym {
; OBJ: Register: 23
; OBJ: MayHaveNoName: 0
; OBJ: OffsetInParent: 4
@@ -135,14 +135,14 @@
; ASM: .asciz "o"
; ASM: .cv_def_range .Lfunc_begin1 .Lfunc_end1, "C\021\022\000\000\000\004\000\000\000"
-; OBJ-LABEL: ProcStart {
+; OBJ-LABEL: {{.*}}Proc{{.*}}Sym {
; OBJ: Kind: S_GPROC32_ID (0x1147)
; OBJ: DisplayName: pad_right
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: VarName: o
; OBJ: }
-; OBJ: DefRangeSubfieldRegister {
+; OBJ: DefRangeSubfieldRegisterSym {
; OBJ: Register: 18
; OBJ: MayHaveNoName: 0
; OBJ: OffsetInParent: 4
@@ -158,14 +158,14 @@
; ASM: .asciz "o"
; ASM: .cv_def_range .Lfunc_begin2 .Lfunc_end2, "C\021\022\000\000\000\000\000\000\000"
-; OBJ-LABEL: ProcStart {
+; OBJ-LABEL: {{.*}}Proc{{.*}}Sym {
; OBJ: Kind: S_GPROC32_ID (0x1147)
; OBJ: DisplayName: pad_left
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: VarName: o
; OBJ: }
-; OBJ: DefRangeSubfieldRegister {
+; OBJ: DefRangeSubfieldRegisterSym {
; OBJ: Register: 18
; OBJ: MayHaveNoName: 0
; OBJ: OffsetInParent: 0
@@ -185,17 +185,17 @@
; ASM: .asciz "p"
; ASM: .cv_def_range [[p_start]] .Lfunc_end3, "C\021\021\000\000\000\004\000\000\000"
-; OBJ-LABEL: ProcStart {
+; OBJ-LABEL: {{.*}}Proc{{.*}}Sym {
; OBJ: Kind: S_GPROC32_ID (0x1147)
; OBJ: DisplayName: nested
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: VarName: o
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: VarName: p
; OBJ: }
-; OBJ: DefRangeSubfieldRegister {
+; OBJ: DefRangeSubfieldRegisterSym {
; OBJ: Register: 17
; OBJ: MayHaveNoName: 0
; OBJ: OffsetInParent: 4
@@ -212,14 +212,14 @@
; ASM: .asciz "o"
; ASM: .cv_def_range [[spill_o_x_start]] [[spill_o_x_end]], "E\021O\001A\000$\000\000\000"
-; OBJ-LABEL: ProcStart {
+; OBJ-LABEL: {{.*}}Proc{{.*}}Sym {
; OBJ: Kind: S_GPROC32_ID (0x1147)
; OBJ: DisplayName: bitpiece_spill
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: VarName: o
; OBJ: }
-; OBJ: DefRangeRegisterRel {
+; OBJ: DefRangeRegisterRelSym {
; OBJ: BaseRegister: 335
; OBJ: HasSpilledUDTMember: Yes
; OBJ: OffsetInParent: 4
diff --git a/test/DebugInfo/COFF/register-variables.ll b/test/DebugInfo/COFF/register-variables.ll
index d0ca5ca2afad..f8cd5c4fc3c1 100644
--- a/test/DebugInfo/COFF/register-variables.ll
+++ b/test/DebugInfo/COFF/register-variables.ll
@@ -81,17 +81,17 @@
; OBJ: Subsection [
; OBJ: SubSectionType: Symbols (0xF1)
-; OBJ: ProcStart {
+; OBJ: {{.*}}Proc{{.*}}Sym {
; OBJ: DisplayName: f
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x1)
; OBJ: IsParameter (0x1)
; OBJ: ]
; OBJ: VarName: p
; OBJ: }
-; OBJ: DefRangeRegister {
+; OBJ: DefRangeRegisterSym {
; OBJ: Register: 18
; OBJ: LocalVariableAddrRange {
; OBJ: OffsetStart: .text+0x0
@@ -99,7 +99,7 @@
; OBJ: Range: 0x7
; OBJ: }
; OBJ: }
-; OBJ: DefRangeRegister {
+; OBJ: DefRangeRegisterSym {
; OBJ: Register: 23
; OBJ: LocalVariableAddrRange {
; OBJ: OffsetStart: .text+0x7
@@ -107,13 +107,13 @@
; OBJ: Range: 0x18
; OBJ: }
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x0)
; OBJ: ]
; OBJ: VarName: a
; OBJ: }
-; OBJ: DefRangeRegister {
+; OBJ: DefRangeRegisterSym {
; OBJ: Register: 17
; OBJ: LocalVariableAddrRange {
; OBJ: OffsetStart: .text+0xC
@@ -121,13 +121,13 @@
; OBJ: Range: 0x6
; OBJ: }
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x0)
; OBJ: ]
; OBJ: VarName: c
; OBJ: }
-; OBJ: DefRangeRegister {
+; OBJ: DefRangeRegisterSym {
; OBJ: Register: 17
; OBJ: LocalVariableAddrRange {
; OBJ: OffsetStart: .text+0xC
@@ -135,13 +135,13 @@
; OBJ: Range: 0x4
; OBJ: }
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x0)
; OBJ: ]
; OBJ: VarName: b
; OBJ: }
-; OBJ: DefRangeRegister {
+; OBJ: DefRangeRegisterSym {
; OBJ: Register: 17
; OBJ: MayHaveNoName: 0
; OBJ: OffsetStart: .text+0x12
@@ -149,19 +149,19 @@
; OBJ: Range: 0x6
; OBJ: }
; OBJ: }
-; OBJ: InlineSite {
+; OBJ: InlineSiteSym {
; OBJ: PtrParent: 0x0
; OBJ: PtrEnd: 0x0
; OBJ: Inlinee: inlineinc (0x1002)
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x1)
; OBJ: IsParameter (0x1)
; OBJ: ]
; OBJ: VarName: a
; OBJ: }
-; OBJ: DefRangeRegister {
+; OBJ: DefRangeRegisterSym {
; OBJ: Register: 17
; OBJ: LocalVariableAddrRange {
; OBJ: OffsetStart: .text+0xC
@@ -169,13 +169,13 @@
; OBJ: Range: 0x6
; OBJ: }
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x0)
; OBJ: ]
; OBJ: VarName: b
; OBJ: }
-; OBJ: DefRangeRegister {
+; OBJ: DefRangeRegisterSym {
; OBJ: Register: 17
; OBJ: LocalVariableAddrRange {
; OBJ: OffsetStart: .text+0x12
diff --git a/test/DebugInfo/COFF/simple.ll b/test/DebugInfo/COFF/simple.ll
index 3a0b1c9fa7cd..50d121be6942 100644
--- a/test/DebugInfo/COFF/simple.ll
+++ b/test/DebugInfo/COFF/simple.ll
@@ -77,13 +77,13 @@
; OBJ32-NEXT: ]
; OBJ32: Subsection [
; OBJ32-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ32: ProcStart {
+; OBJ32: {{.*}}Proc{{.*}}Sym {
; OBJ32: CodeSize: 0x6
; OBJ32: DisplayName: f
; OBJ32: LinkageName: _f
; OBJ32: }
; OBJ32-NEXT: ProcEnd {
-; OBJ32-NEXT: }
+; OBJ32: }
; OBJ32-NEXT: ]
; OBJ32: FunctionLineTable [
; OBJ32-NEXT: Name: _f
@@ -174,13 +174,13 @@
; OBJ64-NEXT: ]
; OBJ64: Subsection [
; OBJ64-NEXT: SubSectionType: Symbols (0xF1)
-; OBJ64: ProcStart {
+; OBJ64: {{.*}}Proc{{.*}}Sym {
; OBJ64: CodeSize: 0xE
; OBJ64: DisplayName: f
; OBJ64: LinkageName: f
; OBJ64: }
; OBJ64-NEXT: ProcEnd {
-; OBJ64-NEXT: }
+; OBJ64: }
; OBJ64-NEXT: ]
; OBJ64: FunctionLineTable [
; OBJ64-NEXT: Name: f
diff --git a/test/DebugInfo/COFF/typedef.ll b/test/DebugInfo/COFF/typedef.ll
index cf4e3df257de..9d841419c561 100644
--- a/test/DebugInfo/COFF/typedef.ll
+++ b/test/DebugInfo/COFF/typedef.ll
@@ -2,7 +2,7 @@
; CHECK: CodeViewDebugInfo [
; CHECK: Subsection [
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: wchar_t (0x71)
; CHECK: Flags [ (0x0)
; CHECK: ]
@@ -10,7 +10,7 @@
; CHECK: }
; CHECK: Subsection [
; CHECK: SubSectionType: Symbols (0xF1)
-; CHECK: UDT {
+; CHECK: UDTSym {
; CHECK: Type: wchar_t (0x71)
; CHECK: UDTName: XYZ
; CHECK: }
diff --git a/test/DebugInfo/COFF/types-array.ll b/test/DebugInfo/COFF/types-array.ll
index dca3884b1d09..1a4afa8bd219 100644
--- a/test/DebugInfo/COFF/types-array.ll
+++ b/test/DebugInfo/COFF/types-array.ll
@@ -46,7 +46,7 @@
; CHECK: Magic: 0x4
; CHECK: Subsection [
; CHECK: SubSectionType: Symbols (0xF1)
-; CHECK: ProcStart {
+; CHECK: {{.*}}Proc{{.*}}Sym {
; CHECK: PtrParent: 0x0
; CHECK: PtrEnd: 0x0
; CHECK: PtrNext: 0x0
@@ -61,13 +61,13 @@
; CHECK: DisplayName: f
; CHECK: LinkageName: ?f@@YAXXZ
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: 0x1003
; CHECK: Flags [ (0x0)
; CHECK: ]
; CHECK: VarName: a
; CHECK: }
-; CHECK: DefRangeRegisterRel {
+; CHECK: DefRangeRegisterRelSym {
; CHECK: BaseRegister: 22
; CHECK: HasSpilledUDTMember: No
; CHECK: OffsetInParent: 0
diff --git a/test/DebugInfo/COFF/types-basic.ll b/test/DebugInfo/COFF/types-basic.ll
index 4ead4bfc1c4c..4b9fcd864c27 100644
--- a/test/DebugInfo/COFF/types-basic.ll
+++ b/test/DebugInfo/COFF/types-basic.ll
@@ -218,7 +218,7 @@
; CHECK: CodeViewDebugInfo [
; CHECK: Subsection [
; CHECK: SubSectionType: Symbols (0xF1)
-; CHECK: ProcStart {
+; CHECK: {{.*}}Proc{{.*}}Sym {
; CHECK: DbgStart: 0x0
; CHECK: DbgEnd: 0x0
; CHECK: FunctionType: f (0x1002)
@@ -229,68 +229,68 @@
; CHECK: DisplayName: f
; CHECK: LinkageName: ?f@@YAXMN_J@Z
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: float (0x40)
; CHECK: Flags [ (0x1)
; CHECK: IsParameter (0x1)
; CHECK: ]
; CHECK: VarName: p1
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: double (0x41)
; CHECK: Flags [ (0x1)
; CHECK: IsParameter (0x1)
; CHECK: ]
; CHECK: VarName: p2
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: __int64 (0x13)
; CHECK: Flags [ (0x1)
; CHECK: IsParameter (0x1)
; CHECK: ]
; CHECK: VarName: p3
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: int (0x74)
; CHECK: VarName: v1
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: int* (0x674)
; CHECK: VarName: v2
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: const int* (0x1004)
; CHECK: VarName: v21
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: void* (0x603)
; CHECK: VarName: v3
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: int A::* (0x1006)
; CHECK: VarName: v4
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: void A::() A::* (0x100E)
; CHECK: VarName: v5
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: long (0x12)
; CHECK: VarName: l1
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: long (0x12)
; CHECK: VarName: l2
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: unsigned long (0x22)
; CHECK: VarName: l3
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: unsigned long (0x22)
; CHECK: VarName: l4
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: const void* (0x1010)
; CHECK: VarName: v6
; CHECK: }
@@ -298,48 +298,48 @@
; CHECK: }
; CHECK: ]
; CHECK: Subsection [
-; CHECK: ProcStart {
+; CHECK: {{.*}}Proc{{.*}}Sym {
; CHECK: Type: CharTypes (0x1012)
; CHECK: DisplayName: CharTypes
; CHECK: LinkageName: ?CharTypes@@YAXXZ
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: wchar_t (0x71)
; CHECK: Flags [ (0x0)
; CHECK: ]
; CHECK: VarName: w
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: unsigned short (0x21)
; CHECK: Flags [ (0x0)
; CHECK: ]
; CHECK: VarName: us
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: char (0x70)
; CHECK: Flags [ (0x0)
; CHECK: ]
; CHECK: VarName: c
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: unsigned char (0x20)
; CHECK: Flags [ (0x0)
; CHECK: ]
; CHECK: VarName: uc
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: signed char (0x10)
; CHECK: Flags [ (0x0)
; CHECK: ]
; CHECK: VarName: sc
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: char16_t (0x7A)
; CHECK: Flags [ (0x0)
; CHECK: ]
; CHECK: VarName: c16
; CHECK: }
-; CHECK: Local {
+; CHECK: LocalSym {
; CHECK: Type: char32_t (0x7B)
; CHECK: Flags [ (0x0)
; CHECK: ]
diff --git a/test/DebugInfo/COFF/udts.ll b/test/DebugInfo/COFF/udts.ll
index abc688d70a61..735901f7571c 100644
--- a/test/DebugInfo/COFF/udts.ll
+++ b/test/DebugInfo/COFF/udts.ll
@@ -18,37 +18,39 @@ target triple = "i686-pc-windows-msvc18.0.0"
; typedef struct { int x; } U;
; U u;
-; CHECK: ProcStart {
+; CHECK: {{.*}}Proc{{.*}}Sym {
; CHECK: DisplayName: f
; CHECK: LinkageName: ?f@@YAXXZ
; CHECK: }
-; CHECK: UDT {
+; CHECK: UDTSym {
+; CHECK-NEXT: Kind: S_UDT (0x1108)
; CHECK-NEXT: Type: int (0x74)
; CHECK-NEXT: UDTName: f::FOO
; CHECK-NEXT: }
; CHECK-NEXT: ProcEnd {
-; CHECK-NEXT: }
-; CHECK: ProcStart {
+; CHECK: {{.*}}Proc{{.*}}Sym {
; CHECK: DisplayName: g
; CHECK: LinkageName: ?g@@YAMPEAUS@@@Z
; CHECK: }
-; CHECK: UDT {
+; CHECK: UDTSym {
+; CHECK-NEXT: Kind: S_UDT (0x1108)
; CHECK-NEXT: Type: g::pun (0x{{[0-9A-F]+}})
; CHECK-NEXT: UDTName: g::pun
; CHECK-NEXT: }
; CHECK-NEXT: ProcEnd {
-; CHECK-NEXT: }
; CHECK: Subsection
-; CHECK-NOT: ProcStart
-; CHECK: UDT {
+; CHECK-NOT: {{.*}}Proc{{.*}}Sym
+; CHECK: UDTSym {
+; CHECK-NEXT: Kind: S_UDT (0x1108)
; CHECK-NEXT: Type: S (0x{{[0-9A-F]+}})
; CHECK-NEXT: UDTName: S
-; CHECK: UDT {
+; CHECK: UDTSym {
+; CHECK-NEXT: Kind: S_UDT (0x1108)
; CHECK-NEXT: Type: <unnamed-tag> (0x{{[0-9A-F]+}})
; CHECK-NEXT: UDTName: U
-; CHECK-NOT: UDT {
+; CHECK-NOT: UDTSym {
%struct.U = type { i32 }
%struct.S = type { i32 }
diff --git a/test/DebugInfo/Inputs/dwarfdump-str-offsets-macho.o b/test/DebugInfo/Inputs/dwarfdump-str-offsets-macho.o
new file mode 100644
index 000000000000..c0ed489d846c
--- /dev/null
+++ b/test/DebugInfo/Inputs/dwarfdump-str-offsets-macho.o
Binary files differ
diff --git a/test/DebugInfo/Inputs/dwarfdump-str-offsets-macho.s b/test/DebugInfo/Inputs/dwarfdump-str-offsets-macho.s
new file mode 100644
index 000000000000..9ee9ad234d84
--- /dev/null
+++ b/test/DebugInfo/Inputs/dwarfdump-str-offsets-macho.s
@@ -0,0 +1,201 @@
+# Test object to verify dwarfdump handles v5 string offset tables in Mach-O.
+# This is similar to dwarfdump-str-offsets.s with 2 CUs and 1 TU, but no
+# split sections.
+#
+# To generate the test object:
+# llvm-mc -triple i386-apple-darwin9 dwarfdump-str-offsets-macho.s -filetype=obj \
+# -o dwarfdump-str-offsets-macho.o
+
+ .section __DWARF,__debug_str,regular,debug
+str_producer:
+ .asciz "Handmade DWARF producer"
+str_CU1:
+ .asciz "Compile_Unit_1"
+str_CU1_dir:
+ .asciz "/home/test/CU1"
+str_CU2:
+ .asciz "Compile_Unit_2"
+str_CU2_dir:
+ .asciz "/home/test/CU2"
+str_TU:
+ .asciz "Type_Unit"
+str_TU_type:
+ .asciz "MyStruct"
+str_Subprogram:
+ .asciz "MyFunc"
+str_Variable1:
+ .asciz "MyVar1"
+str_Variable2:
+ .asciz "MyVar2"
+str_Variable3:
+ .asciz "MyVar3"
+
+ .section __DWARF,__debug_str_offs,regular,debug
+Ldebug_str_offsets:
+ .long Ldebug_str_offsets_segment0_end-Ldebug_str_offsets_base0
+ .short 5 # DWARF version
+ .short 0 # Padding
+Ldebug_str_offsets_base0:
+ .long str_producer
+ .long str_CU1
+ .long str_CU1_dir
+ .long str_Subprogram
+ .long str_Variable1
+ .long str_Variable2
+ .long str_Variable3
+Ldebug_str_offsets_segment0_end:
+# CU2's contribution
+ .long Ldebug_str_offsets_segment1_end-Ldebug_str_offsets_base1
+ .short 5 # DWARF version
+ .short 0 # Padding
+Ldebug_str_offsets_base1:
+ .long str_producer
+ .long str_CU2
+ .long str_CU2_dir
+Ldebug_str_offsets_segment1_end:
+# The TU's contribution
+ .long Ldebug_str_offsets_segment2_end-Ldebug_str_offsets_base2
+ .short 5 # DWARF version
+ .short 0 # Padding
+Ldebug_str_offsets_base2:
+ .long str_TU
+ .long str_TU_type
+Ldebug_str_offsets_segment2_end:
+
+ .section __DWARF,__debug_abbrev,regular,debug
+Lsection_abbrev:
+ .byte 0x01 # Abbrev code
+ .byte 0x11 # DW_TAG_compile_unit
+ .byte 0x01 # DW_CHILDREN_yes
+ .byte 0x25 # DW_AT_producer
+ .byte 0x1a # DW_FORM_strx
+ .byte 0x03 # DW_AT_name
+ .byte 0x1a # DW_FORM_strx
+ .byte 0x72 # DW_AT_str_offsets_base
+ .byte 0x17 # DW_FORM_sec_offset
+ .byte 0x1b # DW_AT_comp_dir
+ .byte 0x1a # DW_FORM_strx
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x02 # Abbrev code
+ .byte 0x41 # DW_TAG_type_unit
+ .byte 0x01 # DW_CHILDREN_yes
+ .byte 0x03 # DW_AT_name
+ .byte 0x1a # DW_FORM_strx
+ .byte 0x72 # DW_AT_str_offsets_base
+ .byte 0x17 # DW_FORM_sec_offset
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x03 # Abbrev code
+ .byte 0x13 # DW_TAG_structure_type
+ .byte 0x00 # DW_CHILDREN_no (no members)
+ .byte 0x03 # DW_AT_name
+ .byte 0x1a # DW_FORM_strx
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x04 # Abbrev code
+ .byte 0x2e # DW_TAG_subprogram
+ .byte 0x01 # DW_CHILDREN_yes
+ .byte 0x03 # DW_AT_name
+ .byte 0x25 # DW_FORM_strx1
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x05 # Abbrev code
+ .byte 0x34 # DW_TAG_variable
+ .byte 0x00 # DW_CHILDREN_no
+ .byte 0x03 # DW_AT_name
+ .byte 0x26 # DW_FORM_strx2
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x06 # Abbrev code
+ .byte 0x34 # DW_TAG_variable
+ .byte 0x00 # DW_CHILDREN_no
+ .byte 0x03 # DW_AT_name
+ .byte 0x27 # DW_FORM_strx3
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x07 # Abbrev code
+ .byte 0x34 # DW_TAG_variable
+ .byte 0x00 # DW_CHILDREN_no
+ .byte 0x03 # DW_AT_name
+ .byte 0x28 # DW_FORM_strx4
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x00 # EOM(3)
+
+ .section __DWARF,__debug_info,regular,debug
+Lsection_info:
+# DWARF v5 CU header.
+ .long CU1_5_end-CU1_5_version # Length of Unit
+CU1_5_version:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long 0 # Offset Into Abbrev. Section
+# The compile-unit DIE, which has a DW_AT_producer, DW_AT_name,
+# DW_AT_str_offsets_base and DW_AT_comp_dir.
+ .byte 1 # Abbreviation code
+ .byte 0 # The index of the producer string
+ .byte 1 # The index of the CU name string
+ .long Ldebug_str_offsets_base0-Ldebug_str_offsets
+ .byte 2 # The index of the comp dir string
+# A subprogram DIE with DW_AT_name, using DW_FORM_strx1.
+ .byte 4 # Abbreviation code
+ .byte 3 # Subprogram name string (DW_FORM_strx1)
+# A variable DIE with DW_AT_name, using DW_FORM_strx2.
+ .byte 5 # Abbreviation code
+ .short 0x0004 # Variable name string (DW_FORM_strx2)
+# A variable DIE with DW_AT_name, using DW_FORM_strx3.
+ .byte 6 # Abbreviation code
+ .byte 5 # Variable name string (DW_FORM_strx3)
+ .short 0 # Variable name string (DW_FORM_strx3)
+# A variable DIE with DW_AT_name, using DW_FORM_strx4.
+ .byte 7 # Abbreviation code
+ .quad 0x00000006 # Variable name string (DW_FORM_strx4)
+ .byte 0 # NULL
+ .byte 0 # NULL
+ .byte 0 # NULL
+CU1_5_end:
+
+# DWARF v5 CU header
+ .long CU2_5_end-CU2_5_version # Length of Unit
+CU2_5_version:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long 0 # Offset Into Abbrev. Section
+# The compile-unit DIE, which has a DW_AT_producer, DW_AT_name,
+# DW_AT_str_offsets_base and DW_AT_comp_dir.
+ .byte 1 # Abbreviation code
+ .byte 0 # The index of the producer string
+ .byte 1 # The index of the CU name string
+ .long Ldebug_str_offsets_base1-Ldebug_str_offsets
+ .byte 2 # The index of the comp dir string
+ .byte 0 # NULL
+CU2_5_end:
+
+ .section __DWARF,__debug_types,regular,debug
+# DWARF v5 Type unit header.
+TU_5_start:
+ .long TU_5_end-TU_5_version # Length of Unit
+TU_5_version:
+ .short 5 # DWARF version number
+ .byte 2 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long 0 # Offset Into Abbrev. Section
+ .quad 0x0011223344556677 # Type Signature
+ .long TU_5_type-TU_5_start # Type offset
+# The type-unit DIE, which has a name.
+ .byte 2 # Abbreviation code
+ .byte 0 # Index of the unit type name string
+ .long Ldebug_str_offsets_base2-Ldebug_str_offsets # offset into the str_offsets section
+# The type DIE, which has a name.
+TU_5_type:
+ .byte 3 # Abbreviation code
+ .byte 1 # Index of the type name string
+ .byte 0 # NULL
+ .byte 0 # NULL
+TU_5_end:
+
+
+.subsections_via_symbols
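How the two tables above fit together: a DW_FORM_strx* attribute stores an index; the consumer scales it by the entry size (4 bytes in DWARF32), adds the unit's DW_AT_str_offsets_base, reads a 4-byte offset out of __debug_str_offs at that position, and uses that offset to find the NUL-terminated string in __debug_str. In CU1 above, index 3 therefore selects str_Subprogram ("MyFunc"); CU2 records Ldebug_str_offsets_base1 because each contribution has its own base. A minimal C++ sketch of that lookup, assuming DWARF32 entries and a little-endian host (the helper name is illustrative, not LLVM's API):

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    // Resolve a DW_FORM_strx* index against the two sections laid out above.
    // StrOffsetsBase is the unit's DW_AT_str_offsets_base, i.e. the offset of
    // the first 4-byte entry of its contribution (just past the 8-byte header).
    std::string resolveStrx(const std::vector<uint8_t> &DebugStrOffs,
                            const std::vector<uint8_t> &DebugStr,
                            uint64_t StrOffsetsBase, uint64_t Index) {
      uint32_t StrOffset = 0;
      std::memcpy(&StrOffset, &DebugStrOffs[StrOffsetsBase + 4 * Index], 4);
      // Strings in __debug_str are NUL-terminated.
      return std::string(reinterpret_cast<const char *>(&DebugStr[StrOffset]));
    }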
diff --git a/test/DebugInfo/Inputs/dwarfdump-test3.elf-x86-64 space b/test/DebugInfo/Inputs/dwarfdump-test3.elf-x86-64-space
index 7330cd8baa1e..7330cd8baa1e 100755
--- a/test/DebugInfo/Inputs/dwarfdump-test3.elf-x86-64 space
+++ b/test/DebugInfo/Inputs/dwarfdump-test3.elf-x86-64-space
Binary files differ
diff --git a/test/DebugInfo/PDB/Inputs/every-type.cpp b/test/DebugInfo/PDB/Inputs/every-type.cpp
new file mode 100644
index 000000000000..ed715b034300
--- /dev/null
+++ b/test/DebugInfo/PDB/Inputs/every-type.cpp
@@ -0,0 +1,63 @@
+// Build with "cl.exe /Zi /GR- /GX- every-type.cpp /link /debug /nodefaultlib /entry:main"
+
+// clang-format off
+void *__purecall = 0;
+
+void __cdecl operator delete(void *,unsigned int) {}
+
+struct FooStruct { }; // LF_STRUCTURE
+
+class FooClass { // LF_CLASS
+ // LF_FIELDLIST
+ enum NestedEnum { // LF_ENUM
+ // LF_NESTTYPE
+ A, B, C // LF_ENUMERATE
+ };
+
+ void RegularMethod() {} // LF_ARGLIST
+ // LF_ONEMETHOD
+ // LF_MFUNCTION
+
+ void OverloadedMethod(int) {} // LF_METHODLIST
+ // LF_METHOD
+ void OverloadedMethod(int, int) {}
+
+ int HiNibble : 4; // LF_BITFIELD
+ int LoNibble : 4;
+ NestedEnum EnumVariable; // LF_MEMBER
+ static void *StaticMember; // LF_POINTER
+ // LF_STMEMBER
+};
+
+void *FooClass::StaticMember = nullptr;
+
+class Inherit : public FooClass { // LF_BCLASS
+public:
+ virtual ~Inherit() {} // LF_VTSHAPE
+ // LF_VFUNCTAB
+};
+
+class VInherit : public virtual FooClass { // LF_VBCLASS
+
+};
+
+class IVInherit : public VInherit { // LF_IVBCLASS
+};
+
+union TheUnion {
+ int X; // LF_UNION
+};
+
+int SomeArray[7] = {1, 2, 3, 4, 5, 6, 7}; // LF_ARRAY
+
+int main(int argc, char **argv) { // LF_PROCEDURE
+ const int X = 7; // LF_MODIFIER
+
+ FooStruct FooStructInstance;
+ FooClass FooClassInstance;
+ Inherit InheritInstance;
+ VInherit VInheritInstance;
+ IVInherit IVInheritInstance;
+ TheUnion UnionInstance;
+ return SomeArray[argc];
+}
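One convention worth keeping in mind when reading the annotations above and the YAML that follows: type indices below 0x1000 refer to built-in "simple" types (0x0074 is int, 0x0070 is char, and so on), while the records a compiler emits are appended to the TPI stream and numbered from 0x1000 upward in order of appearance, which is how a later record can point back at, say, 0x1007. A small C++ sketch of that numbering rule, as an illustration only (these helpers are not LLVM API):

    #include <cstdint>

    // Indices below 0x1000 denote built-in "simple" types (0x0074 == int);
    // records in the TPI stream are numbered 0x1000, 0x1001, ... in the
    // order they appear, so later records can refer back to earlier ones.
    constexpr uint32_t FirstNonSimpleIndex = 0x1000;

    bool isSimpleTypeIndex(uint32_t TI) { return TI < FirstNonSimpleIndex; }

    // Zero-based position of a TPI record within the stream, e.g. 0x1007 -> 7.
    uint32_t tpiSlot(uint32_t TI) { return TI - FirstNonSimpleIndex; }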
diff --git a/test/DebugInfo/PDB/Inputs/every-type.pdb b/test/DebugInfo/PDB/Inputs/every-type.pdb
new file mode 100644
index 000000000000..64996d61d3e7
--- /dev/null
+++ b/test/DebugInfo/PDB/Inputs/every-type.pdb
Binary files differ
diff --git a/test/DebugInfo/PDB/Inputs/every-type.yaml b/test/DebugInfo/PDB/Inputs/every-type.yaml
new file mode 100644
index 000000000000..8f23e8ad5e89
--- /dev/null
+++ b/test/DebugInfo/PDB/Inputs/every-type.yaml
@@ -0,0 +1,272 @@
+---
+TpiStream:
+ Records:
+ # int* [Index: 0x1000]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 116 # int
+ Attrs: 32778
+ # const int* [Index: 0x1001]
+ - Kind: LF_MODIFIER
+ Modifier:
+ ModifiedType: 0x1000
+ Modifiers: [ Const ]
+ # char** [Index: 0x1002]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 1136 # char*
+ Attrs: 32778
+ # (int, char **) [Index: 0x1003]
+ - Kind: LF_ARGLIST
+ ArgList:
+ ArgIndicies: [ 116, 0x1002 ]
+ # (int, double) [Index: 0x1004]
+ - Kind: LF_ARGLIST
+ ArgList:
+ ArgIndicies: [ 116, 65 ] # (int, double)
+ # int main(int argc, char **argv) [Index: 0x1005]
+ - Kind: LF_PROCEDURE
+ Procedure:
+ ReturnType: 117 # int
+ CallConv: NearC # __cdecl
+ Options: [ None ]
+ ParameterCount: 2
+ ArgumentList: 0x1003 # (int, char**)
+ # <label> [Index: 0x1006]
+ - Kind: LF_LABEL
+ Label:
+ Mode: Near
+ # <forward decl>
+ # class FooClass; [Index: 0x1007]
+ - Kind: LF_STRUCTURE
+ Class:
+ MemberCount: 0
+ Options: [ None, ForwardReference ]
+ FieldList: 0
+ Name: 'FooClass'
+ DerivationList: 0
+ VTableShape: 0
+ Size: 0
+ # const FooClass* [Index: 0x1008]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 0x1007 # FooClass
+ Attrs: 33802 # const
+ # int (FooClass::)(int, char **) [Index: 0x1009]
+ - Kind: LF_MFUNCTION
+ MemberFunction:
+ ReturnType: 116 # int
+ ClassType: 0x1007 # FooClass
+ ThisType: 0x1008 # const FooClass*
+ CallConv: ThisCall
+ Options: [ None ]
+ ParameterCount: 2
+ ArgumentList: 0x1003 # (int, char**)
+ ThisPointerAdjustment: 0
+ # int (FooClass::)(int, double) [Index: 0x100A]
+ - Kind: LF_MFUNCTION
+ MemberFunction:
+ ReturnType: 116 # int
+ ClassType: 0x1007 # FooClass
+ ThisType: 0x1008 # const FooClass*
+ CallConv: ThisCall
+ Options: [ None ]
+ ParameterCount: 2
+ ArgumentList: 0x1004 # (int, double)
+ ThisPointerAdjustment: 0
+ # <method overload list>
+ # int (FooClass::)(int, char **)
+ # int (FooClass::)(int, double) [Index: 0x100B]
+ - Kind: LF_METHODLIST
+ MethodOverloadList:
+ Methods:
+ - Type: 0x1009 # int (FooClass::)(int, char **)
+ Attrs: 3 # public
+ VFTableOffset: -1
+ Name: ''
+ - Type: 0x100A # int (FooClass::)(int, double)
+ Attrs: 3 # public
+ VFTableOffset: -1
+ Name: ''
+ # <Field List>
+ # A, B, C [Index: 0x100C]
+ - Kind: LF_FIELDLIST
+ FieldList:
+ - Kind: LF_ENUMERATE
+ Enumerator:
+ Attrs: 3
+ Value: 0
+ Name: A
+ - Kind: LF_ENUMERATE
+ Enumerator:
+ Attrs: 3
+ Value: 1
+ Name: B
+ - Kind: LF_ENUMERATE
+ Enumerator:
+ Attrs: 3
+ Value: 2
+ Name: C
+ # enum FooClass::Enum : uint32_t {
+ # A, B, C
+ # }; [Index: 0x100D]
+ - Kind: LF_ENUM
+ Enum:
+ NumEnumerators: 3
+ Options: [ None, Nested ]
+ FieldList: 0x100C
+ Name: 'FooClass::Enum'
+ UnderlyingType: 117
+ # <Field List>
+ # public:
+ # enum FooEnum : uint32_t {
+ # A, B, C
+ # };
+ # FooEnum EnumMember;
+ # static int StaticInt;
+ # int FooClass::OverloadedMethod(int, char **);
+ # int FooClass::OverloadedMethod(int, double);
+ # int FooClass::RegularMethod(int, double);
+ # [Index: 0x100E]
+ - Kind: LF_FIELDLIST
+ FieldList:
+ # enum FooEnum : uint32_t {
+ # A, B, C
+ # };
+ - Kind: LF_NESTTYPE
+ NestedType:
+ Type: 0x100D
+ Name: FooEnum
+ # FooEnum EnumMember;
+ - Kind: LF_MEMBER
+ DataMember:
+ Attrs: 3 # public
+ Type: 0x100D # FooClass::Enum
+ FieldOffset: 0
+ Name: EnumMember
+ # static int StaticInt;
+ - Kind: LF_STMEMBER
+ StaticDataMember:
+ Attrs: 3 # public
+ Type: 116 # int
+ Name: StaticInt
+ # int FooClass::OverloadedMethod(int, char **);
+ # int FooClass::OverloadedMethod(int, double);
+ - Kind: LF_METHOD
+ OverloadedMethod:
+ NumOverloads: 2
+ MethodList: 0x100B
+ Name: OverloadedMethod
+ # int FooClass::RegularMethod(int, double);
+ - Kind: LF_ONEMETHOD
+ OneMethod:
+ Type: 0x100A
+ Attrs: 3 # public
+ VFTableOffset: -1
+ Name: RegularMethod
+ # class FooClass {
+ # public:
+ # enum FooEnum : uint32_t {
+ # A, B, C
+ # };
+ # FooEnum EnumMember;
+ # static int StaticInt;
+ # int FooClass::OverloadedMethod(int, char **);
+ # int FooClass::OverloadedMethod(int, double);
+ # int FooClass::RegularMethod(int, double);
+ # }; [Index: 0x100F]
+ - Kind: LF_CLASS
+ Class:
+ MemberCount: 6
+ Options: [ None ]
+ FieldList: 0x100E
+ Name: 'FooClass'
+ DerivationList: 0
+ VTableShape: 0
+ Size: 4
+ # struct FooStructure; [Index: 0x1010]
+ - Kind: LF_STRUCTURE
+ Class:
+ MemberCount: 6
+ Options: [ None ]
+ FieldList: 0x100E
+ Name: 'FooStructure'
+ DerivationList: 0
+ VTableShape: 0
+ Size: 4
+ # interface FooInterface; [Index: 0x1011]
+ - Kind: LF_INTERFACE
+ Class:
+ MemberCount: 6
+ Options: [ None ]
+ FieldList: 0x100E
+ Name: 'FooInterface'
+ DerivationList: 0
+ VTableShape: 0
+ Size: 4
+ # <field list>
+ # : public FooClass [Index: 0x1012]
+ - Kind: LF_FIELDLIST
+ FieldList:
+ - Kind: LF_BCLASS
+ Attrs: 3 # public
+ Type: 0x100F # FooClass
+ Offset: 0
+ # <field list>
+ # : public virtual FooClass [Index: 0x1013]
+ - Kind: LF_FIELDLIST
+ FieldList:
+ - Kind: LF_VBCLASS
+ Attrs: 3 # public
+ BaseType: 0x100F # FooClass
+ VBPtrType: 0x1001 # const int *
+ VBPtrOffset: 0
+ VTableIndex: 1
+ # class Inherit : public FooClass {}; [Index: 0x1014]
+ - Kind: LF_STRUCTURE
+ Class:
+ MemberCount: 1
+ Options: [ None ]
+ FieldList: 0x100E
+ Name: 'Inherit'
+ DerivationList: 0x1012
+ VTableShape: 0
+ Size: 4
+ # class VInherit : public virtual FooClass {}; [Index: 0x1015]
+ - Kind: LF_STRUCTURE
+ Class:
+ MemberCount: 1
+ Options: [ None ]
+ FieldList: 0x100E
+ Name: 'Inherit'
+ DerivationList: 0x1012
+ VTableShape: 0
+ Size: 4
+
+# // Member type records. These are generally not length prefixed, and appear
+# // inside of a field list record.
+# MEMBER_RECORD(LF_VFUNCTAB, 0x1409, VFPtr)
+
+# MEMBER_RECORD_ALIAS(LF_BINTERFACE, 0x151a, BaseInterface, BaseClass)
+
+# MEMBER_RECORD_ALIAS(LF_IVBCLASS, 0x1402, IndirectVirtualBaseClass,
+# VirtualBaseClass)
+
+
+# TYPE_RECORD(LF_ARRAY, 0x1503, Array)
+# TYPE_RECORD(LF_UNION, 0x1506, Union)
+# TYPE_RECORD(LF_TYPESERVER2, 0x1515, TypeServer2)
+# TYPE_RECORD(LF_VFTABLE, 0x151d, VFTable)
+# TYPE_RECORD(LF_VTSHAPE, 0x000a, VFTableShape)
+
+# TYPE_RECORD(LF_BITFIELD, 0x1205, BitField)
+
+
+# // ID leaf records. Subsequent leaf types may be referenced from .debug$S.
+# TYPE_RECORD(LF_FUNC_ID, 0x1601, FuncId)
+# TYPE_RECORD(LF_MFUNC_ID, 0x1602, MemberFuncId)
+# TYPE_RECORD(LF_BUILDINFO, 0x1603, BuildInfo)
+# TYPE_RECORD(LF_SUBSTR_LIST, 0x1604, StringList)
+# TYPE_RECORD(LF_STRING_ID, 0x1605, StringId)
+# TYPE_RECORD(LF_UDT_SRC_LINE, 0x1606, UdtSourceLine)
+# TYPE_RECORD(LF_UDT_MOD_SRC_LINE, 0x1607, UdtModSourceLine)
diff --git a/test/DebugInfo/PDB/every-type.test b/test/DebugInfo/PDB/every-type.test
new file mode 100644
index 000000000000..e6b9c15815d0
--- /dev/null
+++ b/test/DebugInfo/PDB/every-type.test
@@ -0,0 +1,261 @@
+The test input (every-type.pdb) is generated from some short and trivial C++ code
+that exercises the entire type system to generate every possible type record that
+we claim to understand. We then test this in two ways:
+ 1) We just dump the output for the purposes of readability. This tests that
+ we can dump every possible type record.
+ 2) We dump the output to yaml, re-generate a PDB with the same type stream,
+ and then run test 1 on the new PDB. This verifies that the round trip does
+ not change the type records.
+
+
+RUN: llvm-pdbutil dump -type-index=0x1018,0x102A,0x103B,0x1093,0x1095,0x1096,0x1098 \
+RUN: -dependents %p/Inputs/every-type.pdb | FileCheck --check-prefix=TYPES %s
+
+RUN: llvm-pdbutil pdb2yaml -tpi-stream -ipi-stream %p/Inputs/every-type.pdb > %t.pdb.yaml
+RUN: llvm-pdbutil yaml2pdb -pdb=%t.yaml.pdb %t.pdb.yaml
+RUN: llvm-pdbutil dump -type-index=0x1018,0x102A,0x103B,0x1093,0x1095,0x1096,0x1098 \
+RUN: -dependents %t.yaml.pdb | FileCheck --check-prefix=TYPES %s
+
+TYPES: Types (TPI Stream)
+TYPES-NEXT: ============================================================
+TYPES-NEXT: Showing 7 records and their dependents (73 records total)
+TYPES-NEXT: 0x1005 | LF_MODIFIER [size = 12]
+TYPES-NEXT: referent = 0x0074 (int), modifiers = const
+TYPES-NEXT: 0x1006 | LF_CLASS [size = 48] `FooClass`
+TYPES-NEXT: unique name: `.?AVFooClass@@`
+TYPES-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
+TYPES-NEXT: options: forward ref | has unique name
+TYPES-NEXT: 0x1007 | LF_VTSHAPE [size = 8]
+TYPES-NEXT: 0x1008 | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x1007, mode = pointer, opts = None, kind = ptr32
+TYPES-NEXT: 0x1009 | LF_CLASS [size = 44] `Inherit`
+TYPES-NEXT: unique name: `.?AVInherit@@`
+TYPES-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
+TYPES-NEXT: options: forward ref | has unique name
+TYPES-NEXT: 0x100A | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x1009, mode = pointer, opts = const, kind = ptr32
+TYPES-NEXT: 0x100B | LF_ARGLIST [size = 8]
+TYPES-NEXT: 0x100C | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x100B
+TYPES-NEXT: class type = 0x1009, this type = 0x100A, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x100D | LF_MODIFIER [size = 12]
+TYPES-NEXT: referent = 0x1009, modifiers = const
+TYPES-NEXT: 0x100E | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x100D, mode = ref, opts = None, kind = ptr32
+TYPES-NEXT: 0x100F | LF_ARGLIST [size = 12]
+TYPES-NEXT: 0x100E: `const Inherit&`
+TYPES-NEXT: 0x1010 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x100F
+TYPES-NEXT: class type = 0x1009, this type = 0x100A, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = constructor
+TYPES-NEXT: 0x1011 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x100B
+TYPES-NEXT: class type = 0x1009, this type = 0x100A, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = constructor
+TYPES-NEXT: 0x1012 | LF_METHODLIST [size = 20]
+TYPES-NEXT: - Method [type = 0x1010, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: - Method [type = 0x1011, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: 0x1013 | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x1009, mode = ref, opts = None, kind = ptr32
+TYPES-NEXT: 0x1014 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x1013, # args = 1, param list = 0x100F
+TYPES-NEXT: class type = 0x1009, this type = 0x100A, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x1015 | LF_ARGLIST [size = 12]
+TYPES-NEXT: 0x0075 (unsigned): `unsigned`
+TYPES-NEXT: 0x1016 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0403 (void*), # args = 1, param list = 0x1015
+TYPES-NEXT: class type = 0x1009, this type = 0x100A, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x1017 | LF_FIELDLIST [size = 152]
+TYPES-NEXT: - LF_BCLASS
+TYPES-NEXT: type = 0x1006, offset = 4, attrs = public
+TYPES-NEXT: - LF_VFUNCTAB type = 0x1008
+TYPES-NEXT: - LF_ONEMETHOD [name = `~Inherit`]
+TYPES-NEXT: type = 0x100C, vftable offset = 0, attrs = public intro virtual
+TYPES-NEXT: - LF_METHOD [name = `Inherit`, # overloads = 2, overload list = 0x1012]
+TYPES-NEXT: - LF_ONEMETHOD [name = `operator=`]
+TYPES-NEXT: type = 0x1014, vftable offset = -1, attrs = public compiler-generated
+TYPES-NEXT: - LF_ONEMETHOD [name = `__local_vftable_ctor_closure`]
+TYPES-NEXT: type = 0x100C, vftable offset = -1, attrs = public compiler-generated
+TYPES-NEXT: - LF_ONEMETHOD [name = `__vecDelDtor`]
+TYPES-NEXT: type = 0x1016, vftable offset = 0, attrs = public intro virtual compiler-generated
+TYPES-NEXT: 0x1018 | LF_CLASS [size = 44] `Inherit`
+TYPES-NEXT: unique name: `.?AVInherit@@`
+TYPES-NEXT: vtable: 0x1007, base list: <no type>, field list: 0x1017
+TYPES-NEXT: options: has ctor / dtor | has unique name | overloaded operator | overloaded operator=
+TYPES-NEXT: 0x1019 | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x1005, mode = pointer, opts = None, kind = ptr32
+TYPES-NEXT: 0x101A | LF_CLASS [size = 48] `VInherit`
+TYPES-NEXT: unique name: `.?AVVInherit@@`
+TYPES-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
+TYPES-NEXT: options: forward ref | has unique name
+TYPES-NEXT: 0x101B | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x101A, mode = pointer, opts = const, kind = ptr32
+TYPES-NEXT: 0x101C | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x101A, mode = rvalue ref, opts = None, kind = ptr32
+TYPES-NEXT: 0x101D | LF_ARGLIST [size = 12]
+TYPES-NEXT: 0x101C: `VInherit&&`
+TYPES-NEXT: 0x101E | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x101D
+TYPES-NEXT: class type = 0x101A, this type = 0x101B, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = constructor with virtual bases | constructor
+TYPES-NEXT: 0x101F | LF_MODIFIER [size = 12]
+TYPES-NEXT: referent = 0x101A, modifiers = const
+TYPES-NEXT: 0x1020 | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x101F, mode = ref, opts = None, kind = ptr32
+TYPES-NEXT: 0x1021 | LF_ARGLIST [size = 12]
+TYPES-NEXT: 0x1020: `const VInherit&`
+TYPES-NEXT: 0x1022 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1021
+TYPES-NEXT: class type = 0x101A, this type = 0x101B, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = constructor with virtual bases | constructor
+TYPES-NEXT: 0x1023 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x100B
+TYPES-NEXT: class type = 0x101A, this type = 0x101B, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = constructor with virtual bases | constructor
+TYPES-NEXT: 0x1024 | LF_METHODLIST [size = 28]
+TYPES-NEXT: - Method [type = 0x101E, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: - Method [type = 0x1022, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: - Method [type = 0x1023, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: 0x1025 | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x101A, mode = ref, opts = None, kind = ptr32
+TYPES-NEXT: 0x1026 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x1025, # args = 1, param list = 0x101D
+TYPES-NEXT: class type = 0x101A, this type = 0x101B, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x1027 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x1025, # args = 1, param list = 0x1021
+TYPES-NEXT: class type = 0x101A, this type = 0x101B, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x1028 | LF_METHODLIST [size = 20]
+TYPES-NEXT: - Method [type = 0x1026, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: - Method [type = 0x1027, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: 0x1029 | LF_FIELDLIST [size = 60]
+TYPES-NEXT: - LF_VBCLASS
+TYPES-NEXT: base = 0x1006, vbptr = 0x1019, vbptr offset = 0, vtable index = 1
+TYPES-NEXT: attrs = public
+TYPES-NEXT: - LF_METHOD [name = `VInherit`, # overloads = 3, overload list = 0x1024]
+TYPES-NEXT: - LF_METHOD [name = `operator=`, # overloads = 2, overload list = 0x1028]
+TYPES-NEXT: 0x102A | LF_CLASS [size = 48] `VInherit`
+TYPES-NEXT: unique name: `.?AVVInherit@@`
+TYPES-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1029
+TYPES-NEXT: options: has ctor / dtor | has unique name | overloaded operator | overloaded operator=
+TYPES-NEXT: 0x102B | LF_CLASS [size = 48] `IVInherit`
+TYPES-NEXT: unique name: `.?AVIVInherit@@`
+TYPES-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
+TYPES-NEXT: options: forward ref | has unique name
+TYPES-NEXT: 0x102C | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x102B, mode = pointer, opts = const, kind = ptr32
+TYPES-NEXT: 0x102D | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x102B, mode = rvalue ref, opts = None, kind = ptr32
+TYPES-NEXT: 0x102E | LF_ARGLIST [size = 12]
+TYPES-NEXT: 0x102D: `IVInherit&&`
+TYPES-NEXT: 0x102F | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x102E
+TYPES-NEXT: class type = 0x102B, this type = 0x102C, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = constructor with virtual bases | constructor
+TYPES-NEXT: 0x1030 | LF_MODIFIER [size = 12]
+TYPES-NEXT: referent = 0x102B, modifiers = const
+TYPES-NEXT: 0x1031 | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x1030, mode = ref, opts = None, kind = ptr32
+TYPES-NEXT: 0x1032 | LF_ARGLIST [size = 12]
+TYPES-NEXT: 0x1031: `const IVInherit&`
+TYPES-NEXT: 0x1033 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1032
+TYPES-NEXT: class type = 0x102B, this type = 0x102C, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = constructor with virtual bases | constructor
+TYPES-NEXT: 0x1034 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x100B
+TYPES-NEXT: class type = 0x102B, this type = 0x102C, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = constructor with virtual bases | constructor
+TYPES-NEXT: 0x1035 | LF_METHODLIST [size = 28]
+TYPES-NEXT: - Method [type = 0x102F, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: - Method [type = 0x1033, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: - Method [type = 0x1034, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: 0x1036 | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x102B, mode = ref, opts = None, kind = ptr32
+TYPES-NEXT: 0x1037 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x1036, # args = 1, param list = 0x102E
+TYPES-NEXT: class type = 0x102B, this type = 0x102C, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x1038 | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x1036, # args = 1, param list = 0x1032
+TYPES-NEXT: class type = 0x102B, this type = 0x102C, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x1039 | LF_METHODLIST [size = 20]
+TYPES-NEXT: - Method [type = 0x1037, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: - Method [type = 0x1038, vftable offset = -1, attrs = public compiler-generated]
+TYPES-NEXT: 0x103A | LF_FIELDLIST [size = 72]
+TYPES-NEXT: - LF_BCLASS
+TYPES-NEXT: type = 0x101A, offset = 0, attrs = public
+TYPES-NEXT: - LF_IVBCLASS
+TYPES-NEXT: base = 0x1006, vbptr = 0x1019, vbptr offset = 0, vtable index = 1
+TYPES-NEXT: attrs = public
+TYPES-NEXT: - LF_METHOD [name = `IVInherit`, # overloads = 3, overload list = 0x1035]
+TYPES-NEXT: - LF_METHOD [name = `operator=`, # overloads = 2, overload list = 0x1039]
+TYPES-NEXT: 0x103B | LF_CLASS [size = 48] `IVInherit`
+TYPES-NEXT: unique name: `.?AVIVInherit@@`
+TYPES-NEXT: vtable: <no type>, base list: <no type>, field list: 0x103A
+TYPES-NEXT: options: has ctor / dtor | has unique name | overloaded operator | overloaded operator=
+TYPES-NEXT: 0x1087 | LF_FIELDLIST [size = 28]
+TYPES-NEXT: - LF_ENUMERATE [A = 0]
+TYPES-NEXT: - LF_ENUMERATE [B = 1]
+TYPES-NEXT: - LF_ENUMERATE [C = 2]
+TYPES-NEXT: 0x1088 | LF_ENUM [size = 64] `FooClass::NestedEnum`
+TYPES-NEXT: unique name: `.?AW4NestedEnum@FooClass@@`
+TYPES-NEXT: field list: 0x1087, underlying type: 0x0074 (int)
+TYPES-NEXT: options: has unique name | is nested
+TYPES-NEXT: 0x1089 | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x1006, mode = pointer, opts = const, kind = ptr32
+TYPES-NEXT: 0x108A | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x100B
+TYPES-NEXT: class type = 0x1006, this type = 0x1089, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x108B | LF_ARGLIST [size = 16]
+TYPES-NEXT: 0x0074 (int): `int`
+TYPES-NEXT: 0x0074 (int): `int`
+TYPES-NEXT: 0x108C | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 2, param list = 0x108B
+TYPES-NEXT: class type = 0x1006, this type = 0x1089, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x108D | LF_ARGLIST [size = 12]
+TYPES-NEXT: 0x0074 (int): `int`
+TYPES-NEXT: 0x108E | LF_MFUNCTION [size = 28]
+TYPES-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x108D
+TYPES-NEXT: class type = 0x1006, this type = 0x1089, this adjust = 0
+TYPES-NEXT: calling conv = thiscall, options = None
+TYPES-NEXT: 0x108F | LF_METHODLIST [size = 20]
+TYPES-NEXT: - Method [type = 0x108C, vftable offset = -1, attrs = private]
+TYPES-NEXT: - Method [type = 0x108E, vftable offset = -1, attrs = private]
+TYPES-NEXT: 0x1090 | LF_BITFIELD [size = 12]
+TYPES-NEXT: type = 0x0074 (int), bit offset = 0, # bits = 4
+TYPES-NEXT: 0x1091 | LF_BITFIELD [size = 12]
+TYPES-NEXT: type = 0x0074 (int), bit offset = 4, # bits = 4
+TYPES-NEXT: 0x1092 | LF_FIELDLIST [size = 164]
+TYPES-NEXT: - LF_NESTTYPE [name = `NestedEnum`, parent = 0x1088]
+TYPES-NEXT: - LF_ONEMETHOD [name = `RegularMethod`]
+TYPES-NEXT: type = 0x108A, vftable offset = -1, attrs = private
+TYPES-NEXT: - LF_METHOD [name = `OverloadedMethod`, # overloads = 2, overload list = 0x108F]
+TYPES-NEXT: - LF_MEMBER [name = `HiNibble`, Type = 0x1090, offset = 0, attrs = private]
+TYPES-NEXT: - LF_MEMBER [name = `LoNibble`, Type = 0x1091, offset = 0, attrs = private]
+TYPES-NEXT: - LF_MEMBER [name = `EnumVariable`, Type = 0x1088, offset = 4, attrs = private]
+TYPES-NEXT: - LF_STMEMBER [name = `StaticMember`, type = 0x0403 (void*), attrs = private]
+TYPES-NEXT: 0x1093 | LF_CLASS [size = 48] `FooClass`
+TYPES-NEXT: unique name: `.?AVFooClass@@`
+TYPES-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1092
+TYPES-NEXT: options: contains nested class | has unique name
+TYPES-NEXT: 0x1094 | LF_FIELDLIST [size = 16]
+TYPES-NEXT: - LF_MEMBER [name = `X`, Type = 0x0074 (int), offset = 0, attrs = public]
+TYPES-NEXT: 0x1095 | LF_UNION [size = 40] `TheUnion`
+TYPES-NEXT: unique name: `.?ATTheUnion@@`
+TYPES-NEXT: field list: 0x1094
+TYPES-NEXT: options: has unique name | sealed
+TYPES-NEXT: 0x1096 | LF_PROCEDURE [size = 16]
+TYPES-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x100B
+TYPES-NEXT: calling conv = cdecl, options = None
+TYPES-NEXT: 0x1097 | LF_POINTER [size = 12]
+TYPES-NEXT: referent = 0x1096, mode = pointer, opts = const, kind = ptr32
+TYPES-NEXT: 0x1098 | LF_ARRAY [size = 16]
+TYPES-NEXT: size: 4, index type: 0x0022 (unsigned long), element type: 0x1097
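The header line "Showing 7 records and their dependents (73 records total)" reflects what -dependents does: starting from the type indices named on the command line, the dumper keeps pulling in every index a selected record refers to until the set stops growing, then prints the closure in index order. A generic C++ worklist sketch of that closure, using a hypothetical GetRefs callback (this illustrates the idea, not llvm-pdbutil's actual implementation):

    #include <cstdint>
    #include <functional>
    #include <set>
    #include <vector>

    // Worklist closure: starting from the requested indices, keep adding every
    // type index a selected record refers to until the set stops growing.
    // GetRefs is a hypothetical callback returning the indices one record uses.
    std::set<uint32_t> dependentClosure(
        const std::vector<uint32_t> &Roots,
        const std::function<std::vector<uint32_t>(uint32_t)> &GetRefs) {
      std::set<uint32_t> Seen(Roots.begin(), Roots.end());
      std::vector<uint32_t> Worklist(Roots.begin(), Roots.end());
      while (!Worklist.empty()) {
        uint32_t TI = Worklist.back();
        Worklist.pop_back();
        for (uint32_t Ref : GetRefs(TI))
          if (Seen.insert(Ref).second) // visit each index only once
            Worklist.push_back(Ref);
      }
      return Seen;
    }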
diff --git a/test/DebugInfo/PDB/pdbdump-headers.test b/test/DebugInfo/PDB/pdbdump-headers.test
index 3b7895e06b77..1887af2e8268 100644
--- a/test/DebugInfo/PDB/pdbdump-headers.test
+++ b/test/DebugInfo/PDB/pdbdump-headers.test
@@ -67,9 +67,11 @@ ALL-NEXT: ============================================================
ALL-NEXT: Mod 0000 | Name: `d:\src\llvm\test\DebugInfo\PDB\Inputs\empty.obj`:
ALL-NEXT: Obj: `d:\src\llvm\test\DebugInfo\PDB\Inputs\empty.obj`:
ALL-NEXT: debug stream: 12, # files: 1, has ec info: false
+ALL-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
ALL-NEXT: Mod 0001 | Name: `* Linker *`:
ALL-NEXT: Obj: ``:
ALL-NEXT: debug stream: 14, # files: 0, has ec info: false
+ALL-NEXT: pdb file ni: 1 `{{.*empty.pdb}}`, src file ni: 0 ``
ALL: Files
ALL-NEXT: ============================================================
ALL-NEXT: Mod 0000 | `d:\src\llvm\test\DebugInfo\PDB\Inputs\empty.obj`:
@@ -99,13 +101,11 @@ ALL-NEXT: - LF_ENUMERATE [single = 2]
ALL-NEXT: - LF_ENUMERATE [free = 3]
ALL-NEXT: - LF_ENUMERATE [neutral = 4]
ALL-NEXT: - LF_ENUMERATE [both = 5]
-ALL-NEXT: 0x1003 | LF_ENUM [size = 120, hash = 208239]
-ALL-NEXT: name: `__vc_attributes::threadingAttribute::threading_e`
+ALL-NEXT: 0x1003 | LF_ENUM [size = 120, hash = 208239] `__vc_attributes::threadingAttribute::threading_e`
ALL-NEXT: unique name: `.?AW4threading_e@threadingAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x1002, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x1004 | LF_STRUCTURE [size = 100, hash = 16377]
-ALL-NEXT: class name: `__vc_attributes::threadingAttribute`
+ALL-NEXT: 0x1004 | LF_STRUCTURE [size = 100, hash = 16377] `__vc_attributes::threadingAttribute`
ALL-NEXT: unique name: `.?AUthreadingAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
@@ -128,8 +128,7 @@ ALL-NEXT: 0x100A | LF_FIELDLIST [size = 68, hash = 185421]
ALL-NEXT: - LF_NESTTYPE [name = `threading_e`, parent = 0x1003]
ALL-NEXT: - LF_METHOD [name = `threadingAttribute`, # overloads = 2, overload list = 0x1009]
ALL-NEXT: - LF_MEMBER [name = `value`, Type = 0x1003, offset = 0, attrs = public]
-ALL-NEXT: 0x100B | LF_STRUCTURE [size = 100, hash = 119540]
-ALL-NEXT: class name: `__vc_attributes::threadingAttribute`
+ALL-NEXT: 0x100B | LF_STRUCTURE [size = 100, hash = 119540] `__vc_attributes::threadingAttribute`
ALL-NEXT: unique name: `.?AUthreadingAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x100A
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
@@ -137,13 +136,11 @@ ALL-NEXT: 0x100C | LF_FIELDLIST [size = 48, hash = 261871]
ALL-NEXT: - LF_ENUMERATE [native = 0]
ALL-NEXT: - LF_ENUMERATE [com = 1]
ALL-NEXT: - LF_ENUMERATE [managed = 2]
-ALL-NEXT: 0x100D | LF_ENUM [size = 120, hash = 198119]
-ALL-NEXT: name: `__vc_attributes::event_receiverAttribute::type_e`
+ALL-NEXT: 0x100D | LF_ENUM [size = 120, hash = 198119] `__vc_attributes::event_receiverAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@event_receiverAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x100C, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x100E | LF_STRUCTURE [size = 112, hash = 48056]
-ALL-NEXT: class name: `__vc_attributes::event_receiverAttribute`
+ALL-NEXT: 0x100E | LF_STRUCTURE [size = 112, hash = 48056] `__vc_attributes::event_receiverAttribute`
ALL-NEXT: unique name: `.?AUevent_receiverAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
@@ -175,8 +172,7 @@ ALL-NEXT: - LF_NESTTYPE [name = `type_e`, parent = 0x100D]
ALL-NEXT: - LF_METHOD [name = `event_receiverAttribute`, # overloads = 3, overload list = 0x1015]
ALL-NEXT: - LF_MEMBER [name = `type`, Type = 0x100D, offset = 0, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `layout_dependent`, Type = 0x0030 (bool), offset = 4, attrs = public]
-ALL-NEXT: 0x1017 | LF_STRUCTURE [size = 112, hash = 148734]
-ALL-NEXT: class name: `__vc_attributes::event_receiverAttribute`
+ALL-NEXT: 0x1017 | LF_STRUCTURE [size = 112, hash = 148734] `__vc_attributes::event_receiverAttribute`
ALL-NEXT: unique name: `.?AUevent_receiverAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1016
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
@@ -184,13 +180,11 @@ ALL-NEXT: 0x1018 | LF_FIELDLIST [size = 48, hash = 81128]
ALL-NEXT: - LF_ENUMERATE [never = 0]
ALL-NEXT: - LF_ENUMERATE [allowed = 1]
ALL-NEXT: - LF_ENUMERATE [always = 2]
-ALL-NEXT: 0x1019 | LF_ENUM [size = 116, hash = 60158]
-ALL-NEXT: name: `__vc_attributes::aggregatableAttribute::type_e`
+ALL-NEXT: 0x1019 | LF_ENUM [size = 116, hash = 60158] `__vc_attributes::aggregatableAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@aggregatableAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x1018, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x101A | LF_STRUCTURE [size = 108, hash = 217249]
-ALL-NEXT: class name: `__vc_attributes::aggregatableAttribute`
+ALL-NEXT: 0x101A | LF_STRUCTURE [size = 108, hash = 217249] `__vc_attributes::aggregatableAttribute`
ALL-NEXT: unique name: `.?AUaggregatableAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
@@ -213,26 +207,22 @@ ALL-NEXT: 0x1020 | LF_FIELDLIST [size = 68, hash = 6214]
ALL-NEXT: - LF_NESTTYPE [name = `type_e`, parent = 0x1019]
ALL-NEXT: - LF_METHOD [name = `aggregatableAttribute`, # overloads = 2, overload list = 0x101F]
ALL-NEXT: - LF_MEMBER [name = `type`, Type = 0x1019, offset = 0, attrs = public]
-ALL-NEXT: 0x1021 | LF_STRUCTURE [size = 108, hash = 94935]
-ALL-NEXT: class name: `__vc_attributes::aggregatableAttribute`
+ALL-NEXT: 0x1021 | LF_STRUCTURE [size = 108, hash = 94935] `__vc_attributes::aggregatableAttribute`
ALL-NEXT: unique name: `.?AUaggregatableAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1020
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
-ALL-NEXT: 0x1022 | LF_ENUM [size = 116, hash = 151449]
-ALL-NEXT: name: `__vc_attributes::event_sourceAttribute::type_e`
+ALL-NEXT: 0x1022 | LF_ENUM [size = 116, hash = 151449] `__vc_attributes::event_sourceAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@event_sourceAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x100C, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
ALL-NEXT: 0x1023 | LF_FIELDLIST [size = 28, hash = 135589]
ALL-NEXT: - LF_ENUMERATE [speed = 0]
ALL-NEXT: - LF_ENUMERATE [size = 1]
-ALL-NEXT: 0x1024 | LF_ENUM [size = 124, hash = 73373]
-ALL-NEXT: name: `__vc_attributes::event_sourceAttribute::optimize_e`
+ALL-NEXT: 0x1024 | LF_ENUM [size = 124, hash = 73373] `__vc_attributes::event_sourceAttribute::optimize_e`
ALL-NEXT: unique name: `.?AW4optimize_e@event_sourceAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x1023, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x1025 | LF_STRUCTURE [size = 108, hash = 96512]
-ALL-NEXT: class name: `__vc_attributes::event_sourceAttribute`
+ALL-NEXT: 0x1025 | LF_STRUCTURE [size = 108, hash = 96512] `__vc_attributes::event_sourceAttribute`
ALL-NEXT: unique name: `.?AUevent_sourceAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
@@ -258,8 +248,7 @@ ALL-NEXT: - LF_METHOD [name = `event_sourceAttribute`, # overloads =
ALL-NEXT: - LF_MEMBER [name = `type`, Type = 0x1022, offset = 0, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `optimize`, Type = 0x1024, offset = 4, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `decorate`, Type = 0x0030 (bool), offset = 8, attrs = public]
-ALL-NEXT: 0x102C | LF_STRUCTURE [size = 108, hash = 238560]
-ALL-NEXT: class name: `__vc_attributes::event_sourceAttribute`
+ALL-NEXT: 0x102C | LF_STRUCTURE [size = 108, hash = 238560] `__vc_attributes::event_sourceAttribute`
ALL-NEXT: unique name: `.?AUevent_sourceAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x102B
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
@@ -270,13 +259,11 @@ ALL-NEXT: - LF_ENUMERATE [service = 3]
ALL-NEXT: - LF_ENUMERATE [unspecified = 4]
ALL-NEXT: - LF_ENUMERATE [EXE = 2]
ALL-NEXT: - LF_ENUMERATE [SERVICE = 3]
-ALL-NEXT: 0x102E | LF_ENUM [size = 104, hash = 115151]
-ALL-NEXT: name: `__vc_attributes::moduleAttribute::type_e`
+ALL-NEXT: 0x102E | LF_ENUM [size = 104, hash = 115151] `__vc_attributes::moduleAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@moduleAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x102D, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x102F | LF_STRUCTURE [size = 96, hash = 197306]
-ALL-NEXT: class name: `__vc_attributes::moduleAttribute`
+ALL-NEXT: 0x102F | LF_STRUCTURE [size = 96, hash = 197306] `__vc_attributes::moduleAttribute`
ALL-NEXT: unique name: `.?AUmoduleAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
@@ -338,8 +325,7 @@ ALL-NEXT: - LF_MEMBER [name = `hidden`, Type = 0x0030 (bool), offset
ALL-NEXT: - LF_MEMBER [name = `restricted`, Type = 0x0030 (bool), offset = 45, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `custom`, Type = 0x1032, offset = 48, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `resource_name`, Type = 0x1032, offset = 52, attrs = public]
-ALL-NEXT: 0x103A | LF_STRUCTURE [size = 96, hash = 98548]
-ALL-NEXT: class name: `__vc_attributes::moduleAttribute`
+ALL-NEXT: 0x103A | LF_STRUCTURE [size = 96, hash = 98548] `__vc_attributes::moduleAttribute`
ALL-NEXT: unique name: `.?AUmoduleAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1039
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
@@ -374,13 +360,11 @@ ALL-NEXT: - LF_ENUMERATE [eModuleUsage = 16777216]
ALL-NEXT: - LF_ENUMERATE [eIllegalUsage = 33554432]
ALL-NEXT: - LF_ENUMERATE [eAsynchronousUsage = 67108864]
ALL-NEXT: - LF_ENUMERATE [eAnyIDLUsage = 4161535]
-ALL-NEXT: 0x103C | LF_ENUM [size = 140, hash = 171328]
-ALL-NEXT: name: `__vc_attributes::helper_attributes::usageAttribute::usage_e`
+ALL-NEXT: 0x103C | LF_ENUM [size = 140, hash = 171328] `__vc_attributes::helper_attributes::usageAttribute::usage_e`
ALL-NEXT: unique name: `.?AW4usage_e@usageAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: field list: 0x103B, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x103D | LF_STRUCTURE [size = 128, hash = 203640]
-ALL-NEXT: class name: `__vc_attributes::helper_attributes::usageAttribute`
+ALL-NEXT: 0x103D | LF_STRUCTURE [size = 128, hash = 203640] `__vc_attributes::helper_attributes::usageAttribute`
ALL-NEXT: unique name: `.?AUusageAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
@@ -397,8 +381,7 @@ ALL-NEXT: - LF_NESTTYPE [name = `usage_e`, parent = 0x103C]
ALL-NEXT: - LF_ONEMETHOD [name = `usageAttribute`]
ALL-NEXT: type = 0x1040, vftable offset = -1, attrs = public
ALL-NEXT: - LF_MEMBER [name = `value`, Type = 0x0075 (unsigned), offset = 0, attrs = public]
-ALL-NEXT: 0x1042 | LF_STRUCTURE [size = 128, hash = 165040]
-ALL-NEXT: class name: `__vc_attributes::helper_attributes::usageAttribute`
+ALL-NEXT: 0x1042 | LF_STRUCTURE [size = 128, hash = 165040] `__vc_attributes::helper_attributes::usageAttribute`
ALL-NEXT: unique name: `.?AUusageAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1041
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
@@ -407,13 +390,11 @@ ALL-NEXT: - LF_ENUMERATE [eBoolean = 0]
ALL-NEXT: - LF_ENUMERATE [eInteger = 1]
ALL-NEXT: - LF_ENUMERATE [eFloat = 2]
ALL-NEXT: - LF_ENUMERATE [eDouble = 3]
-ALL-NEXT: 0x1044 | LF_ENUM [size = 148, hash = 142625]
-ALL-NEXT: name: `__vc_attributes::helper_attributes::v1_alttypeAttribute::type_e`
+ALL-NEXT: 0x1044 | LF_ENUM [size = 148, hash = 142625] `__vc_attributes::helper_attributes::v1_alttypeAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@v1_alttypeAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: field list: 0x1043, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x1045 | LF_STRUCTURE [size = 140, hash = 52534]
-ALL-NEXT: class name: `__vc_attributes::helper_attributes::v1_alttypeAttribute`
+ALL-NEXT: 0x1045 | LF_STRUCTURE [size = 140, hash = 52534] `__vc_attributes::helper_attributes::v1_alttypeAttribute`
ALL-NEXT: unique name: `.?AUv1_alttypeAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
@@ -430,8 +411,7 @@ ALL-NEXT: - LF_NESTTYPE [name = `type_e`, parent = 0x1044]
ALL-NEXT: - LF_ONEMETHOD [name = `v1_alttypeAttribute`]
ALL-NEXT: type = 0x1048, vftable offset = -1, attrs = public
ALL-NEXT: - LF_MEMBER [name = `type`, Type = 0x1044, offset = 0, attrs = public]
-ALL-NEXT: 0x104A | LF_STRUCTURE [size = 140, hash = 213215]
-ALL-NEXT: class name: `__vc_attributes::helper_attributes::v1_alttypeAttribute`
+ALL-NEXT: 0x104A | LF_STRUCTURE [size = 140, hash = 213215] `__vc_attributes::helper_attributes::v1_alttypeAttribute`
ALL-NEXT: unique name: `.?AUv1_alttypeAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1049
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
@@ -590,147 +570,195 @@ BIG-NEXT: ============================================================
BIG-NEXT: Mod 0000 | Name: `D:\src\llvm\test\tools\llvm-symbolizer\pdb\Inputs\test.obj`:
BIG-NEXT: Obj: `D:\src\llvm\test\tools\llvm-symbolizer\pdb\Inputs\test.obj`:
BIG-NEXT: debug stream: 12, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0001 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_cpu_disp_.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 14, # files: 14, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0002 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_initsect_.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 15, # files: 19, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0003 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_sehprolg4_.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 16, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 1 `f:\dd\vctools\crt\vcstartup\src\eh\i386\sehprolg4.asm`
BIG-NEXT: Mod 0004 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_chandler4gs_.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 17, # files: 14, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0005 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_secchk_.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 18, # files: 14, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0006 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\gs_cookie.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 19, # files: 9, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0007 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\gs_report.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 20, # files: 14, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0008 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\gs_support.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 21, # files: 10, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0009 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\checkcfg.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 22, # files: 14, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0010 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\guard_support.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 23, # files: 10, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0011 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\loadcfg.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 24, # files: 9, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0012 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\dyn_tls_dtor.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 25, # files: 11, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0013 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\dyn_tls_init.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 26, # files: 10, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0014 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\matherr_detection.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 27, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0015 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\ucrt_detection.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 28, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0016 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\argv_mode.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 29, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0017 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\commit_mode.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 30, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0018 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\default_local_stdio_options.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 31, # files: 24, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0019 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\denormal_control.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 32, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0020 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\env_mode.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 33, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0021 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\file_mode.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 34, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0022 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\invalid_parameter_handler.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 35, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0023 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\matherr.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 36, # files: 2, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0024 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\new_mode.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 37, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0025 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\thread_locale.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 38, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0026 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\tncleanup.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 39, # files: 21, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0027 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\exe_main.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 40, # files: 26, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0028 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\initializers.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 41, # files: 20, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0029 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\utility.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 42, # files: 20, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0030 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\ucrt_stubs.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 43, # files: 1, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0031 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\utility_desktop.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 44, # files: 20, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0032 | Name: `f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\default_precision.obj`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\MSVCRT.lib`:
BIG-NEXT: debug stream: 45, # files: 20, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0033 | Name: `Import:KERNEL32.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\um\x86\kernel32.lib`:
BIG-NEXT: debug stream: 47, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0034 | Name: `KERNEL32.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\um\x86\kernel32.lib`:
BIG-NEXT: debug stream: 46, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0035 | Name: `Import:VCRUNTIME140.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\vcruntime.lib`:
BIG-NEXT: debug stream: 49, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0036 | Name: `VCRUNTIME140.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\MI0E91~1.0\VC\LIB\vcruntime.lib`:
BIG-NEXT: debug stream: 48, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0037 | Name: `Import:api-ms-win-crt-stdio-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 59, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0038 | Name: `api-ms-win-crt-stdio-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 58, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0039 | Name: `Import:api-ms-win-crt-runtime-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 57, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0040 | Name: `api-ms-win-crt-runtime-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 56, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0041 | Name: `Import:api-ms-win-crt-math-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 55, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0042 | Name: `api-ms-win-crt-math-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 54, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0043 | Name: `Import:api-ms-win-crt-locale-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 53, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0044 | Name: `api-ms-win-crt-locale-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 52, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0045 | Name: `Import:api-ms-win-crt-heap-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 51, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0046 | Name: `api-ms-win-crt-heap-l1-1-0.dll`:
BIG-NEXT: Obj: `C:\PROGRA~2\WI3CF2~1\10\Lib\10.0.10586.0\ucrt\x86\ucrt.lib`:
BIG-NEXT: debug stream: 50, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 0 ``, src file ni: 0 ``
BIG-NEXT: Mod 0047 | Name: `* Linker *`:
BIG-NEXT: Obj: ``:
BIG-NEXT: debug stream: 60, # files: 0, has ec info: false
+BIG-NEXT: pdb file ni: 55 `{{.*test.pdb}}`, src file ni: 0 ``
BIG: Files
BIG-NEXT: ============================================================
BIG-NEXT: Mod 0000 | `D:\src\llvm\test\tools\llvm-symbolizer\pdb\Inputs\test.obj`:
diff --git a/test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test b/test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test
index 3903c07b027f..dd4c072fe0c9 100644
--- a/test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test
+++ b/test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test
@@ -14,8 +14,7 @@ TPI-TYPES-NEXT: - LF_MEMBER [name = `FooMember`, Type = 0x0403 (void*
TPI-TYPES-NEXT: 0x1002 | LF_ARGLIST [size = 16]
TPI-TYPES-NEXT: 0x0074 (int): `int`
TPI-TYPES-NEXT: 0x1000: `char**`
-TPI-TYPES-NEXT: 0x1003 | LF_STRUCTURE [size = 36]
-TPI-TYPES-NEXT: class name: `FooBar`
+TPI-TYPES-NEXT: 0x1003 | LF_STRUCTURE [size = 36] `FooBar`
TPI-TYPES-NEXT: unique name: `FooBar`
TPI-TYPES-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1001
TPI-TYPES-NEXT: options: has unique name
diff --git a/test/DebugInfo/PDB/pdbdump-mergetypes.test b/test/DebugInfo/PDB/pdbdump-mergetypes.test
index 8ab64cfab516..60cf4a172aa2 100644
--- a/test/DebugInfo/PDB/pdbdump-mergetypes.test
+++ b/test/DebugInfo/PDB/pdbdump-mergetypes.test
@@ -11,8 +11,7 @@ MERGED-NEXT: 0x1000 | LF_POINTER [size = 12]
MERGED-NEXT: referent = 0x0075 (unsigned), mode = pointer, opts = None, kind = ptr32
MERGED-NEXT: 0x1001 | LF_POINTER [size = 12]
MERGED-NEXT: referent = 0x0076 (__int64), mode = pointer, opts = None, kind = ptr32
-MERGED-NEXT: 0x1002 | LF_STRUCTURE [size = 48]
-MERGED-NEXT: class name: `OnlyInMerge1`
+MERGED-NEXT: 0x1002 | LF_STRUCTURE [size = 48] `OnlyInMerge1`
MERGED-NEXT: unique name: `OnlyInMerge1`
MERGED-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
MERGED-NEXT: options: forward ref | has unique name
@@ -29,8 +28,7 @@ MERGED-NEXT: 0x1003: `unsigned**`
MERGED-NEXT: 0x1007 | LF_PROCEDURE [size = 16]
MERGED-NEXT: return type = 0x0075 (unsigned), # args = 0, param list = 0x1006
MERGED-NEXT: calling conv = cdecl, options = None
-MERGED-NEXT: 0x1008 | LF_STRUCTURE [size = 48]
-MERGED-NEXT: class name: `OnlyInMerge2`
+MERGED-NEXT: 0x1008 | LF_STRUCTURE [size = 48] `OnlyInMerge2`
MERGED-NEXT: unique name: `OnlyInMerge2`
MERGED-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
MERGED-NEXT: options: forward ref | has unique name
diff --git a/test/DebugInfo/X86/dbg-declare-inalloca.ll b/test/DebugInfo/X86/dbg-declare-inalloca.ll
index e3f5c7e629b8..e8a310856c10 100644
--- a/test/DebugInfo/X86/dbg-declare-inalloca.ll
+++ b/test/DebugInfo/X86/dbg-declare-inalloca.ll
@@ -55,41 +55,41 @@
; CHECK: .asciz "c"
; CHECK: .cv_def_range [[start]] [[end]]
-; OBJ-LABEL: ProcStart {
+; OBJ-LABEL: {{.*}}Proc{{.*}}Sym {
; OBJ: Kind: S_GPROC32_ID (0x1147)
; OBJ: DisplayName: f
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: NonTrivial (0x1007)
; OBJ: Flags [ (0x1)
; OBJ: IsParameter (0x1)
; OBJ: ]
; OBJ: VarName: a
; OBJ: }
-; OBJ: DefRangeRegisterRel {
+; OBJ: DefRangeRegisterRelSym {
; OBJ: BaseRegister: 21
; OBJ: BasePointerOffset: 12
; OBJ: }
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x1)
; OBJ: IsParameter (0x1)
; OBJ: ]
; OBJ: VarName: b
; OBJ: }
-; OBJ: DefRangeRegisterRel {
+; OBJ: DefRangeRegisterRelSym {
; OBJ: BaseRegister: 21
; OBJ: BasePointerOffset: 16
; OBJ: }
; FIXME: Retain unused.
-; OBJ: Local {
+; OBJ: LocalSym {
; OBJ: Type: int (0x74)
; OBJ: Flags [ (0x1)
; OBJ: IsParameter (0x1)
; OBJ: ]
; OBJ: VarName: c
; OBJ: }
-; OBJ: DefRangeRegisterRel {
+; OBJ: DefRangeRegisterRelSym {
; OBJ: BaseRegister: 21
; OBJ: BasePointerOffset: 24
; OBJ: }
diff --git a/test/DebugInfo/dwarfdump-str-offsets.test b/test/DebugInfo/dwarfdump-str-offsets.test
index 0465357ba32a..c09135580fe6 100644
--- a/test/DebugInfo/dwarfdump-str-offsets.test
+++ b/test/DebugInfo/dwarfdump-str-offsets.test
@@ -1,92 +1,94 @@
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-str-offsets.x86_64.o | FileCheck %s
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump-str-offsets.x86_64.o | FileCheck --check-prefix=COMMON \
+RUN: --check-prefix=SPLIT %s
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump-str-offsets-macho.o | FileCheck --check-prefix=COMMON %s
; We are using a hand-constructed object file and are interested in the correct
; display of the DW_AT_str_offsets_base attribute, the correct display of strings
; and the dump of the .debug_str_offsets[.dwo] table.
;
; Abbreviation for DW_AT_str_offsets_base
-CHECK: .debug_abbrev contents:
-CHECK-NOT: contents:
-CHECK: DW_TAG_compile_unit
-CHECK-NOT: DW_TAG
-CHECK: DW_AT_str_offsets_base DW_FORM_sec_offset
+COMMON: .debug_abbrev contents:
+COMMON-NOT: contents:
+COMMON: DW_TAG_compile_unit
+COMMON-NOT: DW_TAG
+COMMON: DW_AT_str_offsets_base DW_FORM_sec_offset
; Verify that strings are displayed correctly as indexed strings
-CHECK: .debug_info contents:
-CHECK-NOT: contents:
-CHECK: DW_TAG_compile_unit
-CHECK-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
-CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_1")
-CHECK-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000008)
-CHECK-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU1")
-CHECK-NOT: NULL
-CHECK: DW_TAG_subprogram
-CHECK-NEXT: DW_AT_name [DW_FORM_strx1] ( indexed (00000003) string = "MyFunc")
-CHECK-NOT: NULL
-CHECK: DW_TAG_variable
-CHECK-NEXT: DW_AT_name [DW_FORM_strx2] ( indexed (00000004) string = "MyVar1")
-CHECK-NOT: NULL
-CHECK: DW_TAG_variable
-CHECK-NEXT: DW_AT_name [DW_FORM_strx3] ( indexed (00000005) string = "MyVar2")
-CHECK-NOT: NULL
-CHECK: DW_TAG_variable
-CHECK-NEXT: DW_AT_name [DW_FORM_strx4] ( indexed (00000006) string = "MyVar3")
+COMMON: .debug_info contents:
+COMMON-NOT: contents:
+COMMON: DW_TAG_compile_unit
+COMMON-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
+COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_1")
+COMMON-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000008)
+COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU1")
+COMMON-NOT: NULL
+COMMON: DW_TAG_subprogram
+COMMON-NEXT: DW_AT_name [DW_FORM_strx1] ( indexed (00000003) string = "MyFunc")
+COMMON-NOT: NULL
+COMMON: DW_TAG_variable
+COMMON-NEXT: DW_AT_name [DW_FORM_strx2] ( indexed (00000004) string = "MyVar1")
+COMMON-NOT: NULL
+COMMON: DW_TAG_variable
+COMMON-NEXT: DW_AT_name [DW_FORM_strx3] ( indexed (00000005) string = "MyVar2")
+COMMON-NOT: NULL
+COMMON: DW_TAG_variable
+COMMON-NEXT: DW_AT_name [DW_FORM_strx4] ( indexed (00000006) string = "MyVar3")
; Second compile unit (b.cpp)
-CHECK: DW_TAG_compile_unit
-CHECK-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
-CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_2")
-CHECK-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x0000002c)
-CHECK-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU2")
+COMMON: DW_TAG_compile_unit
+COMMON-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
+COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_2")
+COMMON-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x0000002c)
+COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU2")
; The split CU
-CHECK: .debug_info.dwo contents:
-CHECK-NOT: contents:
-CHECK: DW_TAG_compile_unit
-CHECK-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade split DWARF producer")
-CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "V5_split_compile_unit")
-CHECK-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000008)
-CHECK-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/splitCU")
+SPLIT: .debug_info.dwo contents:
+SPLIT-NOT: contents:
+SPLIT: DW_TAG_compile_unit
+SPLIT-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade split DWARF producer")
+SPLIT-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "V5_split_compile_unit")
+SPLIT-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000008)
+SPLIT-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/splitCU")
; The type unit
-CHECK: .debug_types contents:
-CHECK: DW_TAG_type_unit
-CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000000) string = "Type_Unit")
-CHECK-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000040)
-CHECK: DW_TAG_structure_type
-CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "MyStruct")
+COMMON: .debug_types contents:
+COMMON: DW_TAG_type_unit
+COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000000) string = "Type_Unit")
+COMMON-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000040)
+COMMON: DW_TAG_structure_type
+COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "MyStruct")
; The split type unit
-CHECK: .debug_types.dwo contents:
-CHECK: DW_TAG_type_unit
-CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000000) string = "V5_split_type_unit")
-CHECK-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x0000001c)
-CHECK: DW_TAG_structure_type
-CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "V5_split_Mystruct")
+SPLIT: .debug_types.dwo contents:
+SPLIT: DW_TAG_type_unit
+SPLIT-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000000) string = "V5_split_type_unit")
+SPLIT-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x0000001c)
+SPLIT: DW_TAG_structure_type
+SPLIT-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "V5_split_Mystruct")
; The .debug_str_offsets section
-CHECK: .debug_str_offsets contents:
-CHECK-NEXT: 0x00000000: Contribution size = 28, Version = 5
-CHECK-NEXT: 0x00000008: 00000000 "Handmade DWARF producer"
-CHECK-NEXT: 0x0000000c: 00000018 "Compile_Unit_1"
-CHECK-NEXT: 0x00000010: 00000027 "/home/test/CU1"
-CHECK-NEXT: 0x00000014: 00000067 "MyFunc"
-CHECK-NEXT: 0x00000018: 0000006e "MyVar1"
-CHECK-NEXT: 0x0000001c: 00000075 "MyVar2"
-CHECK-NEXT: 0x00000020: 0000007c "MyVar3"
-CHECK-NEXT: 0x00000024: Contribution size = 12, Version = 5
-CHECK-NEXT: 0x0000002c: 00000000 "Handmade DWARF producer"
-CHECK-NEXT: 0x00000030: 00000036 "Compile_Unit_2"
-CHECK-NEXT: 0x00000034: 00000045 "/home/test/CU2"
-CHECK-NEXT: 0x00000038: Contribution size = 8, Version = 5
-CHECK-NEXT: 0x00000040: 00000054 "Type_Unit"
-CHECK-NEXT: 0x00000044: 0000005e "MyStruct"
+COMMON: .debug_str_offsets contents:
+COMMON-NEXT: 0x00000000: Contribution size = 28, Version = 5
+COMMON-NEXT: 0x00000008: 00000000 "Handmade DWARF producer"
+COMMON-NEXT: 0x0000000c: 00000018 "Compile_Unit_1"
+COMMON-NEXT: 0x00000010: 00000027 "/home/test/CU1"
+COMMON-NEXT: 0x00000014: 00000067 "MyFunc"
+COMMON-NEXT: 0x00000018: 0000006e "MyVar1"
+COMMON-NEXT: 0x0000001c: 00000075 "MyVar2"
+COMMON-NEXT: 0x00000020: 0000007c "MyVar3"
+COMMON-NEXT: 0x00000024: Contribution size = 12, Version = 5
+COMMON-NEXT: 0x0000002c: 00000000 "Handmade DWARF producer"
+COMMON-NEXT: 0x00000030: 00000036 "Compile_Unit_2"
+COMMON-NEXT: 0x00000034: 00000045 "/home/test/CU2"
+COMMON-NEXT: 0x00000038: Contribution size = 8, Version = 5
+COMMON-NEXT: 0x00000040: 00000054 "Type_Unit"
+COMMON-NEXT: 0x00000044: 0000005e "MyStruct"
-CHECK: .debug_str_offsets.dwo contents:
-CHECK-NEXT: 0x00000000: Contribution size = 12, Version = 5
-CHECK-NEXT: 0x00000008: 00000000 "Handmade split DWARF producer"
-CHECK-NEXT: 0x0000000c: 0000001e "V5_split_compile_unit"
-CHECK-NEXT: 0x00000010: 00000034 "/home/test/splitCU"
-CHECK-NEXT: 0x00000014: Contribution size = 8, Version = 5
-CHECK-NEXT: 0x0000001c: 00000047 "V5_split_type_unit"
-CHECK-NEXT: 0x00000020: 0000005a "V5_split_Mystruct"
+SPLIT: .debug_str_offsets.dwo contents:
+SPLIT-NEXT: 0x00000000: Contribution size = 12, Version = 5
+SPLIT-NEXT: 0x00000008: 00000000 "Handmade split DWARF producer"
+SPLIT-NEXT: 0x0000000c: 0000001e "V5_split_compile_unit"
+SPLIT-NEXT: 0x00000010: 00000034 "/home/test/splitCU"
+SPLIT-NEXT: 0x00000014: Contribution size = 8, Version = 5
+SPLIT-NEXT: 0x0000001c: 00000047 "V5_split_type_unit"
+SPLIT-NEXT: 0x00000020: 0000005a "V5_split_Mystruct"
diff --git a/test/DebugInfo/invalid-relocations.test b/test/DebugInfo/invalid-relocations.test
new file mode 100644
index 000000000000..2252e1a205c3
--- /dev/null
+++ b/test/DebugInfo/invalid-relocations.test
@@ -0,0 +1,35 @@
+# RUN: yaml2obj %s > %t.o
+# RUN: llvm-dwarfdump %t.o 2>&1 | FileCheck %s
+# CHECK: failed to compute relocation: Unknown
+
+!ELF
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_386
+Sections:
+ - Type: SHT_PROGBITS
+ Name: .text
+ Flags: [ ]
+ AddressAlign: 0x04
+ Content: "0000"
+ - Type: SHT_PROGBITS
+ Name: .debug_info
+ Flags: [ ]
+ AddressAlign: 0x04
+ Content: "0000"
+ - Type: SHT_REL
+ Name: .rel.debug_info
+ Link: .symtab
+ Info: .debug_info
+ Relocations:
+ - Offset: 0
+ Symbol: _start
+ Type: 0xFF
+Symbols:
+ Global:
+ - Name: _start
+ Type: STT_FUNC
+ Section: .text
+ Value: 0x0
diff --git a/test/DebugInfo/llvm-symbolizer.test b/test/DebugInfo/llvm-symbolizer.test
index 2c64804659fc..bcad37cf9a48 100644
--- a/test/DebugInfo/llvm-symbolizer.test
+++ b/test/DebugInfo/llvm-symbolizer.test
@@ -10,9 +10,10 @@ RUN: echo "%p/Inputs/dwarfdump-inl-test.elf-x86-64 0x8dc" >> %t.input
RUN: echo "%p/Inputs/dwarfdump-inl-test.elf-x86-64 0xa05" >> %t.input
RUN: echo "%p/Inputs/dwarfdump-inl-test.elf-x86-64 0x987" >> %t.input
RUN: echo "%p/Inputs/dwarfdump-inl-test.high_pc.elf-x86-64 0x568" >> %t.input
-RUN: echo "\"%p/Inputs/dwarfdump-test3.elf-x86-64 space\" 0x640" >> %t.input
-RUN: echo "\"%p/Inputs/dwarfdump-test3.elf-x86-64 space\" 0x633" >> %t.input
-RUN: echo "\"%p/Inputs/dwarfdump-test3.elf-x86-64 space\" 0x62d" >> %t.input
+RUN: cp "%p/Inputs/dwarfdump-test3.elf-x86-64-space" "%T/dwarfdump-test3.elf-x86-64 space"
+RUN: echo "\"%T/dwarfdump-test3.elf-x86-64 space\" 0x640" >> %t.input
+RUN: echo "\"%T/dwarfdump-test3.elf-x86-64 space\" 0x633" >> %t.input
+RUN: echo "\"%T/dwarfdump-test3.elf-x86-64 space\" 0x62d" >> %t.input
RUN: echo "%p/Inputs/macho-universal 0x1f84" >> %t.input
RUN: echo "%p/Inputs/macho-universal:i386 0x1f67" >> %t.input
RUN: echo "%p/Inputs/macho-universal:x86_64 0x100000f05" >> %t.input
diff --git a/test/Instrumentation/MemorySanitizer/unsized_type.ll b/test/Instrumentation/MemorySanitizer/unsized_type.ll
new file mode 100644
index 000000000000..94ae92d3354a
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/unsized_type.ll
@@ -0,0 +1,22 @@
+; Check that unsized token types used by coroutine intrinsics do not cause
+; assertion failures.
+; RUN: opt < %s -msan -S 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
+declare i1 @llvm.coro.alloc(token)
+
+define void @foo() sanitize_memory {
+entry:
+ %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
+ %dyn.alloc.reqd = call i1 @llvm.coro.alloc(token %id)
+ ret void
+}
+
+; CHECK: define void @foo
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %id = call token @llvm.coro.id
+; CHECK-NEXT: call i1 @llvm.coro.alloc(token %id)
+; CHECK-NEXT: ret void
diff --git a/test/Instrumentation/ThreadSanitizer/atomic.ll b/test/Instrumentation/ThreadSanitizer/atomic.ll
index 30c58fea4cb7..3d83d9299e66 100644
--- a/test/Instrumentation/ThreadSanitizer/atomic.ll
+++ b/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -1959,7 +1959,7 @@ entry:
define void @atomic_signal_fence_acquire() nounwind uwtable {
entry:
- fence singlethread acquire, !dbg !7
+ fence syncscope("singlethread") acquire, !dbg !7
ret void, !dbg !7
}
; CHECK-LABEL: atomic_signal_fence_acquire
@@ -1975,7 +1975,7 @@ entry:
define void @atomic_signal_fence_release() nounwind uwtable {
entry:
- fence singlethread release, !dbg !7
+ fence syncscope("singlethread") release, !dbg !7
ret void, !dbg !7
}
; CHECK-LABEL: atomic_signal_fence_release
@@ -1991,7 +1991,7 @@ entry:
define void @atomic_signal_fence_acq_rel() nounwind uwtable {
entry:
- fence singlethread acq_rel, !dbg !7
+ fence syncscope("singlethread") acq_rel, !dbg !7
ret void, !dbg !7
}
; CHECK-LABEL: atomic_signal_fence_acq_rel
@@ -2007,7 +2007,7 @@ entry:
define void @atomic_signal_fence_seq_cst() nounwind uwtable {
entry:
- fence singlethread seq_cst, !dbg !7
+ fence syncscope("singlethread") seq_cst, !dbg !7
ret void, !dbg !7
}
; CHECK-LABEL: atomic_signal_fence_seq_cst
diff --git a/test/LTO/Resolution/X86/linker-redef-thin.ll b/test/LTO/Resolution/X86/linker-redef-thin.ll
new file mode 100644
index 000000000000..ebaac8094e75
--- /dev/null
+++ b/test/LTO/Resolution/X86/linker-redef-thin.ll
@@ -0,0 +1,16 @@
+; RUN: opt -module-summary %s -o %t.o
+; RUN: llvm-lto2 run -o %t1.o %t.o -r %t.o,patatino,pr
+; RUN: llvm-readobj -t %t1.o.0 | FileCheck %s
+
+; CHECK: Name: patatino
+; CHECK-NEXT: Value:
+; CHECK-NEXT: Size:
+; CHECK-NEXT: Binding: Weak
+; CHECK-NEXT: Type: Function
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @patatino() {
+ ret void
+}
diff --git a/test/Linker/Inputs/syncscope-1.ll b/test/Linker/Inputs/syncscope-1.ll
new file mode 100644
index 000000000000..90578e931dd5
--- /dev/null
+++ b/test/Linker/Inputs/syncscope-1.ll
@@ -0,0 +1,6 @@
+define void @syncscope_1() {
+ fence syncscope("agent") seq_cst
+ fence syncscope("workgroup") seq_cst
+ fence syncscope("wavefront") seq_cst
+ ret void
+}
diff --git a/test/Linker/Inputs/syncscope-2.ll b/test/Linker/Inputs/syncscope-2.ll
new file mode 100644
index 000000000000..527c5bf93d00
--- /dev/null
+++ b/test/Linker/Inputs/syncscope-2.ll
@@ -0,0 +1,6 @@
+define void @syncscope_2() {
+ fence syncscope("image") seq_cst
+ fence syncscope("agent") seq_cst
+ fence syncscope("workgroup") seq_cst
+ ret void
+}
diff --git a/test/Linker/Inputs/thumb-module-inline-asm.ll b/test/Linker/Inputs/thumb-module-inline-asm.ll
new file mode 100644
index 000000000000..7792ff96d5b5
--- /dev/null
+++ b/test/Linker/Inputs/thumb-module-inline-asm.ll
@@ -0,0 +1,3 @@
+target triple = "thumbv7-linux-gnueabihf"
+
+module asm "orn r1, r2, r2"
diff --git a/test/Linker/link-arm-and-thumb-module-inline-asm.ll b/test/Linker/link-arm-and-thumb-module-inline-asm.ll
new file mode 100644
index 000000000000..13779f37ffa0
--- /dev/null
+++ b/test/Linker/link-arm-and-thumb-module-inline-asm.ll
@@ -0,0 +1,20 @@
+; This test checks that proper directives to switch between ARM and Thumb mode
+; are added when linking ARM and Thumb modules.
+
+; RUN: llvm-as %s -o %t1.bc
+; RUN: llvm-as %p/Inputs/thumb-module-inline-asm.ll -o %t2.bc
+; RUN: llvm-link %t1.bc %t2.bc -S 2> %t3.out | FileCheck %s
+
+target triple = "armv7-linux-gnueabihf"
+
+module asm "add r1, r2, r2"
+
+; CHECK: .text
+; CHECK-NEXT: .balign 4
+; CHECK-NEXT: .arm
+; CHECK-NEXT: add r1, r2, r2
+; CHECK-NEXT: module asm
+; CHECK-NEXT: .text
+; CHECK-NEXT: .balign 2
+; CHECK-NEXT: .thumb
+; CHECK-NEXT: orn r1, r2, r2
diff --git a/test/Linker/syncscopes.ll b/test/Linker/syncscopes.ll
new file mode 100644
index 000000000000..a572c23cffbd
--- /dev/null
+++ b/test/Linker/syncscopes.ll
@@ -0,0 +1,11 @@
+; RUN: llvm-link %S/Inputs/syncscope-1.ll %S/Inputs/syncscope-2.ll -S | FileCheck %s
+
+; CHECK-LABEL: define void @syncscope_1
+; CHECK: fence syncscope("agent") seq_cst
+; CHECK: fence syncscope("workgroup") seq_cst
+; CHECK: fence syncscope("wavefront") seq_cst
+
+; CHECK-LABEL: define void @syncscope_2
+; CHECK: fence syncscope("image") seq_cst
+; CHECK: fence syncscope("agent") seq_cst
+; CHECK: fence syncscope("workgroup") seq_cst
diff --git a/test/MC/AArch64/label-arithmetic-diags-elf.s b/test/MC/AArch64/label-arithmetic-diags-elf.s
index dbfdd24f8dc9..2ef67fafb2ea 100644
--- a/test/MC/AArch64/label-arithmetic-diags-elf.s
+++ b/test/MC/AArch64/label-arithmetic-diags-elf.s
@@ -5,7 +5,7 @@ b:
.fill 300
e:
.byte e - b
- // CHECK: error: value evaluated as 300 is out of range.
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: value evaluated as 300 is out of range.
// CHECK-NEXT: .byte e - b
// CHECK-NEXT: ^
@@ -14,67 +14,74 @@ start:
.space 5000
end:
add w0, w1, #(end - start)
- cmp w0, #(end - start)
- // CHECK: error: fixup value out of range
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: fixup value out of range
// CHECK-NEXT: add w0, w1, #(end - start)
// CHECK-NEXT: ^
- // CHECK: error: fixup value out of range
+
+ cmp w0, #(end - start)
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: fixup value out of range
// CHECK-NEXT: cmp w0, #(end - start)
// CHECK-NEXT: ^
negative:
add w0, w1, #(end - negative)
- cmp w0, #(end - negative)
- // CHECK: error: fixup value out of range
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: fixup value out of range
// CHECK-NEXT: add w0, w1, #(end - negative)
// CHECK-NEXT: ^
- // CHECK: error: fixup value out of range
+
+ cmp w0, #(end - negative)
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: fixup value out of range
// CHECK-NEXT: cmp w0, #(end - negative)
// CHECK-NEXT: ^
add w0, w1, #(end - external)
- cmp w0, #(end - external)
- // CHECK: error: symbol 'external' can not be undefined in a subtraction expression
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: symbol 'external' can not be undefined in a subtraction expression
// CHECK-NEXT: add w0, w1, #(end - external)
// CHECK-NEXT: ^
- // CHECK: error: symbol 'external' can not be undefined in a subtraction expression
+
+ cmp w0, #(end - external)
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: symbol 'external' can not be undefined in a subtraction expression
// CHECK-NEXT: cmp w0, #(end - external)
// CHECK-NEXT: ^
add w0, w1, #:lo12:external - end
- cmp w0, #:lo12:external - end
- // CHECK: error: Unsupported pc-relative fixup kind
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: Unsupported pc-relative fixup kind
// CHECK-NEXT: add w0, w1, #:lo12:external - end
// CHECK-NEXT: ^
- // CHECK: error: Unsupported pc-relative fixup kind
+
+ cmp w0, #:lo12:external - end
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: Unsupported pc-relative fixup kind
// CHECK-NEXT: cmp w0, #:lo12:external - end
// CHECK-NEXT: ^
add w0, w1, #:got_lo12:external - end
- cmp w0, #:got_lo12:external - end
- // CHECK: error: Unsupported pc-relative fixup kind
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: Unsupported pc-relative fixup kind
// CHECK-NEXT: add w0, w1, #:got_lo12:external - end
// CHECK-NEXT: ^
- // CHECK: error: Unsupported pc-relative fixup kind
+
+ cmp w0, #:got_lo12:external - end
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: Unsupported pc-relative fixup kind
// CHECK-NEXT: cmp w0, #:got_lo12:external - end
// CHECK-NEXT: ^
.section sec_y
end_across_sec:
add w0, w1, #(end_across_sec - start)
- cmp w0, #(end_across_sec - start)
- // CHECK: error: Cannot represent a difference across sections
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: Cannot represent a difference across sections
// CHECK-NEXT: add w0, w1, #(end_across_sec - start)
// CHECK-NEXT: ^
- // CHECK: error: Cannot represent a difference across sections
+
+ cmp w0, #(end_across_sec - start)
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: Cannot represent a difference across sections
// CHECK-NEXT: cmp w0, #(end_across_sec - start)
// CHECK-NEXT: ^
add w0, w1, #(sec_y - sec_x)
- cmp w0, #(sec_y - sec_x)
- // CHECK: error: Cannot represent a difference across sections
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: Cannot represent a difference across sections
// CHECK-NEXT: add w0, w1, #(sec_y - sec_x)
// CHECK-NEXT: ^
- // CHECK: error: Cannot represent a difference across sections
+
+ cmp w0, #(sec_y - sec_x)
+ // CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: Cannot represent a difference across sections
// CHECK-NEXT: cmp w0, #(sec_y - sec_x)
// CHECK-NEXT: ^
diff --git a/test/MC/AMDGPU/gfx9_asm_all.s b/test/MC/AMDGPU/gfx9_asm_all.s
index 0c3dbd221a49..56484a37bdce 100644
--- a/test/MC/AMDGPU/gfx9_asm_all.s
+++ b/test/MC/AMDGPU/gfx9_asm_all.s
@@ -104933,3 +104933,462 @@ v_cmpx_t_u32_sdwa s[6:7], v1, v2 src0_sel:DWORD src1_sel:WORD_1
v_cmpx_t_u32_sdwa s[6:7], v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x86,0x06,0x0e]
+
+v_mad_mix_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v255, v1, v2, v3
+// CHECK: [0xff,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0xff,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x04,0x0e,0x1c]
+
+v_mad_mix_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x65,0x04,0x0e,0x1c]
+
+v_mad_mix_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x66,0x04,0x0e,0x1c]
+
+v_mad_mix_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x67,0x04,0x0e,0x1c]
+
+v_mad_mix_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x6a,0x04,0x0e,0x1c]
+
+v_mad_mix_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x6b,0x04,0x0e,0x1c]
+
+v_mad_mix_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x7c,0x04,0x0e,0x1c]
+
+v_mad_mix_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x7e,0x04,0x0e,0x1c]
+
+v_mad_mix_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x7f,0x04,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, v255, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0xff,0x0f,0x1c]
+
+v_mad_mix_f32 v5, v1, s2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0c,0x1c]
+
+v_mad_mix_f32 v5, v1, s101, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0xcb,0x0c,0x1c]
+
+v_mad_mix_f32 v5, v1, flat_scratch_lo, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0xcd,0x0c,0x1c]
+
+v_mad_mix_f32 v5, v1, flat_scratch_hi, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0xcf,0x0c,0x1c]
+
+v_mad_mix_f32 v5, v1, vcc_lo, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0xd5,0x0c,0x1c]
+
+v_mad_mix_f32 v5, v1, vcc_hi, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0xd7,0x0c,0x1c]
+
+v_mad_mix_f32 v5, v1, m0, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0xf9,0x0c,0x1c]
+
+v_mad_mix_f32 v5, v1, exec_lo, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0xfd,0x0c,0x1c]
+
+v_mad_mix_f32 v5, v1, exec_hi, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0xff,0x0c,0x1c]
+
+v_mad_mix_f32 v5, v1, v2, v255
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0xfe,0x1f]
+
+v_mad_mix_f32 v5, v1, v2, s3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x18]
+
+v_mad_mix_f32 v5, v1, v2, s101
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x96,0x19]
+
+v_mad_mix_f32 v5, v1, v2, flat_scratch_lo
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x9a,0x19]
+
+v_mad_mix_f32 v5, v1, v2, flat_scratch_hi
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x9e,0x19]
+
+v_mad_mix_f32 v5, v1, v2, vcc_lo
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0xaa,0x19]
+
+v_mad_mix_f32 v5, v1, v2, vcc_hi
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0xae,0x19]
+
+v_mad_mix_f32 v5, v1, v2, m0
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0xf2,0x19]
+
+v_mad_mix_f32 v5, v1, v2, exec_lo
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0xfa,0x19]
+
+v_mad_mix_f32 v5, v1, v2, exec_hi
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0xfe,0x19]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel:[0,0,0]
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel:[1,0,0]
+// CHECK: [0x05,0x48,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel:[0,1,0]
+// CHECK: [0x05,0x50,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel:[0,0,1]
+// CHECK: [0x05,0x60,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel:[1,1,1]
+// CHECK: [0x05,0x78,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel_hi:[1,1,1]
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel_hi:[0,0,0]
+// CHECK: [0x05,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel_hi:[1,0,0]
+// CHECK: [0x05,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x0c]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel_hi:[0,1,0]
+// CHECK: [0x05,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x14]
+
+v_mad_mix_f32 v5, v1, v2, v3 op_sel_hi:[0,0,1]
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mix_f32 v5, -v1, v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x3c]
+
+v_mad_mix_f32 v5, v1, -v2, v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x5c]
+
+v_mad_mix_f32 v5, v1, v2, -v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x9c]
+
+v_mad_mix_f32 v5, -v1, -v2, -v3
+// CHECK: [0x05,0x40,0xa0,0xd3,0x01,0x05,0x0e,0xfc]
+
+v_mad_mix_f32 v5, |v1|, v2, v3
+// CHECK: [0x05,0x41,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, |v2|, v3
+// CHECK: [0x05,0x42,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, v2, |v3|
+// CHECK: [0x05,0x44,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, |v1|, |v2|, |v3|
+// CHECK: [0x05,0x47,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v5, v1, v2, v3 clamp
+// CHECK: [0x05,0xc0,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v255, v1, v2, v3
+// CHECK: [0xff,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v255, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0xff,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, s1, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x04,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, s101, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x65,0x04,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x66,0x04,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x67,0x04,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x6a,0x04,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x6b,0x04,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, m0, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x7c,0x04,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x7e,0x04,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x7f,0x04,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v255, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0xff,0x0f,0x1c]
+
+v_mad_mixhi_f16 v5, v1, s2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0c,0x1c]
+
+v_mad_mixhi_f16 v5, v1, s101, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0xcb,0x0c,0x1c]
+
+v_mad_mixhi_f16 v5, v1, flat_scratch_lo, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0xcd,0x0c,0x1c]
+
+v_mad_mixhi_f16 v5, v1, flat_scratch_hi, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0xcf,0x0c,0x1c]
+
+v_mad_mixhi_f16 v5, v1, vcc_lo, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0xd5,0x0c,0x1c]
+
+v_mad_mixhi_f16 v5, v1, vcc_hi, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0xd7,0x0c,0x1c]
+
+v_mad_mixhi_f16 v5, v1, m0, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0xf9,0x0c,0x1c]
+
+v_mad_mixhi_f16 v5, v1, exec_lo, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0xfd,0x0c,0x1c]
+
+v_mad_mixhi_f16 v5, v1, exec_hi, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0xff,0x0c,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, v255
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0xfe,0x1f]
+
+v_mad_mixhi_f16 v5, v1, v2, s3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x18]
+
+v_mad_mixhi_f16 v5, v1, v2, s101
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x96,0x19]
+
+v_mad_mixhi_f16 v5, v1, v2, flat_scratch_lo
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x9a,0x19]
+
+v_mad_mixhi_f16 v5, v1, v2, flat_scratch_hi
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x9e,0x19]
+
+v_mad_mixhi_f16 v5, v1, v2, vcc_lo
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0xaa,0x19]
+
+v_mad_mixhi_f16 v5, v1, v2, vcc_hi
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0xae,0x19]
+
+v_mad_mixhi_f16 v5, v1, v2, m0
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0xf2,0x19]
+
+v_mad_mixhi_f16 v5, v1, v2, exec_lo
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0xfa,0x19]
+
+v_mad_mixhi_f16 v5, v1, v2, exec_hi
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0xfe,0x19]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel:[0,0,0]
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel:[1,0,0]
+// CHECK: [0x05,0x48,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel:[0,1,0]
+// CHECK: [0x05,0x50,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel:[0,0,1]
+// CHECK: [0x05,0x60,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel:[1,1,1]
+// CHECK: [0x05,0x78,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel_hi:[1,1,1]
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel_hi:[0,0,0]
+// CHECK: [0x05,0x00,0xa2,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel_hi:[1,0,0]
+// CHECK: [0x05,0x00,0xa2,0xd3,0x01,0x05,0x0e,0x0c]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel_hi:[0,1,0]
+// CHECK: [0x05,0x00,0xa2,0xd3,0x01,0x05,0x0e,0x14]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 op_sel_hi:[0,0,1]
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mixhi_f16 v5, -v1, v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x3c]
+
+v_mad_mixhi_f16 v5, v1, -v2, v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x5c]
+
+v_mad_mixhi_f16 v5, v1, v2, -v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x9c]
+
+v_mad_mixhi_f16 v5, -v1, -v2, -v3
+// CHECK: [0x05,0x40,0xa2,0xd3,0x01,0x05,0x0e,0xfc]
+
+v_mad_mixhi_f16 v5, |v1|, v2, v3
+// CHECK: [0x05,0x41,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, |v2|, v3
+// CHECK: [0x05,0x42,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, |v3|
+// CHECK: [0x05,0x44,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, |v1|, |v2|, |v3|
+// CHECK: [0x05,0x47,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixhi_f16 v5, v1, v2, v3 clamp
+// CHECK: [0x05,0xc0,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v255, v1, v2, v3
+// CHECK: [0xff,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v255, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0xff,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, s1, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x04,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, s101, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x65,0x04,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x66,0x04,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x67,0x04,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x6a,0x04,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x6b,0x04,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, m0, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x7c,0x04,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x7e,0x04,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x7f,0x04,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v255, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0xff,0x0f,0x1c]
+
+v_mad_mixlo_f16 v5, v1, s2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0c,0x1c]
+
+v_mad_mixlo_f16 v5, v1, s101, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0xcb,0x0c,0x1c]
+
+v_mad_mixlo_f16 v5, v1, flat_scratch_lo, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0xcd,0x0c,0x1c]
+
+v_mad_mixlo_f16 v5, v1, flat_scratch_hi, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0xcf,0x0c,0x1c]
+
+v_mad_mixlo_f16 v5, v1, vcc_lo, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0xd5,0x0c,0x1c]
+
+v_mad_mixlo_f16 v5, v1, vcc_hi, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0xd7,0x0c,0x1c]
+
+v_mad_mixlo_f16 v5, v1, m0, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0xf9,0x0c,0x1c]
+
+v_mad_mixlo_f16 v5, v1, exec_lo, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0xfd,0x0c,0x1c]
+
+v_mad_mixlo_f16 v5, v1, exec_hi, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0xff,0x0c,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, v255
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0xfe,0x1f]
+
+v_mad_mixlo_f16 v5, v1, v2, s3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x18]
+
+v_mad_mixlo_f16 v5, v1, v2, s101
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x96,0x19]
+
+v_mad_mixlo_f16 v5, v1, v2, flat_scratch_lo
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x9a,0x19]
+
+v_mad_mixlo_f16 v5, v1, v2, flat_scratch_hi
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x9e,0x19]
+
+v_mad_mixlo_f16 v5, v1, v2, vcc_lo
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0xaa,0x19]
+
+v_mad_mixlo_f16 v5, v1, v2, vcc_hi
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0xae,0x19]
+
+v_mad_mixlo_f16 v5, v1, v2, m0
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0xf2,0x19]
+
+v_mad_mixlo_f16 v5, v1, v2, exec_lo
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0xfa,0x19]
+
+v_mad_mixlo_f16 v5, v1, v2, exec_hi
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0xfe,0x19]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel:[0,0,0]
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel:[1,0,0]
+// CHECK: [0x05,0x48,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel:[0,1,0]
+// CHECK: [0x05,0x50,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel:[0,0,1]
+// CHECK: [0x05,0x60,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel:[1,1,1]
+// CHECK: [0x05,0x78,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel_hi:[1,1,1]
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel_hi:[0,0,0]
+// CHECK: [0x05,0x00,0xa1,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel_hi:[1,0,0]
+// CHECK: [0x05,0x00,0xa1,0xd3,0x01,0x05,0x0e,0x0c]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel_hi:[0,1,0]
+// CHECK: [0x05,0x00,0xa1,0xd3,0x01,0x05,0x0e,0x14]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 op_sel_hi:[0,0,1]
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mixlo_f16 v5, -v1, v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x3c]
+
+v_mad_mixlo_f16 v5, v1, -v2, v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x5c]
+
+v_mad_mixlo_f16 v5, v1, v2, -v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x9c]
+
+v_mad_mixlo_f16 v5, -v1, -v2, -v3
+// CHECK: [0x05,0x40,0xa1,0xd3,0x01,0x05,0x0e,0xfc]
+
+v_mad_mixlo_f16 v5, |v1|, v2, v3
+// CHECK: [0x05,0x41,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, |v2|, v3
+// CHECK: [0x05,0x42,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, |v3|
+// CHECK: [0x05,0x44,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, |v1|, |v2|, |v3|
+// CHECK: [0x05,0x47,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mixlo_f16 v5, v1, v2, v3 clamp
+// CHECK: [0x05,0xc0,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
diff --git a/test/MC/AMDGPU/vop3p-err.s b/test/MC/AMDGPU/vop3p-err.s
index f4b1a3da714f..bc6f6100f327 100644
--- a/test/MC/AMDGPU/vop3p-err.s
+++ b/test/MC/AMDGPU/vop3p-err.s
@@ -71,47 +71,6 @@ v_pk_add_u16 v1, abs(v2), v3
// GFX9: :19: error: invalid operand for instruction
v_pk_add_u16 v1, -v2, v3
-
-//
-// Packed operands on the non-packed VOP3P instructions
-//
-
-// GFX9: invalid operand for instruction
-v_mad_mix_f32 v1, v2, v3, v4 op_sel:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mix_f32 v1, v2, v3, v4 op_sel_hi:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mix_f32 v1, v2, v3, v4 neg_lo:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mix_f32 v1, v2, v3, v4 neg_hi:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mixlo_f16 v1, v2, v3, v4 op_sel:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mixlo_f16 v1, v2, v3, v4 op_sel_hi:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mixlo_f16 v1, v2, v3, v4 neg_lo:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mixlo_f16 v1, v2, v3, v4 neg_hi:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mixhi_f16 v1, v2, v3, v4 op_sel:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mixhi_f16 v1, v2, v3, v4 op_sel_hi:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mixhi_f16 v1, v2, v3, v4 neg_lo:[0,0,0]
-
-// GFX9: invalid operand for instruction
-v_mad_mixhi_f16 v1, v2, v3, v4 neg_hi:[0,0,0]
-
//
// Constant bus restrictions
//
diff --git a/test/MC/AMDGPU/vop3p.s b/test/MC/AMDGPU/vop3p.s
index c9eda69e13d2..97c3650cdf54 100644
--- a/test/MC/AMDGPU/vop3p.s
+++ b/test/MC/AMDGPU/vop3p.s
@@ -169,48 +169,81 @@ v_pk_max_f16 v0, v1, v2
// GFX9: v_pk_max_f16 v0, v1, v2 ; encoding: [0x00,0x00,0x92,0xd3,0x01,0x05,0x02,0x18]
v_mad_mix_f32 v0, v1, v2, v3
-// GFX9: v_mad_mix_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 ; encoding: [0x00,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
v_mad_mixlo_f16 v0, v1, v2, v3
-// GFX9: v_mad_mixlo_f16 v0, v1, v2, v3 ; encoding: [0x00,0x00,0xa1,0xd3,0x01,0x05,0x0e,0x04]
+// GFX9: v_mad_mixlo_f16 v0, v1, v2, v3 ; encoding: [0x00,0x40,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
v_mad_mixhi_f16 v0, v1, v2, v3
-// GFX9: v_mad_mixhi_f16 v0, v1, v2, v3 ; encoding: [0x00,0x00,0xa2,0xd3,0x01,0x05,0x0e,0x04]
-
+// GFX9: v_mad_mixhi_f16 v0, v1, v2, v3 ; encoding: [0x00,0x40,0xa2,0xd3,0x01,0x05,0x0e,0x1c]
//
// Regular source modifiers on non-packed instructions
//
v_mad_mix_f32 v0, abs(v1), v2, v3
-// GFX9: v_mad_mix_f32 v0, |v1|, v2, v3 ; encoding: [0x00,0x01,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+// GFX9: v_mad_mix_f32 v0, |v1|, v2, v3 ; encoding: [0x00,0x41,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
v_mad_mix_f32 v0, v1, abs(v2), v3
-// GFX9: v_mad_mix_f32 v0, v1, |v2|, v3 ; encoding: [0x00,0x02,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+// GFX9: v_mad_mix_f32 v0, v1, |v2|, v3 ; encoding: [0x00,0x42,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
v_mad_mix_f32 v0, v1, v2, abs(v3)
-// GFX9: v_mad_mix_f32 v0, v1, v2, |v3| ; encoding: [0x00,0x04,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+// GFX9: v_mad_mix_f32 v0, v1, v2, |v3| ; encoding: [0x00,0x44,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
v_mad_mix_f32 v0, -v1, v2, v3
-// GFX9: v_mad_mix_f32 v0, -v1, v2, v3 ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x24]
+// GFX9: v_mad_mix_f32 v0, -v1, v2, v3 ; encoding: [0x00,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x3c]
v_mad_mix_f32 v0, v1, -v2, v3
-// GFX9: v_mad_mix_f32 v0, v1, -v2, v3 ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x44]
+// GFX9: v_mad_mix_f32 v0, v1, -v2, v3 ; encoding: [0x00,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x5c]
v_mad_mix_f32 v0, v1, v2, -v3
-// GFX9: v_mad_mix_f32 v0, v1, v2, -v3 ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x84]
+// GFX9: v_mad_mix_f32 v0, v1, v2, -v3 ; encoding: [0x00,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x9c]
v_mad_mix_f32 v0, -abs(v1), v2, v3
-// GFX9: v_mad_mix_f32 v0, -|v1|, v2, v3 ; encoding: [0x00,0x01,0xa0,0xd3,0x01,0x05,0x0e,0x24]
+// GFX9: v_mad_mix_f32 v0, -|v1|, v2, v3 ; encoding: [0x00,0x41,0xa0,0xd3,0x01,0x05,0x0e,0x3c]
v_mad_mix_f32 v0, v1, -abs(v2), v3
-// GFX9: v_mad_mix_f32 v0, v1, -|v2|, v3 ; encoding: [0x00,0x02,0xa0,0xd3,0x01,0x05,0x0e,0x44]
+// GFX9: v_mad_mix_f32 v0, v1, -|v2|, v3 ; encoding: [0x00,0x42,0xa0,0xd3,0x01,0x05,0x0e,0x5c]
v_mad_mix_f32 v0, v1, v2, -abs(v3)
-// GFX9: v_mad_mix_f32 v0, v1, v2, -|v3| ; encoding: [0x00,0x04,0xa0,0xd3,0x01,0x05,0x0e,0x84]
+// GFX9: v_mad_mix_f32 v0, v1, v2, -|v3| ; encoding: [0x00,0x44,0xa0,0xd3,0x01,0x05,0x0e,0x9c]
v_mad_mixlo_f16 v0, abs(v1), -v2, abs(v3)
-// GFX9: v_mad_mixlo_f16 v0, |v1|, -v2, |v3| ; encoding: [0x00,0x05,0xa1,0xd3,0x01,0x05,0x0e,0x44]
+// GFX9: v_mad_mixlo_f16 v0, |v1|, -v2, |v3| ; encoding: [0x00,0x45,0xa1,0xd3,0x01,0x05,0x0e,0x5c]
v_mad_mixhi_f16 v0, -v1, abs(v2), -abs(v3)
-// GFX9: v_mad_mixhi_f16 v0, -v1, |v2|, -|v3| ; encoding: [0x00,0x06,0xa2,0xd3,0x01,0x05,0x0e,0xa4]
+// GFX9: v_mad_mixhi_f16 v0, -v1, |v2|, -|v3| ; encoding: [0x00,0x46,0xa2,0xd3,0x01,0x05,0x0e,0xbc]
+
+//
+// op_sel with non-packed instructions
+//
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel:[0,0,0]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 ; encoding: [0x00,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel:[1,0,0]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x00,0x48,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel:[0,1,0]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 op_sel:[0,1,0] ; encoding: [0x00,0x50,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel:[0,0,1]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 op_sel:[0,0,1] ; encoding: [0x00,0x60,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel:[1,1,1]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 op_sel:[1,1,1] ; encoding: [0x00,0x78,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel_hi:[0,0,0]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 op_sel_hi:[0,0,0] ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel_hi:[1,0,0]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 op_sel_hi:[1,0,0] ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x0c]
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel_hi:[0,1,0]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 op_sel_hi:[0,1,0] ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x14]
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel_hi:[0,0,1]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 op_sel_hi:[0,0,1] ; encoding: [0x00,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mix_f32 v0, v1, v2, v3 op_sel_hi:[1,1,1]
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 ; encoding: [0x00,0x40,0xa0,0xd3,0x01,0x05,0x0e,0x1c]
diff --git a/test/MC/ARM/elf-movt.s b/test/MC/ARM/elf-movt.s
index 9df7a603b71a..858e4aa41b29 100644
--- a/test/MC/ARM/elf-movt.s
+++ b/test/MC/ARM/elf-movt.s
@@ -14,8 +14,20 @@ barf: @ @barf
movw r0, :lower16:GOT-(.LPC0_2+8)
movt r0, :upper16:GOT-(.LPC0_2+8)
.LPC0_2:
+ movw r0, :lower16:extern_symbol+1234
+ movt r0, :upper16:extern_symbol+1234
+
+ movw r0, :lower16:(foo - bar + 1234)
+ movt r0, :upper16:(foo - bar + 1234)
+foo:
+bar:
+
@ ASM: movw r0, :lower16:(GOT-(.LPC0_2+8))
@ ASM-NEXT: movt r0, :upper16:(GOT-(.LPC0_2+8))
+@ ASM: movw r0, :lower16:(extern_symbol+1234)
+@ ASM-NEXT: movt r0, :upper16:(extern_symbol+1234)
+@ ASM: movw r0, :lower16:((foo-bar)+1234)
+@ ASM-NEXT: movt r0, :upper16:((foo-bar)+1234)
@OBJ: Disassembly of section .text:
@OBJ-NEXT: barf:
@@ -23,6 +35,12 @@ barf: @ @barf
@OBJ-NEXT: 00000000: R_ARM_MOVW_PREL_NC GOT
@OBJ-NEXT: 4: f4 0f 4f e3 movt r0, #65524
@OBJ-NEXT: 00000004: R_ARM_MOVT_PREL GOT
+@OBJ-NEXT: 8: d2 04 00 e3 movw r0, #1234
+@OBJ-NEXT: 00000008: R_ARM_MOVW_ABS_NC extern_symbol
+@OBJ-NEXT: c: d2 04 40 e3 movt r0, #1234
+@OBJ-NEXT: 0000000c: R_ARM_MOVT_ABS extern_symbol
+@OBJ-NEXT: 10: d2 04 00 e3 movw r0, #1234
+@OBJ-NEXT: 14: 00 00 40 e3 movt r0, #0
@THUMB: Disassembly of section .text:
@THUMB-NEXT: barf:
@@ -30,3 +48,9 @@ barf: @ @barf
@THUMB-NEXT: 00000000: R_ARM_THM_MOVW_PREL_NC GOT
@THUMB-NEXT: 4: cf f6 f4 70 movt r0, #65524
@THUMB-NEXT: 00000004: R_ARM_THM_MOVT_PREL GOT
+@THUMB-NEXT: 8: 40 f2 d2 40 movw r0, #1234
+@THUMB-NEXT: 00000008: R_ARM_THM_MOVW_ABS_NC extern_symbol
+@THUMB-NEXT: c: c0 f2 d2 40 movt r0, #1234
+@THUMB-NEXT: 0000000c: R_ARM_THM_MOVT_ABS extern_symbol
+@THUMB-NEXT: 10: 40 f2 d2 40 movw r0, #1234
+@THUMB-NEXT: 14: c0 f2 00 00 movt r0, #0
diff --git a/test/MC/ARM/invalid-instructions-spellcheck.s b/test/MC/ARM/invalid-instructions-spellcheck.s
new file mode 100644
index 000000000000..ca118cff6ddf
--- /dev/null
+++ b/test/MC/ARM/invalid-instructions-spellcheck.s
@@ -0,0 +1,68 @@
+@ RUN: not llvm-mc -triple=arm -show-encoding < %s 2>&1 | FileCheck %s
+@ RUN: not llvm-mc -triple=thumb -show-encoding < %s 2>&1 | FileCheck %s --check-prefix=CHECK-THUMB
+
+@ This tests the mnemonic spell checker.
+
+@ First check what happens when an instruction is omitted:
+
+ r1, r2, r3
+
+@ CHECK: error: unexpected token in operand
+@ CHECK-NEXT: r1, r2, r3
+@ CHECK-NEXT: ^
+
+@ We don't want to see a suggestion here; the edit distance is too large to
+@ give sensible suggestions:
+
+ aaaaaaaaaaaaaaa r1, r2, r3
+
+@ CHECK: error: invalid instruction
+@ CHECK-NEXT: aaaaaaaaaaaaaaa r1, r2, r3
+@ CHECK-NEXT: ^
+
+@ Check that we get one suggestion: 'pushh' is 1 edit away, i.e. a deletion.
+
+ pushh r1, r2, r3
+
+@CHECK: error: invalid instruction, did you mean: push?
+@CHECK-NEXT: pushh r1, r2, r3
+@CHECK-NEXT: ^
+
+ adXd r1, r2, r3
+
+@ Check edit distance 1 and 2: 'add' has an edit distance of 1 (a deletion),
+@ and 'qadd' a distance of 2 (a deletion and an insertion)
+
+@ CHECK: error: invalid instruction, did you mean: add, qadd?
+@ CHECK-NEXT: adXd r1, r2, r3
+@ CHECK-NEXT: ^
+
+@ Check edit distance 1 and 2, just insertions:
+
+ ad r1, r2, r3
+
+@ CHECK: error: invalid instruction, did you mean: adc, add, adr, and, qadd?
+@ CHECK-NEXT: ad r1, r2, r3
+@ CHECK-NEXT: ^
+
+@ Check an instruction that is 2 edits away, and also has a lot of candidates:
+
+ ldre r1, r2, r3
+
+@ CHECK: error: invalid instruction, did you mean: ldr, ldrb, ldrd, ldrex, ldrexb, ldrexd, ldrexh, ldrh, ldrt?
+@ CHECK-NEXT: ldre r1, r2, r3
+@ CHECK-NEXT: ^
+
+@ Here it is checked that we don't suggest instructions that are not supported.
+@ For example, in Thumb mode we don't want to see suggestions 'faddd' or 'qadd'
+@ because they are not supported.
+
+ fadd r1, r2, r3
+
+@ CHECK-THUMB: error: invalid instruction, did you mean: add?
+@ CHECK-THUMB: fadd r1, r2, r3
+@ CHECK-THUMB: ^
+
+@ CHECK: error: invalid instruction, did you mean: add, qadd?
+@ CHECK-NEXT: fadd r1, r2, r3
+@ CHECK-NEXT: ^
diff --git a/test/MC/ARM/ldr-pseudo-unpredictable.s b/test/MC/ARM/ldr-pseudo-unpredictable.s
index b275dc71ab4b..ad5a176e0433 100644
--- a/test/MC/ARM/ldr-pseudo-unpredictable.s
+++ b/test/MC/ARM/ldr-pseudo-unpredictable.s
@@ -1,8 +1,8 @@
@RUN: llvm-mc -triple armv5-unknown-linux-gnueabi %s | FileCheck --check-prefix=CHECK-ARM %s
-@RUN: not llvm-mc -triple thumbv7-unknown-linux-gnueabi %s 2>&1 | FileCheck --check-prefix=CHECK-SP %s
+@RUN: llvm-mc -triple thumbv7-unknown-linux-gnueabi %s 2>&1 | FileCheck --check-prefix=CHECK-T2 %s
@RUN: not llvm-mc -triple thumbv5-unknown-linux-gnueabi %s 2>&1 | FileCheck --check-prefix=CHECK-NONE %s
@RUN: llvm-mc -triple armv5-base-apple-darwin %s | FileCheck --check-prefix=CHECK-DARWIN-ARM %s
-@RUN: not llvm-mc -triple thumbv7-base-apple-darwin %s 2>&1 | FileCheck --check-prefix=CHECK-DARWIN-SP %s
+@RUN: llvm-mc -triple thumbv7-base-apple-darwin %s 2>&1 | FileCheck --check-prefix=CHECK-DARWIN-T2 %s
@RUN: not llvm-mc -triple thumbv5-base.apple.darwin %s 2>&1 | FileCheck --check-prefix=CHECK-NONE %s
@ We don't do the transformation for rt = sp or pc
@@ -10,12 +10,12 @@
ldr pc, = 0x4
@ CHECK-ARM: ldr pc, .Ltmp[[TMP0:[0-9]+]]
@ CHECK-DARWIN-ARM: ldr pc, Ltmp0
-@ CHECK-SP: error: instruction requires: arm-mode
-@ CHECK-DARWIN-SP: error: instruction requires: arm-mode
-@ CHECK-NONE: error: instruction requires: arm-mode
+@ CHECK-T2: ldr.w pc, .Ltmp[[TMP0:[0-9]+]]
+@ CHECK-DARWIN-T2: ldr.w pc, Ltmp0
+@ CHECK-NONE: error: instruction requires: thumb2
ldr sp, = 0x8
@ CHECK-ARM: ldr sp, .Ltmp[[TMP1:[0-9]+]]
@ CHECK-DARWIN-ARM: ldr sp, Ltmp1
-@ CHECK-SP: ldr.w sp, .Ltmp[[TMP0:[0-9]+]]
-@ CHECK-DARWIN-SP: ldr.w sp, Ltmp0
-@ CHECK-NONE: error: instruction requires: arm-mode
+@ CHECK-T2: ldr.w sp, .Ltmp[[TMP1:[0-9]+]]
+@ CHECK-DARWIN-T2: ldr.w sp, Ltmp1
+@ CHECK-NONE: error: instruction requires: thumb2
diff --git a/test/MC/COFF/bad-expr.s b/test/MC/COFF/bad-expr.s
index ecbdd415c3a6..cbbd5d0c946f 100644
--- a/test/MC/COFF/bad-expr.s
+++ b/test/MC/COFF/bad-expr.s
@@ -1,7 +1,6 @@
// RUN: not llvm-mc -filetype=obj -triple i386-pc-win32 %s 2>&1 | FileCheck %s
-// CHECK: symbol '__ImageBase' can not be undefined in a subtraction expression
-
.data
_x:
+// CHECK: [[@LINE+1]]:{{[0-9]+}}: error: symbol '__ImageBase' can not be undefined in a subtraction expression
.long _x-__ImageBase
diff --git a/test/MC/COFF/cv-def-range-gap.s b/test/MC/COFF/cv-def-range-gap.s
index 9c1531819963..29f2def8e1bf 100644
--- a/test/MC/COFF/cv-def-range-gap.s
+++ b/test/MC/COFF/cv-def-range-gap.s
@@ -2,12 +2,13 @@
# This tries to test defrange gap edge cases.
-# CHECK: Local {
+# CHECK: LocalSym {
# CHECK: Type: int (0x74)
# CHECK: VarName: p
# CHECK: }
-# CHECK-NOT: Local {
-# CHECK: DefRangeRegister {
+# CHECK-NOT: LocalSym {
+# CHECK: DefRangeRegisterSym {
+# CHECK-NEXT: Kind: S_DEFRANGE_REGISTER (0x1141)
# CHECK-NEXT: Register: 23
# CHECK-NEXT: MayHaveNoName: 0
# CHECK-NEXT: LocalVariableAddrRange {
@@ -20,7 +21,8 @@
# CHECK-NEXT: Range: 0x1
# CHECK-NEXT: ]
# CHECK-NEXT: }
-# CHECK-NEXT: DefRangeRegister {
+# CHECK-NEXT: DefRangeRegisterSym {
+# CHECK-NEXT: Kind: S_DEFRANGE_REGISTER (0x1141)
# CHECK-NEXT: Register: 23
# CHECK-NEXT: MayHaveNoName: 0
# CHECK-NEXT: LocalVariableAddrRange {
@@ -29,7 +31,8 @@
# CHECK-NEXT: Range: 0x6
# CHECK-NEXT: }
# CHECK-NEXT: }
-# CHECK-NEXT: DefRangeRegister {
+# CHECK-NEXT: DefRangeRegisterSym {
+# CHECK-NEXT: Kind: S_DEFRANGE_REGISTER (0x1141)
# CHECK-NEXT: Register: 23
# CHECK-NEXT: MayHaveNoName: 0
# CHECK-NEXT: LocalVariableAddrRange {
@@ -38,7 +41,8 @@
# CHECK-NEXT: Range: 0x1
# CHECK-NEXT: }
# CHECK-NEXT: }
-# CHECK-NEXT: DefRangeRegister {
+# CHECK-NEXT: DefRangeRegisterSym {
+# CHECK-NEXT: Kind: S_DEFRANGE_REGISTER (0x1141)
# CHECK-NEXT: Register: 23
# CHECK-NEXT: MayHaveNoName: 0
# CHECK-NEXT: LocalVariableAddrRange {
diff --git a/test/MC/COFF/cv-def-range.s b/test/MC/COFF/cv-def-range.s
index 5ac0df7f7d96..7a90ec263683 100644
--- a/test/MC/COFF/cv-def-range.s
+++ b/test/MC/COFF/cv-def-range.s
@@ -77,18 +77,18 @@ Ltmp3:
.short 4431 # Record kind: S_PROC_ID_END
.cv_def_range Lvar_begin0 Lvar_end0, "\102\021\374\377\377\377"
-# CHECK: DefRangeFramePointerRel {
+# CHECK: DefRangeFramePointerRelSym {
# CHECK: Offset: -4
# CHECK: LocalVariableAddrRange {
# CHECK: OffsetStart: .text+0x9
# CHECK: ISectStart: 0x0
# CHECK: Range: 0xF
# CHECK: }
+# CHECK: BlockRelocations [
+# CHECK: 0x4 IMAGE_REL_I386_SECREL .text
+# CHECK: 0x8 IMAGE_REL_I386_SECTION .text
+# CHECK: ]
# CHECK: }
-# CHECK: BlockRelocations [
-# CHECK: 0x4 IMAGE_REL_I386_SECREL .text
-# CHECK: 0x8 IMAGE_REL_I386_SECTION .text
-# CHECK: ]
Ltmp1:
.p2align 2
diff --git a/test/MC/COFF/cv-inline-linetable-infloop.s b/test/MC/COFF/cv-inline-linetable-infloop.s
index 804ed6f404d9..6b8e708befc4 100644
--- a/test/MC/COFF/cv-inline-linetable-infloop.s
+++ b/test/MC/COFF/cv-inline-linetable-infloop.s
@@ -1,6 +1,6 @@
# RUN: llvm-mc -triple=x86_64-pc-win32 -filetype=obj < %s | llvm-readobj -codeview | FileCheck %s
-# CHECK: InlineSite {
+# CHECK: InlineSiteSym {
# CHECK: BinaryAnnotations [
# CHECK: ChangeLineOffset: 1
# CHECK: ChangeCodeLength: 0x2
diff --git a/test/MC/COFF/cv-inline-linetable-unlikely.s b/test/MC/COFF/cv-inline-linetable-unlikely.s
index dd3a66f419cc..bfb745bd9bb1 100644
--- a/test/MC/COFF/cv-inline-linetable-unlikely.s
+++ b/test/MC/COFF/cv-inline-linetable-unlikely.s
@@ -19,13 +19,13 @@
# calls to __asan_report*, for which it is very important to have an accurate
# stack trace.
-# CHECK: ProcStart {
+# CHECK: GlobalProcIdSym {
# CHECK: FunctionType: g (0x1003)
# CHECK: CodeOffset: g+0x0
# CHECK: DisplayName: g
# CHECK: LinkageName: g
# CHECK: }
-# CHECK: InlineSite {
+# CHECK: InlineSiteSym {
# CHECK: Inlinee: f (0x1002)
# CHECK: BinaryAnnotations [
# CHECK-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0xE, LineOffset: 1}
diff --git a/test/MC/COFF/cv-inline-linetable-unreachable.s b/test/MC/COFF/cv-inline-linetable-unreachable.s
index 0f29d1667c35..d894fc758fb1 100644
--- a/test/MC/COFF/cv-inline-linetable-unreachable.s
+++ b/test/MC/COFF/cv-inline-linetable-unreachable.s
@@ -76,7 +76,7 @@ Ltmp6:
.short 4429
.asciz "\000\000\000\000\000\000\000\000\003\020\000"
.cv_inline_linetable 1 1 3 Lfunc_begin0 Lfunc_end0
-# CHECK: InlineSite {
+# CHECK: InlineSiteSym {
# CHECK: PtrParent: 0x0
# CHECK: PtrEnd: 0x0
# CHECK: Inlinee: f (0x1003)
diff --git a/test/MC/COFF/cv-inline-linetable.s b/test/MC/COFF/cv-inline-linetable.s
index bb68fcde21be..2c89f9836c42 100644
--- a/test/MC/COFF/cv-inline-linetable.s
+++ b/test/MC/COFF/cv-inline-linetable.s
@@ -88,7 +88,7 @@ Ltmp4:
.short 4429
.asciz "\000\000\000\000\000\000\000\000\003\020\000"
.cv_inline_linetable 1 1 9 Lfunc_begin0 Lfunc_end0
-# CHECK: InlineSite {
+# CHECK: InlineSiteSym {
# CHECK: PtrParent: 0x0
# CHECK: PtrEnd: 0x0
# CHECK: Inlinee: bar (0x1003)
@@ -106,7 +106,7 @@ Ltmp6:
.short 4429
.asciz "\000\000\000\000\000\000\000\000\004\020\000"
.cv_inline_linetable 2 1 3 Lfunc_begin0 Lfunc_end0
-# CHECK: InlineSite {
+# CHECK: InlineSiteSym {
# CHECK: PtrParent: 0x0
# CHECK: PtrEnd: 0x0
# CHECK: Inlinee: foo (0x1004)
diff --git a/test/MC/Disassembler/Mips/mt/valid-r2-el.txt b/test/MC/Disassembler/Mips/mt/valid-r2-el.txt
new file mode 100644
index 000000000000..62e7092086aa
--- /dev/null
+++ b/test/MC/Disassembler/Mips/mt/valid-r2-el.txt
@@ -0,0 +1,32 @@
+# RUN: llvm-mc --disassemble %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+mt | FileCheck %s
+0xc1 0x0b 0x60 0x41 # CHECK: dmt
+0xc1 0x0b 0x65 0x41 # CHECK: dmt $5
+0xe1 0x0b 0x60 0x41 # CHECK: emt
+0xe1 0x0b 0x64 0x41 # CHECK: emt $4
+0x01 0x00 0x60 0x41 # CHECK: dvpe
+0x01 0x00 0x66 0x41 # CHECK: dvpe $6
+0x21 0x00 0x60 0x41 # CHECK: evpe
+0x21 0x00 0x64 0x41 # CHECK: evpe $4
+0x08 0x10 0x65 0x7c # CHECK: fork $2, $3, $5
+0x09 0x00 0x80 0x7c # CHECK: yield $4
+0x09 0x20 0xa0 0x7c # CHECK: yield $4, $5
+0x02 0x20 0x05 0x41 # CHECK: mftr $4, $5, 0, 2, 0
+0x20 0x20 0x05 0x41 # CHECK: mftr $4, $5, 1, 0, 0
+0x21 0x20 0x00 0x41 # CHECK: mftr $4, $zero, 1, 1, 0
+0x21 0x20 0x0a 0x41 # CHECK: mftr $4, $10, 1, 1, 0
+0x22 0x20 0x0a 0x41 # CHECK: mftr $4, $10, 1, 2, 0
+0x32 0x20 0x0a 0x41 # CHECK: mftr $4, $10, 1, 2, 1
+0x23 0x20 0x1a 0x41 # CHECK: mftr $4, $26, 1, 3, 0
+0x23 0x20 0x1f 0x41 # CHECK: mftr $4, $ra, 1, 3, 0
+0x24 0x20 0x0e 0x41 # CHECK: mftr $4, $14, 1, 4, 0
+0x25 0x20 0x0f 0x41 # CHECK: mftr $4, $15, 1, 5, 0
+0x02 0x28 0x84 0x41 # CHECK: mttr $4, $5, 0, 2, 0
+0x20 0x28 0x84 0x41 # CHECK: mttr $4, $5, 1, 0, 0
+0x21 0x00 0x84 0x41 # CHECK: mttr $4, $zero, 1, 1, 0
+0x21 0x50 0x84 0x41 # CHECK: mttr $4, $10, 1, 1, 0
+0x22 0x50 0x84 0x41 # CHECK: mttr $4, $10, 1, 2, 0
+0x32 0x50 0x84 0x41 # CHECK: mttr $4, $10, 1, 2, 1
+0x23 0xd0 0x84 0x41 # CHECK: mttr $4, $26, 1, 3, 0
+0x23 0xf8 0x84 0x41 # CHECK: mttr $4, $ra, 1, 3, 0
+0x24 0x70 0x84 0x41 # CHECK: mttr $4, $14, 1, 4, 0
+0x25 0x78 0x84 0x41 # CHECK: mttr $4, $15, 1, 5, 0
diff --git a/test/MC/Disassembler/Mips/mt/valid-r2.txt b/test/MC/Disassembler/Mips/mt/valid-r2.txt
new file mode 100644
index 000000000000..4786d8b5591f
--- /dev/null
+++ b/test/MC/Disassembler/Mips/mt/valid-r2.txt
@@ -0,0 +1,32 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+mt | FileCheck %s
+0x41 0x60 0x0b 0xc1 # CHECK: dmt
+0x41 0x65 0x0b 0xc1 # CHECK: dmt $5
+0x41 0x60 0x0b 0xe1 # CHECK: emt
+0x41 0x64 0x0b 0xe1 # CHECK: emt $4
+0x41 0x60 0x00 0x01 # CHECK: dvpe
+0x41 0x66 0x00 0x01 # CHECK: dvpe $6
+0x41 0x60 0x00 0x21 # CHECK: evpe
+0x41 0x64 0x00 0x21 # CHECK: evpe $4
+0x7c 0x65 0x10 0x08 # CHECK: fork $2, $3, $5
+0x7c 0x80 0x00 0x09 # CHECK: yield $4
+0x7c 0xa0 0x20 0x09 # CHECK: yield $4, $5
+0x41 0x05 0x20 0x02 # CHECK: mftr $4, $5, 0, 2, 0
+0x41 0x05 0x20 0x20 # CHECK: mftr $4, $5, 1, 0, 0
+0x41 0x00 0x20 0x21 # CHECK: mftr $4, $zero, 1, 1, 0
+0x41 0x0a 0x20 0x21 # CHECK: mftr $4, $10, 1, 1, 0
+0x41 0x0a 0x20 0x22 # CHECK: mftr $4, $10, 1, 2, 0
+0x41 0x0a 0x20 0x32 # CHECK: mftr $4, $10, 1, 2, 1
+0x41 0x1a 0x20 0x23 # CHECK: mftr $4, $26, 1, 3, 0
+0x41 0x1f 0x20 0x23 # CHECK: mftr $4, $ra, 1, 3, 0
+0x41 0x0e 0x20 0x24 # CHECK: mftr $4, $14, 1, 4, 0
+0x41 0x0f 0x20 0x25 # CHECK: mftr $4, $15, 1, 5, 0
+0x41 0x84 0x28 0x02 # CHECK: mttr $4, $5, 0, 2, 0
+0x41 0x84 0x28 0x20 # CHECK: mttr $4, $5, 1, 0, 0
+0x41 0x84 0x00 0x21 # CHECK: mttr $4, $zero, 1, 1, 0
+0x41 0x84 0x50 0x21 # CHECK: mttr $4, $10, 1, 1, 0
+0x41 0x84 0x50 0x22 # CHECK: mttr $4, $10, 1, 2, 0
+0x41 0x84 0x50 0x32 # CHECK: mttr $4, $10, 1, 2, 1
+0x41 0x84 0xd0 0x23 # CHECK: mttr $4, $26, 1, 3, 0
+0x41 0x84 0xf8 0x23 # CHECK: mttr $4, $ra, 1, 3, 0
+0x41 0x84 0x70 0x24 # CHECK: mttr $4, $14, 1, 4, 0
+0x41 0x84 0x78 0x25 # CHECK: mttr $4, $15, 1, 5, 0
diff --git a/test/MC/ELF/bad-expr3.s b/test/MC/ELF/bad-expr3.s
index 990167cda53f..cf5d6f47335f 100644
--- a/test/MC/ELF/bad-expr3.s
+++ b/test/MC/ELF/bad-expr3.s
@@ -1,8 +1,7 @@
// RUN: not llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o /dev/null \
// RUN: 2>&1 | FileCheck %s
-// CHECK: Cannot represent a difference across sections
-
+// CHECK: [[@LINE+1]]:{{[0-9]+}}: error: Cannot represent a difference across sections
.long foo - bar
.section .zed
foo:
diff --git a/test/MC/Mips/addend.s b/test/MC/Mips/addend.s
new file mode 100644
index 000000000000..93ce4f413aeb
--- /dev/null
+++ b/test/MC/Mips/addend.s
@@ -0,0 +1,21 @@
+# RUN: llvm-mc -filetype=obj -triple=mips-unknown-linux -mcpu=mips32r6 %s -o %t.o
+# RUN: llvm-readobj -s -section-data %t.o | FileCheck %s
+
+# CHECK: Name: .text
+# CHECK-NEXT: Type:
+# CHECK-NEXT: Flags [
+# CHECK-NEXT: SHF_ALLOC
+# CHECK-NEXT: SHF_EXECINSTR
+# CHECK-NEXT: ]
+# CHECK-NEXT: Address:
+# CHECK-NEXT: Offset:
+# CHECK-NEXT: Size:
+# CHECK-NEXT: Link:
+# CHECK-NEXT: Info:
+# CHECK-NEXT: AddressAlignment:
+# CHECK-NEXT: EntrySize:
+# CHECK-NEXT: SectionData (
+# CHECK-NEXT: 0000: 00000008 |
+# CHECK-NEXT: )
+
+ .word _foo+8-.
diff --git a/test/MC/Mips/mt/abiflag.s b/test/MC/Mips/mt/abiflag.s
new file mode 100644
index 000000000000..b4769cba4c2d
--- /dev/null
+++ b/test/MC/Mips/mt/abiflag.s
@@ -0,0 +1,10 @@
+# RUN: llvm-mc < %s -arch=mips -mcpu=mips32r2 -mattr=+mt -filetype=obj -o - \
+# RUN: | llvm-readobj -mips-abi-flags | FileCheck %s
+
+# Test that the usage of the MT ASE is recorded in .MIPS.abiflags
+
+# CHECK: ASEs
+# CHECK-NEXT: MT (0x40)
+
+ .text
+ nop
diff --git a/test/MC/Mips/mt/invalid-wrong-error.s b/test/MC/Mips/mt/invalid-wrong-error.s
new file mode 100644
index 000000000000..0247089b70ae
--- /dev/null
+++ b/test/MC/Mips/mt/invalid-wrong-error.s
@@ -0,0 +1,3 @@
+# RUN: not llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt < %s 2>&1 | FileCheck %s
+ mftr 0($4), $5, 0, 0, 0 # CHECK: error: unexpected token in argument list
+ mttr 0($4), $5, 0, 0, 0 # CHECK: error: unexpected token in argument list
diff --git a/test/MC/Mips/mt/invalid.s b/test/MC/Mips/mt/invalid.s
new file mode 100644
index 000000000000..d4055c4a50f4
--- /dev/null
+++ b/test/MC/Mips/mt/invalid.s
@@ -0,0 +1,27 @@
+# RUN: not llvm-mc -arch=mips -mcpu=mips32 -mattr=+mt < %s 2>&1 | FileCheck %s
+ dmt 4 # CHECK: error: invalid operand for instruction
+ dmt $4, $5 # CHECK: error: invalid operand for instruction
+ dmt $5, 0($4) # CHECK: error: invalid operand for instruction
+ emt 4 # CHECK: error: invalid operand for instruction
+ emt $4, $5 # CHECK: error: invalid operand for instruction
+ emt $5, 0($5) # CHECK: error: invalid operand for instruction
+ dvpe 4 # CHECK: error: invalid operand for instruction
+ dvpe $4, $5 # CHECK: error: invalid operand for instruction
+ dvpe $5, 0($4) # CHECK: error: invalid operand for instruction
+ evpe 4 # CHECK: error: invalid operand for instruction
+ evpe $4, $5 # CHECK: error: invalid operand for instruction
+ evpe $5, 0($5) # CHECK: error: invalid operand for instruction
+ mftr $4, 0($5), 0, 0, 0 # CHECK: error: invalid operand for instruction
+ mftr $4, $5, 2, 0, 0 # CHECK: error: expected 1-bit unsigned immediate
+ mftr $4, $5, -1, 0, 0 # CHECK: error: expected 1-bit unsigned immediate
+ mftr $4, $5, 0, 8, 0 # CHECK: error: expected 3-bit unsigned immediate
+ mftr $4, $5, 0, -1, 0 # CHECK: error: expected 3-bit unsigned immediate
+ mftr $4, $4, 0, 0, 2 # CHECK: error: expected 1-bit unsigned immediate
+ mftr $4, $5, 0, 0, -1 # CHECK: error: expected 1-bit unsigned immediate
+ mttr $4, 0($5), 0, 0, 0 # CHECK: error: invalid operand for instruction
+ mttr $4, $5, 2, 0, 0 # CHECK: error: expected 1-bit unsigned immediate
+ mttr $4, $5, -1, 0, 0 # CHECK: error: expected 1-bit unsigned immediate
+ mttr $4, $5, 0, 8, 0 # CHECK: error: expected 3-bit unsigned immediate
+ mttr $4, $5, 0, -1, 0 # CHECK: error: expected 3-bit unsigned immediate
+ mttr $4, $4, 0, 0, 2 # CHECK: error: expected 1-bit unsigned immediate
+ mttr $4, $5, 0, 0, -1 # CHECK: error: expected 1-bit unsigned immediate
diff --git a/test/MC/Mips/mt/mftr-mttr-aliases-invalid-wrong-error.s b/test/MC/Mips/mt/mftr-mttr-aliases-invalid-wrong-error.s
new file mode 100644
index 000000000000..4e872412e6ef
--- /dev/null
+++ b/test/MC/Mips/mt/mftr-mttr-aliases-invalid-wrong-error.s
@@ -0,0 +1,18 @@
+# RUN: not llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s \
+# RUN: 2>&1 | FileCheck %s
+
+# The integrated assembler produces a wrong or misleading error message.
+
+ mftc0 0($4), $5 # CHECK: error: unexpected token in argument list
+ mftc0 0($4), $5, 1 # CHECK: error: unexpected token in argument list
+ mftgpr 0($4), $5 # CHECK: error: unexpected token in argument list
+ mftlo 0($3) # CHECK: error: unexpected token in argument list
+ mftlo 0($3), $ac1 # CHECK: error: unexpected token in argument list
+ mfthi 0($3) # CHECK: error: unexpected token in argument list
+ mfthi 0($3), $ac1 # CHECK: error: unexpected token in argument list
+ mftacx 0($3) # CHECK: error: unexpected token in argument list
+ mftacx 0($3), $ac1 # CHECK: error: unexpected token in argument list
+ mftdsp 0($4) # CHECK: error: unexpected token in argument list
+ mftc1 0($4), $f4 # CHECK: error: unexpected token in argument list
+ mfthc1 0($4), $f4 # CHECK: error: unexpected token in argument list
+ cftc1 0($4), $f8 # CHECK: error: unexpected token in argument list
diff --git a/test/MC/Mips/mt/mftr-mttr-aliases-invalid.s b/test/MC/Mips/mt/mftr-mttr-aliases-invalid.s
new file mode 100644
index 000000000000..06ae8c72e654
--- /dev/null
+++ b/test/MC/Mips/mt/mftr-mttr-aliases-invalid.s
@@ -0,0 +1,23 @@
+# RUN: not llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s \
+# RUN: 2>&1 | FileCheck %s
+
+ mftc0 $4, 0($5) # CHECK: error: invalid operand for instruction
+ mftc0 $4, 0($5), 1 # CHECK: error: invalid operand for instruction
+ mftc0 $4, $5, -1 # CHECK: error: expected 3-bit unsigned immediate
+ mftc0 $4, $5, 9 # CHECK: error: expected 3-bit unsigned immediate
+ mftc0 $4, $5, $6 # CHECK: error: expected 3-bit unsigned immediate
+ mftgpr $4, 0($5) # CHECK: error: invalid operand for instruction
+ mftgpr $4, $5, $6 # CHECK: error: invalid operand for instruction
+ mftlo $3, 0($ac1) # CHECK: error: invalid operand for instruction
+ mftlo $4, $ac1, $4 # CHECK: error: invalid operand for instruction
+ mfthi $3, 0($ac1) # CHECK: error: invalid operand for instruction
+ mfthi $4, $ac1, $4 # CHECK: error: invalid operand for instruction
+ mftacx $3, 0($ac1) # CHECK: error: invalid operand for instruction
+ mftacx $4, $ac1, $4 # CHECK: error: invalid operand for instruction
+ mftdsp $4, $5 # CHECK: error: invalid operand for instruction
+ mftdsp $4, $f5 # CHECK: error: invalid operand for instruction
+ mftdsp $4, $ac0 # CHECK: error: invalid operand for instruction
+ mftc1 $4, 0($f4) # CHECK: error: invalid operand for instruction
+ mfthc1 $4, 0($f4) # CHECK: error: invalid operand for instruction
+ cftc1 $4, 0($f4) # CHECK: error: invalid operand for instruction
+ cftc1 $4, $f4, $5 # CHECK: error: invalid operand for instruction
diff --git a/test/MC/Mips/mt/mftr-mttr-aliases.s b/test/MC/Mips/mt/mftr-mttr-aliases.s
new file mode 100644
index 000000000000..92ed9f9281f2
--- /dev/null
+++ b/test/MC/Mips/mt/mftr-mttr-aliases.s
@@ -0,0 +1,47 @@
+# RUN: llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s | FileCheck %s
+
+# Check the various aliases of the m[ft]tr instruction.
+
+ mftc0 $4, $5 # CHECK: mftr $4, $5, 0, 0, 0 # encoding: [0x41,0x05,0x20,0x00]
+ mftc0 $6, $7, 1 # CHECK: mftr $6, $7, 0, 1, 0 # encoding: [0x41,0x07,0x30,0x01]
+ mftgpr $5, $9 # CHECK: mftr $5, $9, 1, 0, 0 # encoding: [0x41,0x09,0x28,0x20]
+ mftlo $3 # CHECK: mftr $3, $zero, 1, 1, 0 # encoding: [0x41,0x00,0x18,0x21]
+ mftlo $3, $ac0 # CHECK: mftr $3, $zero, 1, 1, 0 # encoding: [0x41,0x00,0x18,0x21]
+ mftlo $3, $ac1 # CHECK: mftr $3, $4, 1, 1, 0 # encoding: [0x41,0x04,0x18,0x21]
+ mftlo $3, $ac2 # CHECK: mftr $3, $8, 1, 1, 0 # encoding: [0x41,0x08,0x18,0x21]
+ mftlo $3, $ac3 # CHECK: mftr $3, $12, 1, 1, 0 # encoding: [0x41,0x0c,0x18,0x21]
+ mfthi $3, $ac0 # CHECK: mftr $3, $1, 1, 1, 0 # encoding: [0x41,0x01,0x18,0x21]
+ mfthi $3, $ac1 # CHECK: mftr $3, $5, 1, 1, 0 # encoding: [0x41,0x05,0x18,0x21]
+ mfthi $3, $ac2 # CHECK: mftr $3, $9, 1, 1, 0 # encoding: [0x41,0x09,0x18,0x21]
+ mfthi $3, $ac3 # CHECK: mftr $3, $13, 1, 1, 0 # encoding: [0x41,0x0d,0x18,0x21]
+ mftacx $3, $ac0 # CHECK: mftr $3, $2, 1, 1, 0 # encoding: [0x41,0x02,0x18,0x21]
+ mftacx $3, $ac1 # CHECK: mftr $3, $6, 1, 1, 0 # encoding: [0x41,0x06,0x18,0x21]
+ mftacx $3, $ac2 # CHECK: mftr $3, $10, 1, 1, 0 # encoding: [0x41,0x0a,0x18,0x21]
+ mftacx $3, $ac3 # CHECK: mftr $3, $14, 1, 1, 0 # encoding: [0x41,0x0e,0x18,0x21]
+ mftdsp $4 # CHECK: mftr $4, $16, 1, 1, 0 # encoding: [0x41,0x10,0x20,0x21]
+ mftc1 $4, $f5 # CHECK: mftr $4, $5, 1, 2, 0 # encoding: [0x41,0x05,0x20,0x22]
+ mfthc1 $4, $f5 # CHECK: mftr $4, $5, 1, 2, 1 # encoding: [0x41,0x05,0x20,0x32]
+ cftc1 $4, $f9 # CHECK: mftr $4, $9, 1, 3, 0 # encoding: [0x41,0x09,0x20,0x23]
+
+ mttc0 $4, $5 # CHECK: mttr $4, $5, 0, 0, 0 # encoding: [0x41,0x84,0x28,0x00]
+ mttc0 $6, $7, 1 # CHECK: mttr $6, $7, 0, 1, 0 # encoding: [0x41,0x86,0x38,0x01]
+ mttgpr $5, $9 # CHECK: mttr $5, $9, 1, 0, 0 # encoding: [0x41,0x85,0x48,0x20]
+ mttlo $3 # CHECK: mttr $3, $zero, 1, 1, 0 # encoding: [0x41,0x83,0x00,0x21]
+ mttlo $3, $ac0 # CHECK: mttr $3, $zero, 1, 1, 0 # encoding: [0x41,0x83,0x00,0x21]
+ mttlo $3, $ac1 # CHECK: mttr $3, $4, 1, 1, 0 # encoding: [0x41,0x83,0x20,0x21]
+ mttlo $3, $ac2 # CHECK: mttr $3, $8, 1, 1, 0 # encoding: [0x41,0x83,0x40,0x21]
+ mttlo $3, $ac3 # CHECK: mttr $3, $12, 1, 1, 0 # encoding: [0x41,0x83,0x60,0x21]
+ mtthi $3 # CHECK: mttr $3, $1, 1, 1, 0 # encoding: [0x41,0x83,0x08,0x21]
+ mtthi $3, $ac0 # CHECK: mttr $3, $1, 1, 1, 0 # encoding: [0x41,0x83,0x08,0x21]
+ mtthi $3, $ac1 # CHECK: mttr $3, $5, 1, 1, 0 # encoding: [0x41,0x83,0x28,0x21]
+ mtthi $3, $ac2 # CHECK: mttr $3, $9, 1, 1, 0 # encoding: [0x41,0x83,0x48,0x21]
+ mtthi $3, $ac3 # CHECK: mttr $3, $13, 1, 1, 0 # encoding: [0x41,0x83,0x68,0x21]
+ mttacx $3 # CHECK: mttr $3, $2, 1, 1, 0 # encoding: [0x41,0x83,0x10,0x21]
+ mttacx $3, $ac0 # CHECK: mttr $3, $2, 1, 1, 0 # encoding: [0x41,0x83,0x10,0x21]
+ mttacx $3, $ac1 # CHECK: mttr $3, $6, 1, 1, 0 # encoding: [0x41,0x83,0x30,0x21]
+ mttacx $3, $ac2 # CHECK: mttr $3, $10, 1, 1, 0 # encoding: [0x41,0x83,0x50,0x21]
+ mttacx $3, $ac3 # CHECK: mttr $3, $14, 1, 1, 0 # encoding: [0x41,0x83,0x70,0x21]
+ mttdsp $4 # CHECK: mttr $4, $16, 1, 1, 0 # encoding: [0x41,0x84,0x80,0x21]
+ mttc1 $4, $f5 # CHECK: mttr $4, $5, 1, 2, 0 # encoding: [0x41,0x84,0x28,0x22]
+ mtthc1 $4, $f5 # CHECK: mttr $4, $5, 1, 2, 1 # encoding: [0x41,0x84,0x28,0x32]
+ cttc1 $4, $f9 # CHECK: mttr $4, $9, 1, 3, 0 # encoding: [0x41,0x84,0x48,0x23]
diff --git a/test/MC/Mips/mt/mftr-mttr-reserved-valid.s b/test/MC/Mips/mt/mftr-mttr-reserved-valid.s
new file mode 100644
index 000000000000..c40e81bfc7d7
--- /dev/null
+++ b/test/MC/Mips/mt/mftr-mttr-reserved-valid.s
@@ -0,0 +1,8 @@
+# RUN: llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s | FileCheck %s
+
+# The selector value and register values here are marked as reserved in the
+# documentation, but GAS accepts them without warning.
+ mftr $31, $31, 1, 1, 0 # CHECK: mftr $ra, $ra, 1, 1, 0 # encoding: [0x41,0x1f,0xf8,0x21]
+ mttr $31, $31, 1, 1, 0 # CHECK: mttr $ra, $ra, 1, 1, 0 # encoding: [0x41,0x9f,0xf8,0x21]
+ mftr $31, $13, 1, 6, 0 # CHECK: mftr $ra, $13, 1, 6, 0 # encoding: [0x41,0x0d,0xf8,0x26]
+ mttr $31, $13, 1, 6, 0 # CHECK: mttr $ra, $13, 1, 6, 0 # encoding: [0x41,0x9f,0x68,0x26]
diff --git a/test/MC/Mips/mt/module-directive-invalid.s b/test/MC/Mips/mt/module-directive-invalid.s
new file mode 100644
index 000000000000..38baaa07cdc1
--- /dev/null
+++ b/test/MC/Mips/mt/module-directive-invalid.s
@@ -0,0 +1,6 @@
+# RUN: not llvm-mc -arch=mips -mcpu=mips32r5 < %s 2>&1 | FileCheck %s
+
+# CHECK: error: .module directive must appear before any code
+ .set nomips16
+ .module mt
+ nop
diff --git a/test/MC/Mips/mt/module-directive.s b/test/MC/Mips/mt/module-directive.s
new file mode 100644
index 000000000000..d316f054eaae
--- /dev/null
+++ b/test/MC/Mips/mt/module-directive.s
@@ -0,0 +1,16 @@
+# RUN: llvm-mc < %s -arch=mips -mcpu=mips32r2 -filetype=obj -o - | \
+# RUN: llvm-readobj -mips-abi-flags | FileCheck --check-prefix=CHECK-OBJ %s
+# RUN: llvm-mc < %s -arch=mips -mcpu=mips32r2 -filetype=asm -o - | \
+# RUN: FileCheck --check-prefix=CHECK-ASM %s
+
+# Test that the .module directive sets the MT flag in .MIPS.abiflags when
+# assembling to object files.
+
+# Test that the .module directive is re-emitted when expanding assembly.
+
+# CHECK-OBJ: ASEs
+# CHECK-OBJ-NEXT: MT (0x40)
+
+# CHECK-ASM: .module mt
+.module mt
+nop
diff --git a/test/MC/Mips/mt/set-directive.s b/test/MC/Mips/mt/set-directive.s
new file mode 100644
index 000000000000..53ed4b273795
--- /dev/null
+++ b/test/MC/Mips/mt/set-directive.s
@@ -0,0 +1,14 @@
+# RUN: llvm-mc < %s -arch=mips -mcpu=mips32r2 -filetype=obj -o - | \
+# RUN: llvm-readobj -mips-abi-flags | FileCheck %s --check-prefix=CHECK-OBJ
+# RUN: llvm-mc < %s -arch=mips -mcpu=mips32r2 -filetype=asm -o - | \
+# RUN: FileCheck %s --check-prefix=CHECK-ASM
+
+# Test that the MT ASE flag in .MIPS.abiflags is _not_ set by .set.
+# Test that '.set mt' is emitted by the asm target streamer.
+
+# CHECK-OBJ: ASEs
+# CHECK-OBJ-NOT: MT (0x40)
+
+# CHECK-ASM: .set mt
+ .set mt
+ nop
diff --git a/test/MC/Mips/mt/valid.s b/test/MC/Mips/mt/valid.s
new file mode 100644
index 000000000000..9fa07870a61f
--- /dev/null
+++ b/test/MC/Mips/mt/valid.s
@@ -0,0 +1,33 @@
+# RUN: llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s \
+# RUN: | FileCheck %s
+ dmt # CHECK: dmt # encoding: [0x41,0x60,0x0b,0xc1]
+ dmt $5 # CHECK: dmt $5 # encoding: [0x41,0x65,0x0b,0xc1]
+ emt # CHECK: emt # encoding: [0x41,0x60,0x0b,0xe1]
+ emt $4 # CHECK: emt $4 # encoding: [0x41,0x64,0x0b,0xe1]
+ dvpe # CHECK: dvpe # encoding: [0x41,0x60,0x00,0x01]
+ dvpe $6 # CHECK: dvpe $6 # encoding: [0x41,0x66,0x00,0x01]
+ evpe # CHECK: evpe # encoding: [0x41,0x60,0x00,0x21]
+ evpe $4 # CHECK: evpe $4 # encoding: [0x41,0x64,0x00,0x21]
+ fork $2, $3, $5 # CHECK: fork $2, $3, $5 # encoding: [0x7c,0x65,0x10,0x08]
+ yield $4 # CHECK: yield $4 # encoding: [0x7c,0x80,0x00,0x09]
+ yield $4, $5 # CHECK: yield $4, $5 # encoding: [0x7c,0xa0,0x20,0x09]
+ mftr $4, $5, 0, 2, 0 # CHECK: mftr $4, $5, 0, 2, 0 # encoding: [0x41,0x05,0x20,0x02]
+ mftr $4, $5, 1, 0, 0 # CHECK: mftr $4, $5, 1, 0, 0 # encoding: [0x41,0x05,0x20,0x20]
+ mftr $4, $0, 1, 1, 0 # CHECK: mftr $4, $zero, 1, 1, 0 # encoding: [0x41,0x00,0x20,0x21]
+ mftr $4, $10, 1, 1, 0 # CHECK: mftr $4, $10, 1, 1, 0 # encoding: [0x41,0x0a,0x20,0x21]
+ mftr $4, $10, 1, 2, 0 # CHECK: mftr $4, $10, 1, 2, 0 # encoding: [0x41,0x0a,0x20,0x22]
+ mftr $4, $10, 1, 2, 1 # CHECK: mftr $4, $10, 1, 2, 1 # encoding: [0x41,0x0a,0x20,0x32]
+ mftr $4, $26, 1, 3, 0 # CHECK: mftr $4, $26, 1, 3, 0 # encoding: [0x41,0x1a,0x20,0x23]
+ mftr $4, $31, 1, 3, 0 # CHECK: mftr $4, $ra, 1, 3, 0 # encoding: [0x41,0x1f,0x20,0x23]
+ mftr $4, $14, 1, 4, 0 # CHECK: mftr $4, $14, 1, 4, 0 # encoding: [0x41,0x0e,0x20,0x24]
+ mftr $4, $15, 1, 5, 0 # CHECK: mftr $4, $15, 1, 5, 0 # encoding: [0x41,0x0f,0x20,0x25]
+ mttr $4, $5, 0, 2, 0 # CHECK: mttr $4, $5, 0, 2, 0 # encoding: [0x41,0x84,0x28,0x02]
+ mttr $4, $5, 1, 0, 0 # CHECK: mttr $4, $5, 1, 0, 0 # encoding: [0x41,0x84,0x28,0x20]
+ mttr $4, $0, 1, 1, 0 # CHECK: mttr $4, $zero, 1, 1, 0 # encoding: [0x41,0x84,0x00,0x21]
+ mttr $4, $10, 1, 1, 0 # CHECK: mttr $4, $10, 1, 1, 0 # encoding: [0x41,0x84,0x50,0x21]
+ mttr $4, $10, 1, 2, 0 # CHECK: mttr $4, $10, 1, 2, 0 # encoding: [0x41,0x84,0x50,0x22]
+ mttr $4, $10, 1, 2, 1 # CHECK: mttr $4, $10, 1, 2, 1 # encoding: [0x41,0x84,0x50,0x32]
+ mttr $4, $26, 1, 3, 0 # CHECK: mttr $4, $26, 1, 3, 0 # encoding: [0x41,0x84,0xd0,0x23]
+ mttr $4, $31, 1, 3, 0 # CHECK: mttr $4, $ra, 1, 3, 0 # encoding: [0x41,0x84,0xf8,0x23]
+ mttr $4, $14, 1, 4, 0 # CHECK: mttr $4, $14, 1, 4, 0 # encoding: [0x41,0x84,0x70,0x24]
+ mttr $4, $15, 1, 5, 0 # CHECK: mttr $4, $15, 1, 5, 0 # encoding: [0x41,0x84,0x78,0x25]
diff --git a/test/MC/WebAssembly/array-fill.ll b/test/MC/WebAssembly/array-fill.ll
new file mode 100644
index 000000000000..4feabc0748e0
--- /dev/null
+++ b/test/MC/WebAssembly/array-fill.ll
@@ -0,0 +1,14 @@
+; RUN: llc -filetype=obj %s -o - | obj2yaml | FileCheck %s
+; PR33624
+
+source_filename = "ws.c"
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+%struct.bd = type { i8 }
+
+@gBd = hidden global [2 x %struct.bd] [%struct.bd { i8 1 }, %struct.bd { i8 2 }], align 1
+
+; CHECK: - Type: DATA
+; CHECK: Content: '0102'
+; CHECK: DataSize: 2
diff --git a/test/MC/WebAssembly/external-data.ll b/test/MC/WebAssembly/external-data.ll
index 6914736ac671..b8c97453413e 100644
--- a/test/MC/WebAssembly/external-data.ll
+++ b/test/MC/WebAssembly/external-data.ll
@@ -13,7 +13,8 @@
; CHECK: Index: 0
; CHECK: Offset: 0x0000000E
; CHECK: Segments:
-; CHECK: - Index: 0
+; CHECK: - SectionOffset: 6
+; CHECK: MemoryIndex: 0
; CHECK: Offset:
; CHECK: Opcode: I32_CONST
; CHECK: Value: 0
diff --git a/test/MC/WebAssembly/external-func-address.ll b/test/MC/WebAssembly/external-func-address.ll
index 4022b2c9bae9..53da9805f987 100644
--- a/test/MC/WebAssembly/external-func-address.ll
+++ b/test/MC/WebAssembly/external-func-address.ll
@@ -2,24 +2,33 @@
; Verify that addresses of external functions generate correctly typed
; imports and relocations of type R_TABLE_INDEX_I32.
-declare void @f1() #1
-@ptr_to_f1 = hidden global void ()* @f1, align 4
+declare void @f1(i32) #1
+@ptr_to_f1 = hidden global void (i32)* @f1, align 4
-
-; CHECK: - Type: IMPORT
-; CHECK: Imports:
-; CHECK: - Module: env
-; CHECK: Field: f1
-; CHECK: Kind: FUNCTION
-; CHECK: SigIndex: 0
-; CHECK: - Type: ELEM
-; CHECK: Segments:
-; CHECK: - Offset:
-; CHECK: Opcode: I32_CONST
-; CHECK: Value: 0
-; CHECK: Functions: [ 0 ]
-; CHECK: - Type: DATA
-; CHECK: Relocations:
-; CHECK: - Type: R_WEBASSEMBLY_TABLE_INDEX_I32
-; CHECK: Index: 0
-; CHECK: Offset: 0x00000006
+; CHECK: --- !WASM
+; CHECK-NEXT: FileHeader:
+; CHECK-NEXT: Version: 0x00000001
+; CHECK-NEXT: Sections:
+; CHECK-NEXT: - Type: TYPE
+; CHECK-NEXT: Signatures:
+; CHECK-NEXT: - Index: 0
+; CHECK-NEXT: ReturnType: NORESULT
+; CHECK-NEXT: ParamTypes:
+; CHECK-NEXT: - I32
+; CHECK: - Type: IMPORT
+; CHECK-NEXT: Imports:
+; CHECK-NEXT: - Module: env
+; CHECK-NEXT: Field: f1
+; CHECK-NEXT: Kind: FUNCTION
+; CHECK-NEXT: SigIndex: 0
+; CHECK: - Type: ELEM
+; CHECK-NEXT: Segments:
+; CHECK-NEXT: - Offset:
+; CHECK-NEXT: Opcode: I32_CONST
+; CHECK-NEXT: Value: 0
+; CHECK-NEXT: Functions: [ 0 ]
+; CHECK: - Type: DATA
+; CHECK-NEXT: Relocations:
+; CHECK-NEXT: - Type: R_WEBASSEMBLY_TABLE_INDEX_I32
+; CHECK-NEXT: Index: 0
+; CHECK-NEXT: Offset: 0x00000006
diff --git a/test/MC/WebAssembly/unnamed-data.ll b/test/MC/WebAssembly/unnamed-data.ll
index fd985088c1d2..fa0ff966a79f 100644
--- a/test/MC/WebAssembly/unnamed-data.ll
+++ b/test/MC/WebAssembly/unnamed-data.ll
@@ -46,7 +46,8 @@
; CHECK-NEXT: Index: 1
; CHECK-NEXT: Offset: 0x0000001E
; CHECK-NEXT: Segments:
-; CHECK-NEXT: - Index: 0
+; CHECK-NEXT: - SectionOffset: 6
+; CHECK-NEXT: MemoryIndex: 0
; CHECK-NEXT: Offset:
; CHECK-NEXT: Opcode: I32_CONST
; CHECK-NEXT: Value: 0
diff --git a/test/MC/WebAssembly/weak-alias.ll b/test/MC/WebAssembly/weak-alias.ll
index 6e2b8631d2b1..1d80ea4aac6c 100644
--- a/test/MC/WebAssembly/weak-alias.ll
+++ b/test/MC/WebAssembly/weak-alias.ll
@@ -3,27 +3,56 @@
; foo_alias() function is weak alias of function foo()
; Generates two exports of the same function, one of them weak
-@foo_alias = weak hidden alias i32 (...), bitcast (i32 ()* @foo to i32 (...)*)
+@foo_alias = weak hidden alias i32 (), i32 ()* @foo
+
+define hidden i32 @call_alias() #0 {
+entry:
+ %call = call i32 @foo_alias()
+ ret i32 %call
+}
define hidden i32 @foo() #0 {
entry:
ret i32 0
}
+
+; CHECK: - Type: TYPE
+; CHECK-NEXT: Signatures:
+; CHECK-NEXT: - Index: 0
+; CHECK-NEXT: ReturnType: I32
+; CHECK-NEXT: ParamTypes:
+
+; CHECK: - Type: IMPORT
+; CHECK-NEXT: Imports:
+; CHECK-NEXT: - Module: env
+; CHECK-NEXT: Field: foo_alias
+; CHECK-NEXT: Kind: FUNCTION
+; CHECK-NEXT: SigIndex: 0
+
+; CHECK: - Type: FUNCTION
+; CHECK-NEXT: FunctionTypes: [ 0, 0 ]
+
; CHECK: - Type: EXPORT
; CHECK-NEXT: Exports:
+; CHECK-NEXT: - Name: call_alias
+; CHECK-NEXT: Kind: FUNCTION
+; CHECK-NEXT: Index: 1
; CHECK-NEXT: - Name: foo
; CHECK-NEXT: Kind: FUNCTION
-; CHECK-NEXT: Index: 0
+; CHECK-NEXT: Index: 2
; CHECK-NEXT: - Name: foo_alias
; CHECK-NEXT: Kind: FUNCTION
-; CHECK-NEXT: Index: 0
-
+; CHECK-NEXT: Index: 2
; CHECK: - Type: CUSTOM
; CHECK-NEXT: Name: name
; CHECK-NEXT: FunctionNames:
; CHECK-NEXT: - Index: 0
+; CHECK-NEXT: Name: foo_alias
+; CHECK-NEXT: - Index: 1
+; CHECK-NEXT: Name: call_alias
+; CHECK-NEXT: - Index: 2
; CHECK-NEXT: Name: foo
; CHECK-NEXT: - Type: CUSTOM
; CHECK-NEXT: Name: linking
diff --git a/test/Object/Inputs/trivial-object-test.wasm b/test/Object/Inputs/trivial-object-test.wasm
new file mode 100644
index 000000000000..1f3947ac472e
--- /dev/null
+++ b/test/Object/Inputs/trivial-object-test.wasm
Binary files differ
diff --git a/test/Object/Inputs/trivial.ll b/test/Object/Inputs/trivial.ll
index 37a6bc20a8c2..528a713c7fa3 100644
--- a/test/Object/Inputs/trivial.ll
+++ b/test/Object/Inputs/trivial.ll
@@ -1,3 +1,6 @@
+; Input used for generating checked-in binaries (trivial-object-test.*)
+; llc -mtriple=wasm32-unknown-unknown-wasm trivial.ll -filetype=obj -o trivial-object-test.wasm
+
@.str = private unnamed_addr constant [13 x i8] c"Hello World\0A\00", align 1
define i32 @main() nounwind {
diff --git a/test/Object/nm-trivial-object.test b/test/Object/nm-trivial-object.test
index c1f4d9e1f96f..f1aadd5cccf5 100644
--- a/test/Object/nm-trivial-object.test
+++ b/test/Object/nm-trivial-object.test
@@ -2,6 +2,8 @@ RUN: yaml2obj %p/Inputs/COFF/i386.yaml | llvm-nm -a -S - \
RUN: | FileCheck %s -check-prefix COFF32
RUN: yaml2obj %p/Inputs/COFF/x86-64.yaml | llvm-nm -a -S - \
RUN: | FileCheck %s -check-prefix COFF64
+RUN: llvm-nm %p/Inputs/trivial-object-test.wasm \
+RUN: | FileCheck %s -check-prefix WASM
RUN: llvm-nm %p/Inputs/trivial-object-test.elf-i386 \
RUN: | FileCheck %s -check-prefix ELF
RUN: llvm-nm %p/Inputs/trivial-object-test.elf-i386 -S \
@@ -57,6 +59,11 @@ COFF32-NEXT: U _SomeOtherFunction
COFF32-NEXT: 00000000 T _main
COFF32-NEXT: U _puts
+WASM: U SomeOtherFunction
+WASM-NEXT: 00000002 T main
+WASM-NEXT: U puts
+WASM-NEXT: 00000001 D var
+
COFF64: 00000000 d .data
COFF64-NEXT: 00000000 t .text
COFF64-NEXT: 00000000 r ??__Ex@@YAXXZ
diff --git a/test/Object/obj2yaml.test b/test/Object/obj2yaml.test
index b89311db6069..73d466cc4993 100644
--- a/test/Object/obj2yaml.test
+++ b/test/Object/obj2yaml.test
@@ -4,8 +4,8 @@ RUN: obj2yaml %p/Inputs/trivial-object-test.elf-mipsel | FileCheck %s --check-pr
RUN: obj2yaml %p/Inputs/trivial-object-test.elf-mips64el | FileCheck %s --check-prefix ELF-MIPS64EL
RUN: obj2yaml %p/Inputs/trivial-object-test.elf-x86-64 | FileCheck %s --check-prefix ELF-X86-64
RUN: obj2yaml %p/Inputs/trivial-object-test.elf-avr | FileCheck %s --check-prefix ELF-AVR
-RUN: obj2yaml %p/Inputs/unwind-section.elf-x86-64 \
-RUN: | FileCheck %s --check-prefix ELF-X86-64-UNWIND
+RUN: obj2yaml %p/Inputs/trivial-object-test.wasm | FileCheck %s --check-prefix WASM
+RUN: obj2yaml %p/Inputs/unwind-section.elf-x86-64 | FileCheck %s --check-prefix ELF-X86-64-UNWIND
COFF-I386: header:
COFF-I386-NEXT: Machine: IMAGE_FILE_MACHINE_I386
@@ -411,13 +411,13 @@ ELF-X86-64-NEXT: - Name: SomeOtherFunction
ELF-X86-64-NEXT: - Name: puts
-ELF-AVR: FileHeader:
+ELF-AVR: FileHeader:
ELF-AVR-NEXT: Class: ELFCLASS32
ELF-AVR-NEXT: Data: ELFDATA2LSB
ELF-AVR-NEXT: Type: ET_EXEC
ELF-AVR-NEXT: Machine: EM_AVR
ELF-AVR-NEXT: Flags: [ EF_AVR_ARCH_AVR2 ]
-ELF-AVR-NEXT: Sections:
+ELF-AVR-NEXT: Sections:
ELF-AVR-NEXT: - Name: .text
ELF-AVR-NEXT: Type: SHT_PROGBITS
ELF-AVR-NEXT: Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
@@ -429,8 +429,8 @@ ELF-AVR-NEXT: Flags: [ SHF_WRITE, SHF_ALLOC ]
ELF-AVR-NEXT: Address: 0x0000000000800060
ELF-AVR-NEXT: AddressAlign: 0x0000000000000001
ELF-AVR-NEXT: Content: ''
-ELF-AVR-NEXT: Symbols:
-ELF-AVR-NEXT: Local:
+ELF-AVR-NEXT: Symbols:
+ELF-AVR-NEXT: Local:
ELF-AVR-NEXT: - Type: STT_SECTION
ELF-AVR-NEXT: Section: .text
ELF-AVR-NEXT: - Type: STT_SECTION
@@ -440,7 +440,7 @@ ELF-AVR-NEXT: - Name: a.o
ELF-AVR-NEXT: Type: STT_FILE
ELF-AVR-NEXT: - Name: main
ELF-AVR-NEXT: Section: .text
-ELF-AVR-NEXT: Global:
+ELF-AVR-NEXT: Global:
ELF-AVR-NEXT: - Name: __trampolines_start
ELF-AVR-NEXT: Section: .text
ELF-AVR-NEXT: - Name: _etext
@@ -470,6 +470,17 @@ ELF-AVR-NEXT: - Name: _end
ELF-AVR-NEXT: Section: .data
ELF-AVR-NEXT: Value: 0x0000000000800060
+WASM: --- !WASM
+WASM-NEXT: FileHeader:
+WASM-NEXT: Version: 0x00000001
+WASM: - Type: EXPORT
+WASM-NEXT: Exports:
+WASM-NEXT: - Name: main
+WASM-NEXT: Kind: FUNCTION
+WASM-NEXT: Index: 2
+WASM-NEXT: - Name: var
+WASM-NEXT: Kind: GLOBAL
+WASM-NEXT: Index: 1
ELF-X86-64-UNWIND: - Name: .eh_frame
ELF-X86-64-UNWIND-NEXT: Type: SHT_X86_64_UNWIND
diff --git a/test/Object/objdump-relocations.test b/test/Object/objdump-relocations.test
index 1e41f78ca729..29f001962875 100644
--- a/test/Object/objdump-relocations.test
+++ b/test/Object/objdump-relocations.test
@@ -12,6 +12,8 @@ RUN: llvm-objdump -r %p/Inputs/trivial-object-test.elf-mips64el \
RUN: | FileCheck %s -check-prefix ELF-MIPS64EL
RUN: llvm-objdump -r %p/Inputs/trivial-object-test.elf-mipsel \
RUN: | FileCheck %s -check-prefix ELF-MIPSEL
+RUN: llvm-objdump -r %p/Inputs/trivial-object-test.wasm \
+RUN: | FileCheck %s -check-prefix WASM
RUN: llvm-objdump -r %p/Inputs/relocations.elf-x86-64 \
RUN: | FileCheck %s -check-prefix ELF-complex-x86-64
@@ -57,6 +59,11 @@ ELF-MIPSEL: R_MIPS_LO16 $.str
ELF-MIPSEL: R_MIPS_CALL16 puts
ELF-MIPSEL: R_MIPS_CALL16 SomeOtherFunction
+WASM: CODE
+WASM-NEXT: R_WEBASSEMBLY_GLOBAL_ADDR_SLEB 0+0
+WASM-NEXT: R_WEBASSEMBLY_FUNCTION_INDEX_LEB 0+0
+WASM-NEXT: R_WEBASSEMBLY_FUNCTION_INDEX_LEB 1+0
+
ELF-complex-x86-64: .text
ELF-complex-x86-64-NEXT: R_X86_64_8 .data-4
ELF-complex-x86-64-NEXT: R_X86_64_16 .data-4
diff --git a/test/ObjectYAML/wasm/data_section.yaml b/test/ObjectYAML/wasm/data_section.yaml
index b8c65abbff91..521aa5402784 100644
--- a/test/ObjectYAML/wasm/data_section.yaml
+++ b/test/ObjectYAML/wasm/data_section.yaml
@@ -8,7 +8,7 @@ Sections:
- Initial: 0x00000003
- Type: DATA
Segments:
- - Index: 0
+ - MemoryIndex: 0
Offset:
Opcode: I32_CONST
Value: 4
@@ -38,7 +38,8 @@ Sections:
# CHECK-NEXT: Offset: 0x00000006
# CHECK-NEXT: Addend: -6
# CHECK-NEXT: Segments:
-# CHECK-NEXT: - Index: 0
+# CHECK-NEXT: - SectionOffset: 6
+# CHECK-NEXT: MemoryIndex: 0
# CHECK-NEXT: Offset:
# CHECK-NEXT: Opcode: I32_CONST
# CHECK-NEXT: Value: 4
diff --git a/test/Other/2002-01-31-CallGraph.ll b/test/Other/2002-01-31-CallGraph.ll
index 0e4c87751263..d4819357ac67 100644
--- a/test/Other/2002-01-31-CallGraph.ll
+++ b/test/Other/2002-01-31-CallGraph.ll
@@ -1,6 +1,6 @@
; Call graph construction crash: Not handling indirect calls right
;
-; RUN: opt < %s -analyze -print-callgraph >& /dev/null
+; RUN: opt < %s -analyze -print-callgraph > /dev/null 2>&1
;
%FunTy = type i32 (i32)
diff --git a/test/Other/new-pm-defaults.ll b/test/Other/new-pm-defaults.ll
index fbecb34aa4b7..a0658c10d609 100644
--- a/test/Other/new-pm-defaults.ll
+++ b/test/Other/new-pm-defaults.ll
@@ -26,6 +26,37 @@
; RUN: -passes='lto-pre-link<O2>' -S %s 2>&1 \
; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes-ep-peephole='no-op-function' \
+; RUN: -passes='default<O3>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O3 \
+; RUN: --check-prefix=CHECK-EP-PEEPHOLE
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes-ep-late-loop-optimizations='no-op-loop' \
+; RUN: -passes='default<O3>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O3 \
+; RUN: --check-prefix=CHECK-EP-LOOP-LATE
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes-ep-loop-optimizer-end='no-op-loop' \
+; RUN: -passes='default<O3>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O3 \
+; RUN: --check-prefix=CHECK-EP-LOOP-END
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes-ep-scalar-optimizer-late='no-op-function' \
+; RUN: -passes='default<O3>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O3 \
+; RUN: --check-prefix=CHECK-EP-SCALAR-LATE
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes-ep-cgscc-optimizer-late='no-op-cgscc' \
+; RUN: -passes='default<O3>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O3 \
+; RUN: --check-prefix=CHECK-EP-CGSCC-LATE
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes-ep-vectorizer-start='no-op-function' \
+; RUN: -passes='default<O3>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O3 \
+; RUN: --check-prefix=CHECK-EP-VECTORIZER-START
+
; CHECK-O: Starting llvm::Module pass manager run.
; CHECK-O-NEXT: Running pass: PassManager<{{.*}}Module{{.*}}>
; CHECK-O-NEXT: Starting llvm::Module pass manager run.
@@ -53,6 +84,7 @@
; CHECK-O-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
; CHECK-O-NEXT: Starting llvm::Function pass manager run.
; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-EP-PEEPHOLE-NEXT: Running pass: NoOpFunctionPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Finished llvm::Function pass manager run.
; CHECK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}GlobalsAA
@@ -84,6 +116,7 @@
; CHECK-O1-NEXT: Running pass: LibCallsShrinkWrapPass
; CHECK-O2-NEXT: Running pass: LibCallsShrinkWrapPass
; CHECK-O3-NEXT: Running pass: LibCallsShrinkWrapPass
+; CHECK-EP-PEEPHOLE-NEXT: Running pass: NoOpFunctionPass
; CHECK-O-NEXT: Running pass: TailCallElimPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: ReassociatePass
@@ -105,8 +138,10 @@
; CHECK-O-NEXT: Starting Loop pass manager run.
; CHECK-O-NEXT: Running pass: IndVarSimplifyPass
; CHECK-O-NEXT: Running pass: LoopIdiomRecognizePass
+; CHECK-EP-LOOP-LATE-NEXT: Running pass: NoOpLoopPass
; CHECK-O-NEXT: Running pass: LoopDeletionPass
; CHECK-O-NEXT: Running pass: LoopUnrollPass
+; CHECK-EP-LOOP-END-NEXT: Running pass: NoOpLoopPass
; CHECK-O-NEXT: Finished Loop pass manager run.
; CHECK-Os-NEXT: Running pass: MergedLoadStoreMotionPass
; CHECK-Os-NEXT: Running pass: GVN
@@ -126,15 +161,19 @@
; CHECK-O-NEXT: Running pass: BDCEPass
; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis
; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-EP-PEEPHOLE-NEXT: Running pass: NoOpFunctionPass
; CHECK-O-NEXT: Running pass: JumpThreadingPass
; CHECK-O-NEXT: Running pass: CorrelatedValuePropagationPass
; CHECK-O-NEXT: Running pass: DSEPass
; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LICMPass{{.*}}>
+; CHECK-EP-SCALAR-LATE-NEXT: Running pass: NoOpFunctionPass
; CHECK-O-NEXT: Running pass: ADCEPass
; CHECK-O-NEXT: Running analysis: PostDominatorTreeAnalysis
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-EP-PEEPHOLE-NEXT: Running pass: NoOpFunctionPass
; CHECK-O-NEXT: Finished llvm::Function pass manager run.
+; CHECK-EP-CGSCC-LATE-NEXT: Running pass: NoOpCGSCCPass
; CHECK-O-NEXT: Finished CGSCC pass manager run.
; CHECK-O-NEXT: Finished llvm::Module pass manager run.
; CHECK-O-NEXT: Running pass: PassManager<{{.*}}Module{{.*}}>
@@ -146,6 +185,7 @@
; CHECK-O-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
; CHECK-O-NEXT: Starting llvm::Function pass manager run.
; CHECK-O-NEXT: Running pass: Float2IntPass
+; CHECK-EP-VECTORIZER-START-NEXT: Running pass: NoOpFunctionPass
; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LoopRotatePass
; CHECK-O-NEXT: Running pass: LoopDistributePass
; CHECK-O-NEXT: Running pass: LoopVectorizePass
diff --git a/test/Other/new-pm-lto-defaults.ll b/test/Other/new-pm-lto-defaults.ll
index dfd298353272..cab3965bf18f 100644
--- a/test/Other/new-pm-lto-defaults.ll
+++ b/test/Other/new-pm-lto-defaults.ll
@@ -17,6 +17,10 @@
; RUN: opt -disable-verify -debug-pass-manager \
; RUN: -passes='lto<Oz>' -S %s 2>&1 \
; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='lto<O3>' -S %s -passes-ep-peephole='no-op-function' 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2 \
+; RUN: --check-prefix=CHECK-EP-Peephole
; CHECK-O: Starting llvm::Module pass manager run.
; CHECK-O-NEXT: Running pass: PassManager<{{.*}}Module
@@ -45,13 +49,18 @@
; CHECK-O2-NEXT: Running analysis: AssumptionAnalysis
; CHECK-O2-NEXT: Running pass: ConstantMergePass
; CHECK-O2-NEXT: Running pass: DeadArgumentEliminationPass
-; CHECK-O2-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}InstCombinePass>
+; CHECK-O2-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
+; CHECK-O2-NEXT: Starting llvm::Function pass manager run.
+; CHECK-O2-NEXT: Running pass: InstCombinePass
+; CHECK-EP-Peephole-NEXT: Running pass: NoOpFunctionPass
+; CHECK-O2-NEXT: Finished llvm::Function pass manager run.
; CHECK-O2-NEXT: Running pass: ModuleToPostOrderCGSCCPassAdaptor<{{.*}}InlinerPass>
; CHECK-O2-NEXT: Running pass: GlobalOptPass
; CHECK-O2-NEXT: Running pass: GlobalDCEPass
; CHECK-O2-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
; CHECK-O2-NEXT: Starting llvm::Function pass manager run.
; CHECK-O2-NEXT: Running pass: InstCombinePass
+; CHECK-EP-Peephole-NEXT: Running pass: NoOpFunctionPass
; CHECK-O2-NEXT: Running pass: JumpThreadingPass
; CHECK-O2-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O2-NEXT: Running pass: SROA on foo
diff --git a/test/Other/pass-pipelines.ll b/test/Other/pass-pipelines.ll
index 971ed2c09473..d47c02ee7a46 100644
--- a/test/Other/pass-pipelines.ll
+++ b/test/Other/pass-pipelines.ll
@@ -24,7 +24,7 @@
; CHECK-O2: Dead Argument Elimination
; CHECK-O2-NEXT: FunctionPass Manager
; CHECK-O2-NOT: Manager
-; Very carefully asert the CGSCC pass pipeline as it is fragile and unusually
+; Very carefully assert the CGSCC pass pipeline as it is fragile and unusually
; susceptible to phase ordering issues.
; CHECK-O2: CallGraph Construction
; CHECK-O2-NEXT: Globals Alias Analysis
diff --git a/test/SafepointIRVerifier/basic-use-after-reloc.ll b/test/SafepointIRVerifier/basic-use-after-reloc.ll
new file mode 100644
index 000000000000..4b0746c9f527
--- /dev/null
+++ b/test/SafepointIRVerifier/basic-use-after-reloc.ll
@@ -0,0 +1,23 @@
+; RUN: opt -safepoint-ir-verifier-print-only -verify-safepoint-ir -S %s 2>&1 | FileCheck %s
+
+; This test checks that if a value is used immediately after a
+; safepoint without using the relocated value, the verifier
+; catches this.
+
+%jObject = type { [8 x i8] }
+
+; Function Attrs: nounwind
+define %jObject addrspace(1)* @test(%jObject addrspace(1)* %arg) gc "statepoint-example" {
+bci_0:
+ %safepoint_token3 = tail call token (i64, i32, double (double)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_f64f64f(i64 0, i32 0, double (double)* undef, i32 1, i32 0, double undef, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, %jObject addrspace(1)* %arg)
+ %arg2.relocated4 = call coldcc %jObject addrspace(1)* @llvm.experimental.gc.relocate.p1jObject(token %safepoint_token3, i32 13, i32 13)
+ ret %jObject addrspace(1)* %arg
+; CHECK: Illegal use of unrelocated value found!
+; CHECK-NEXT: Def: %jObject addrspace(1)* %arg
+; CHECK-NEXT: Use: ret %jObject addrspace(1)* %arg
+}
+
+; Function Attrs: nounwind
+declare %jObject addrspace(1)* @llvm.experimental.gc.relocate.p1jObject(token, i32, i32) #3
+
+declare token @llvm.experimental.gc.statepoint.p0f_f64f64f(i64, i32, double (double)*, i32, i32, ...)
diff --git a/test/SafepointIRVerifier/compares.ll b/test/SafepointIRVerifier/compares.ll
new file mode 100644
index 000000000000..a14fc44e9814
--- /dev/null
+++ b/test/SafepointIRVerifier/compares.ll
@@ -0,0 +1,85 @@
+; RUN: opt -safepoint-ir-verifier-print-only -verify-safepoint-ir -S %s 2>&1 | FileCheck %s
+
+; In some cases, it is valid to have unrelocated pointers used as compare
+; operands. Make sure the verifier knows to spot these exceptions.
+
+
+; comparison against null.
+define i8 addrspace(1)* @test1(i64 %arg, i8 addrspace(1)* %addr) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test1
+entry:
+ %load_addr = getelementptr i8, i8 addrspace(1)* %addr, i64 %arg
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ %cmp = icmp eq i8 addrspace(1)* %load_addr, null
+ ret i8 addrspace(1)* null
+}
+
+; comparison against exclusively derived null.
+define void @test2(i64 %arg, i1 %cond, i8 addrspace(1)* %addr) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test2
+ %load_addr = getelementptr i8, i8 addrspace(1)* null, i64 %arg
+ %load_addr_sel = select i1 %cond, i8 addrspace(1)* null, i8 addrspace(1)* %load_addr
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ %cmp = icmp eq i8 addrspace(1)* %addr, %load_addr_sel
+ ret void
+}
+
+; comparison against a constant non-null pointer. This is an unrelocated use, since
+; those pointer bits may mean something in a VM.
+define void @test3(i64 %arg, i32 addrspace(1)* %addr) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test3
+; CHECK: Illegal use of unrelocated value found!
+entry:
+ %load_addr = getelementptr i32, i32 addrspace(1)* %addr, i64 %arg
+ %load_addr_const = getelementptr i32, i32 addrspace(1)* inttoptr (i64 15 to i32 addrspace(1)*), i64 %arg
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ %cmp = icmp eq i32 addrspace(1)* %load_addr, %load_addr_const
+ ret void
+}
+
+; comparison against a derived pointer that is *not* exclusively derived from
+; null. An unrelocated use since the derived pointer could be from the constant
+; non-null pointer (load_addr.2).
+define void @test4(i64 %arg, i1 %cond, i8 addrspace(1)* %base) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test4
+; CHECK: Illegal use of unrelocated value found!
+entry:
+ %load_addr.1 = getelementptr i8, i8 addrspace(1)* null, i64 %arg
+ br i1 %cond, label %split, label %join
+
+split:
+ %load_addr.2 = getelementptr i8, i8 addrspace(1)* inttoptr (i64 30 to i8 addrspace(1)*), i64 %arg
+ br label %join
+
+join:
+ %load_addr = phi i8 addrspace(1)* [%load_addr.1, %entry], [%load_addr.2, %split]
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ %cmp = icmp eq i8 addrspace(1)* %load_addr, %base
+ ret void
+}
+
+; comparison between 2 unrelocated base pointers.
+; Since the cmp can be reordered legally before the safepoint, these are correct
+; unrelocated uses of the pointers.
+define void @test5(i64 %arg, i8 addrspace(1)* %base1, i8 addrspace(1)* %base2) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test5
+ %load_addr1 = getelementptr i8, i8 addrspace(1)* %base1, i64 %arg
+ %load_addr2 = getelementptr i8, i8 addrspace(1)* %base2, i64 %arg
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ %cmp = icmp eq i8 addrspace(1)* %load_addr1, %load_addr2
+ ret void
+}
+
+; comparison between a relocated and an unrelocated pointer.
+; This is an invalid use of the unrelocated pointer.
+define void @test6(i64 %arg, i8 addrspace(1)* %base1, i8 addrspace(1)* %base2) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test6
+; CHECK: Illegal use of unrelocated value found!
+ %load_addr1 = getelementptr i8, i8 addrspace(1)* %base1, i64 %arg
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 0, i8 addrspace(1)* %base2 , i32 -1, i32 0, i32 0, i32 0)
+ %ptr2.relocated = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %safepoint_token, i32 7, i32 7) ; base2, base2
+ %cmp = icmp eq i8 addrspace(1)* %load_addr1, %ptr2.relocated
+ ret void
+}
+declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
+declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32, i32)
diff --git a/test/SafepointIRVerifier/constant-bases.ll b/test/SafepointIRVerifier/constant-bases.ll
new file mode 100644
index 000000000000..52a2a46d068d
--- /dev/null
+++ b/test/SafepointIRVerifier/constant-bases.ll
@@ -0,0 +1,70 @@
+; RUN: opt -safepoint-ir-verifier-print-only -verify-safepoint-ir -S %s 2>&1 | FileCheck %s
+
+define i8 addrspace(1)* @test1(i64 %arg) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test1
+entry:
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* null
+}
+
+define i8 addrspace(1)* @test2(i64 %arg) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test2
+entry:
+ %load_addr = getelementptr i8, i8 addrspace(1)* inttoptr (i64 15 to i8 addrspace(1)*), i64 %arg
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %load_addr
+}
+
+define i8 addrspace(1)* @test3(i64 %arg) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test3
+entry:
+ %load_addr = getelementptr i32, i32 addrspace(1)* inttoptr (i64 15 to i32 addrspace(1)*), i64 %arg
+ %load_addr.cast = bitcast i32 addrspace(1)* %load_addr to i8 addrspace(1)*
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %load_addr.cast
+}
+
+define i8 addrspace(1)* @test4(i64 %arg, i1 %cond) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test4
+entry:
+ %load_addr.1 = getelementptr i8, i8 addrspace(1)* inttoptr (i64 15 to i8 addrspace(1)*), i64 %arg
+ br i1 %cond, label %split, label %join
+
+split:
+ %load_addr.2 = getelementptr i8, i8 addrspace(1)* inttoptr (i64 30 to i8 addrspace(1)*), i64 %arg
+ br label %join
+
+join:
+ %load_addr = phi i8 addrspace(1)* [%load_addr.1, %entry], [%load_addr.2, %split]
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %load_addr
+}
+
+define i8 addrspace(1)* @test5(i64 %arg, i1 %cond) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test5
+entry:
+ %load_addr.1 = getelementptr i8, i8 addrspace(1)* inttoptr (i64 15 to i8 addrspace(1)*), i64 %arg
+ %load_addr.2 = getelementptr i8, i8 addrspace(1)* inttoptr (i64 30 to i8 addrspace(1)*), i64 %arg
+ %load_addr = select i1 %cond, i8 addrspace(1)* %load_addr.1, i8 addrspace(1)* %load_addr.2
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %load_addr
+}
+
+define i8 addrspace(1)* @test6(i64 %arg, i1 %cond, i8 addrspace(1)* %base) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test6
+; CHECK: Illegal use of unrelocated value found!
+entry:
+ %load_addr.1 = getelementptr i8, i8 addrspace(1)* %base, i64 %arg
+ br i1 %cond, label %split, label %join
+
+split:
+ %load_addr.2 = getelementptr i8, i8 addrspace(1)* inttoptr (i64 30 to i8 addrspace(1)*), i64 %arg
+ br label %join
+
+join:
+ %load_addr = phi i8 addrspace(1)* [%load_addr.1, %entry], [%load_addr.2, %split]
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %load_addr
+}
+
+declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
diff --git a/test/SafepointIRVerifier/unrecorded-live-at-sp.ll b/test/SafepointIRVerifier/unrecorded-live-at-sp.ll
new file mode 100644
index 000000000000..e3f21c3e7133
--- /dev/null
+++ b/test/SafepointIRVerifier/unrecorded-live-at-sp.ll
@@ -0,0 +1,71 @@
+; RUN: opt %s -safepoint-ir-verifier-print-only -verify-safepoint-ir -S 2>&1 | FileCheck %s
+
+; CHECK: Illegal use of unrelocated value found!
+; CHECK-NEXT: Def: %base_phi3 = phi %jObject addrspace(1)* [ %obj609.relocated, %not_zero146 ], [ %base_phi2, %bci_37-aload ], !is_base_value !0
+; CHECK-NEXT: Use: %base_phi2 = phi %jObject addrspace(1)* [ %base_phi3, %not_zero179 ], [ %cast5, %bci_0 ], !is_base_value !0
+
+%jObject = type { [8 x i8] }
+
+declare %jObject addrspace(1)* @generate_obj1() #1
+
+declare %jObject addrspace(1)* addrspace(1)* @generate_obj2() #1
+
+declare %jObject addrspace(1)* @generate_obj3() #1
+
+; Function Attrs: nounwind
+define void @test(%jObject addrspace(1)*, %jObject addrspace(1)*, i32) #3 gc "statepoint-example" {
+bci_0:
+ %result608 = call %jObject addrspace(1)* @generate_obj3()
+ %obj609 = bitcast %jObject addrspace(1)* %result608 to %jObject addrspace(1)*
+ %cast = bitcast %jObject addrspace(1)* %result608 to %jObject addrspace(1)*
+ %cast5 = bitcast %jObject addrspace(1)* %result608 to %jObject addrspace(1)*
+ br label %bci_37-aload
+
+bci_37-aload: ; preds = %not_zero179, %bci_0
+ %base_phi = phi %jObject addrspace(1)* [ %base_phi1.relocated, %not_zero179 ], [ %cast, %bci_0 ], !is_base_value !0
+ %base_phi2 = phi %jObject addrspace(1)* [ %base_phi3, %not_zero179 ], [ %cast5, %bci_0 ], !is_base_value !0
+ %relocated8 = phi %jObject addrspace(1)* [ %relocated7.relocated, %not_zero179 ], [ %obj609, %bci_0 ]
+ %tmp3 = getelementptr inbounds %jObject, %jObject addrspace(1)* %relocated8, i64 0, i32 0, i64 32
+ %addr98 = bitcast i8 addrspace(1)* %tmp3 to %jObject addrspace(1)* addrspace(1)*
+ %cast6 = bitcast %jObject addrspace(1)* %base_phi2 to %jObject addrspace(1)* addrspace(1)*
+ br i1 undef, label %not_zero179, label %not_zero146
+
+not_zero146: ; preds = %bci_37-aload
+ %addr98.relocated = call %jObject addrspace(1)* addrspace(1)* @generate_obj2() #1
+ %obj609.relocated = call %jObject addrspace(1)* @generate_obj1() #1
+ br label %not_zero179
+
+not_zero179: ; preds = %not_zero146, %bci_37-aload
+ %base_phi1 = phi %jObject addrspace(1)* [ %obj609.relocated, %not_zero146 ], [ %base_phi, %bci_37-aload ], !is_base_value !0
+ %base_phi3 = phi %jObject addrspace(1)* [ %obj609.relocated, %not_zero146 ], [ %base_phi2, %bci_37-aload ], !is_base_value !0
+ %relocated7 = phi %jObject addrspace(1)* [ %obj609.relocated, %not_zero146 ], [ %relocated8, %bci_37-aload ]
+ %base_phi4 = phi %jObject addrspace(1)* addrspace(1)* [ %addr98.relocated, %not_zero146 ], [ %cast6, %bci_37-aload ], !is_base_value !0
+ %relocated4 = phi %jObject addrspace(1)* addrspace(1)* [ %addr98.relocated, %not_zero146 ], [ %addr98, %bci_37-aload ]
+ %safepoint_token = tail call token (i64, i32, i32 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i32f(i64 0, i32 0, i32 ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0, %jObject addrspace(1)* %base_phi1, %jObject addrspace(1)* addrspace(1)* %base_phi4, %jObject addrspace(1)* addrspace(1)* %relocated4, %jObject addrspace(1)* %relocated7)
+ %tmp4 = call i32 @llvm.experimental.gc.result.i32(token %safepoint_token)
+ %base_phi1.relocated = call coldcc %jObject addrspace(1)* @llvm.experimental.gc.relocate.p1jObject(token %safepoint_token, i32 12, i32 12)
+ %base_phi4.relocated = call coldcc %jObject addrspace(1)* addrspace(1)* @llvm.experimental.gc.relocate.p1p1jObject(token %safepoint_token, i32 13, i32 13)
+ %relocated4.relocated = call coldcc %jObject addrspace(1)* addrspace(1)* @llvm.experimental.gc.relocate.p1p1jObject(token %safepoint_token, i32 13, i32 14)
+ %relocated7.relocated = call coldcc %jObject addrspace(1)* @llvm.experimental.gc.relocate.p1jObject(token %safepoint_token, i32 12, i32 15)
+ %addr636 = bitcast %jObject addrspace(1)* addrspace(1)* %relocated4.relocated to %jObject addrspace(1)* addrspace(1)*
+ br label %bci_37-aload
+}
+
+declare token @llvm.experimental.gc.statepoint.p0f_i32f(i64, i32, i32 ()*, i32, i32, ...)
+
+; Function Attrs: nounwind
+declare i32 @llvm.experimental.gc.result.i32(token) #4
+
+; Function Attrs: nounwind
+declare %jObject addrspace(1)* @llvm.experimental.gc.relocate.p1jObject(token, i32, i32) #4
+
+; Function Attrs: nounwind
+declare %jObject addrspace(1)* addrspace(1)* @llvm.experimental.gc.relocate.p1p1jObject(token, i32, i32) #4
+
+attributes #0 = { noinline nounwind "gc-leaf-function"="true" }
+attributes #1 = { "gc-leaf-function"="true" }
+attributes #2 = { nounwind readonly "gc-leaf-function"="true" }
+attributes #3 = { nounwind }
+attributes #4 = { nounwind }
+
+!0 = !{i32 1}
diff --git a/test/SafepointIRVerifier/uses-in-phi-nodes.ll b/test/SafepointIRVerifier/uses-in-phi-nodes.ll
new file mode 100644
index 000000000000..d06eb6e0d9a7
--- /dev/null
+++ b/test/SafepointIRVerifier/uses-in-phi-nodes.ll
@@ -0,0 +1,78 @@
+; RUN: opt -safepoint-ir-verifier-print-only -verify-safepoint-ir -S %s 2>&1 | FileCheck %s
+
+define i8 addrspace(1)* @test.not.ok.0(i8 addrspace(1)* %arg) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test.not.ok.0
+ bci_0:
+ br i1 undef, label %left, label %right
+
+ left:
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ br label %merge
+
+ right:
+ br label %merge
+
+ merge:
+; CHECK: Illegal use of unrelocated value found!
+; CHECK-NEXT: Def: i8 addrspace(1)* %arg
+; CHECK-NEXT: Use: %val = phi i8 addrspace(1)* [ %arg, %left ], [ %arg, %right ]
+ %val = phi i8 addrspace(1)* [ %arg, %left ], [ %arg, %right ]
+ ret i8 addrspace(1)* %val
+}
+
+define i8 addrspace(1)* @test.not.ok.1(i8 addrspace(1)* %arg) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test.not.ok.1
+ bci_0:
+ br i1 undef, label %left, label %right
+
+ left:
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ br label %merge
+
+ right:
+ br label %merge
+
+ merge:
+; CHECK: Illegal use of unrelocated value found!
+; CHECK-NEXT: Def: i8 addrspace(1)* %arg
+; CHECK-NEXT: Use: %val = phi i8 addrspace(1)* [ %arg, %left ], [ null, %right ]
+ %val = phi i8 addrspace(1)* [ %arg, %left ], [ null, %right ]
+ ret i8 addrspace(1)* %val
+}
+
+define i8 addrspace(1)* @test.ok.0(i8 addrspace(1)* %arg) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test.ok.0
+ bci_0:
+ br i1 undef, label %left, label %right
+
+ left:
+ %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ br label %merge
+
+ right:
+ br label %merge
+
+ merge:
+ %val = phi i8 addrspace(1)* [ null, %left ], [ null, %right ]
+ ret i8 addrspace(1)* %val
+}
+
+define i8 addrspace(1)* @test.ok.1(i8 addrspace(1)* %arg) gc "statepoint-example" {
+; CHECK: No illegal uses found by SafepointIRVerifier in: test.ok.1
+ bci_0:
+ br i1 undef, label %left, label %right
+
+ left:
+ call void @not_statepoint()
+ br label %merge
+
+ right:
+ br label %merge
+
+ merge:
+ %val = phi i8 addrspace(1)* [ %arg, %left ], [ %arg, %right ]
+ ret i8 addrspace(1)* %val
+}
+
+declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
+declare void @not_statepoint()
diff --git a/test/TableGen/AsmVariant.td b/test/TableGen/AsmVariant.td
index cb5d32385d3b..70d410ee7bd0 100644
--- a/test/TableGen/AsmVariant.td
+++ b/test/TableGen/AsmVariant.td
@@ -31,6 +31,7 @@ def foo : Instruction {
let InOperandList = (ins);
let AsmString = "foo";
let AsmVariantName = "Foo";
+ let Namespace = "Arch";
}
def BarAlias : InstAlias<"bar", (foo)> {
diff --git a/test/TableGen/GlobalISelEmitter.td b/test/TableGen/GlobalISelEmitter.td
index 7c09b97a5e99..114d0e23b855 100644
--- a/test/TableGen/GlobalISelEmitter.td
+++ b/test/TableGen/GlobalISelEmitter.td
@@ -7,6 +7,10 @@ include "llvm/Target/Target.td"
def MyTargetISA : InstrInfo;
def MyTarget : Target { let InstructionSet = MyTargetISA; }
+let TargetPrefix = "mytarget" in {
+def int_mytarget_nop : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+}
+
def R0 : Register<"r0"> { let Namespace = "MyTarget"; }
def GPR32 : RegisterClass<"MyTarget", [i32], 32, (add R0)>;
def GPR32Op : RegisterOperand<GPR32>;
@@ -38,6 +42,23 @@ def HasC : Predicate<"Subtarget->hasC()"> { let RecomputePerFunction = 1; }
//===- Test the function boilerplate. -------------------------------------===//
+// CHECK: const unsigned MAX_SUBTARGET_PREDICATES = 3;
+// CHECK: using PredicateBitset = llvm::PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
+
+// CHECK-LABEL: #ifdef GET_GLOBALISEL_TEMPORARIES_DECL
+// CHECK-NEXT: mutable MatcherState State;
+// CHECK-NEXT: typedef ComplexRendererFn(MyTargetInstructionSelector::*ComplexMatcherMemFn)(MachineOperand &) const;
+// CHECK-NEXT: const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> MatcherInfo;
+// CHECK-NEXT: #endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL
+
+// CHECK-LABEL: #ifdef GET_GLOBALISEL_TEMPORARIES_INIT
+// CHECK-NEXT: , State(2),
+// CHECK-NEXT: MatcherInfo({TypeObjects, FeatureBitsets, {
+// CHECK-NEXT: nullptr, // GICP_Invalid
+// CHECK-NEXT: &MyTargetInstructionSelector::selectComplexPattern, // gi_complex
+// CHECK-NEXT: }})
+// CHECK-NEXT: #endif // ifdef GET_GLOBALISEL_TEMPORARIES_INIT
+
// CHECK-LABEL: enum SubtargetFeatureBits : uint8_t {
// CHECK-NEXT: Feature_HasABit = 0,
// CHECK-NEXT: Feature_HasBBit = 1,
@@ -63,39 +84,104 @@ def HasC : Predicate<"Subtarget->hasC()"> { let RecomputePerFunction = 1; }
// CHECK-NEXT: }
// CHECK: bool MyTargetInstructionSelector::selectImpl(MachineInstr &I) const {
-// CHECK: MachineFunction &MF = *I.getParent()->getParent();
-// CHECK: const MachineRegisterInfo &MRI = MF.getRegInfo();
+// CHECK-NEXT: MachineFunction &MF = *I.getParent()->getParent();
+// CHECK-NEXT: MachineRegisterInfo &MRI = MF.getRegInfo();
+// CHECK: AvailableFunctionFeatures = computeAvailableFunctionFeatures(&STI, &MF);
+// CHECK-NEXT: const PredicateBitset AvailableFeatures = getAvailableFeatures();
+// CHECK-NEXT: NewMIVector OutMIs;
+// CHECK-NEXT: State.MIs.clear();
+// CHECK-NEXT: State.MIs.push_back(&I);
+
+//===- Test a pattern with multiple ComplexPatterns in multiple instrs ----===//
+//
+
+// CHECK-LABEL: MatchTable0[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/4,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_SELECT,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckComplexPattern, /*MI*/0, /*Op*/2, /*Renderer*/0, GICP_gi_complex,
+// CHECK-NEXT: // MIs[0] src3
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/3, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckComplexPattern, /*MI*/0, /*Op*/3, /*Renderer*/1, GICP_gi_complex,
+// CHECK-NEXT: // (select:i32 GPR32:i32:$src1, complex:i32:$src2, complex:i32:$src3) => (INSN2:i32 GPR32:i32:$src1, complex:i32:$src3, complex:i32:$src2)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::INSN2,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_ComplexRenderer, /*InsnID*/0, /*RendererID*/1,
+// CHECK-NEXT: GIR_ComplexRenderer, /*InsnID*/0, /*RendererID*/0,
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable0\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable0, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+
+def INSN3 : I<(outs GPR32:$dst),
+ (ins GPR32Op:$src1, complex:$src2, GPR32:$src3, complex:$src4, complex:$src5), []>;
+def : Pat<(select GPR32:$src1, complex:$src2, (select GPR32:$src3, complex:$src4, complex:$src5)),
+ (INSN3 GPR32:$src1, complex:$src2, GPR32:$src3, complex:$src4, complex:$src5)>;
//===- Test a pattern with multiple ComplexPattern operands. --------------===//
//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 4)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_SELECT) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((Renderer0 = selectComplexPattern(MI0.getOperand(2)))))) &&
-// CHECK-NEXT: ((/* src3 */ (MRI.getType(MI0.getOperand(3).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((Renderer1 = selectComplexPattern(MI0.getOperand(3))))))) {
-// CHECK-NEXT: // (select:i32 GPR32:i32:$src1, complex:i32:$src2, complex:i32:$src3) => (INSN2:i32 GPR32:i32:$src1, complex:i32:$src3, complex:i32:$src2)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::INSN2));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
-// CHECK-NEXT: Renderer1(MIB);
-// CHECK-NEXT: Renderer0(MIB);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
+// CHECK-LABEL: MatchTable1[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/4,
+// CHECK-NEXT: GIM_RecordInsn, /*DefineMI*/1, /*MI*/0, /*OpIdx*/3, // MIs[1]
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/1, /*Expected*/4,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_SELECT,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckComplexPattern, /*MI*/0, /*Op*/2, /*Renderer*/0, GICP_gi_complex,
+// CHECK-NEXT: // MIs[0] Operand 3
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/3, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_SELECT,
+// CHECK-NEXT: // MIs[1] Operand 0
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: // MIs[1] src3
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/1, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[1] src4
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckComplexPattern, /*MI*/1, /*Op*/2, /*Renderer*/1, GICP_gi_complex,
+// CHECK-NEXT: // MIs[1] src5
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/3, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckComplexPattern, /*MI*/1, /*Op*/3, /*Renderer*/2, GICP_gi_complex,
+// CHECK-NEXT: GIM_CheckIsSafeToFold, /*InsnID*/1,
+// CHECK-NEXT: // (select:i32 GPR32:i32:$src1, complex:i32:$src2, (select:i32 GPR32:i32:$src3, complex:i32:$src4, complex:i32:$src5)) => (INSN3:i32 GPR32:i32:$src1, complex:i32:$src2, GPR32:i32:$src3, complex:i32:$src4, complex:i32:$src5)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::INSN3,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_ComplexRenderer, /*InsnID*/0, /*RendererID*/0,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/1, // src3
+// CHECK-NEXT: GIR_ComplexRenderer, /*InsnID*/0, /*RendererID*/1,
+// CHECK-NEXT: GIR_ComplexRenderer, /*InsnID*/0, /*RendererID*/2,
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable1\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable1, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
def : GINodeEquiv<G_SELECT, select>;
def INSN2 : I<(outs GPR32:$dst), (ins GPR32Op:$src1, complex:$src2, complex:$src3), []>;
@@ -104,119 +190,149 @@ def : Pat<(select GPR32:$src1, complex:$src2, complex:$src3),
//===- Test a simple pattern with regclass operands. ----------------------===//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_ADD) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(2).getReg(), MRI, TRI)))))) {
-
-// CHECK-NEXT: // (add:i32 GPR32:i32:$src1, GPR32:i32:$src2) => (ADD:i32 GPR32:i32:$src1, GPR32:i32:$src2)
-// CHECK-NEXT: I.setDesc(TII.get(MyTarget::ADD));
-// CHECK-NEXT: MachineInstr &NewI = I;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
-
+// CHECK-LABEL: MatchTable2[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_ADD,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID
+// CHECK-NEXT: // MIs[0] src2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/2, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // (add:i32 GPR32:i32:$src1, GPR32:i32:$src2) => (ADD:i32 GPR32:i32:$src1, GPR32:i32:$src2)
+// CHECK-NEXT: GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/ 0, /*Opcode*/MyTarget::ADD,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable2\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable2, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
def ADD : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2),
[(set GPR32:$dst, (add GPR32:$src1, GPR32:$src2))]>;
+//===- Test a simple pattern with an intrinsic. ---------------------------===//
+//
+
+// CHECK-LABEL: MatchTable3[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_INTRINSIC,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 1
+// CHECK-NEXT: GIM_CheckIntrinsicID, /*MI*/0, /*Op*/1, Intrinsic::mytarget_nop,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/2, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // (intrinsic_wo_chain:i32 [[ID:[0-9]+]]:iPTR, GPR32:i32:$src1) => (MOV:i32 GPR32:i32:$src1)
+
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::MOV,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/2, // src1
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable3\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable3, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+
+def MOV : I<(outs GPR32:$dst), (ins GPR32:$src1),
+ [(set GPR32:$dst, (int_mytarget_nop GPR32:$src1))]>;
+
//===- Test a nested instruction match. -----------------------------------===//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: PredicateBitset ExpectedFeatures = {Feature_HasABit};
-// CHECK-NEXT: if ((AvailableFeatures & ExpectedFeatures) != ExpectedFeatures)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if (!MI0.getOperand(1).isReg())
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if (TRI.isPhysicalRegister(MI0.getOperand(1).getReg()))
-// CHECK-NEXT: return false;
-// CHECK-NEXT: MachineInstr &MI1 = *MRI.getVRegDef(MI0.getOperand(1).getReg());
-// CHECK-NEXT: if (MI1.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_MUL) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* Operand 1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: (((MI1.getOpcode() == TargetOpcode::G_ADD) &&
-// CHECK-NEXT: ((/* Operand 0 */ (MRI.getType(MI1.getOperand(0).getReg()) == (LLT::scalar(32))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI1.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI1.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI1.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI1.getOperand(2).getReg(), MRI, TRI))))))
-// CHECK-NEXT: ))) &&
-// CHECK-NEXT: ((/* src3 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(2).getReg(), MRI, TRI)))))) {
-// CHECK-NEXT: if (!isObviouslySafeToFold(MI1)) return false;
-// CHECK-NEXT: // (mul:i32 (add:i32 GPR32:i32:$src1, GPR32:i32:$src2), GPR32:i32:$src3) => (MULADD:i32 GPR32:i32:$src1, GPR32:i32:$src2, GPR32:i32:$src3)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::MULADD));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.add(MI1.getOperand(1)/*src1*/);
-// CHECK-NEXT: MIB.add(MI1.getOperand(2)/*src2*/);
-// CHECK-NEXT: MIB.add(MI0.getOperand(2)/*src3*/);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, &MI1, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
+// CHECK-LABEL: MatchTable4[] = {
+// CHECK-NEXT: GIM_CheckFeatures, GIFBS_HasA,
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_RecordInsn, /*DefineMI*/1, /*MI*/0, /*OpIdx*/1, // MIs[1]
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/1, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_MUL,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_ADD,
+// CHECK-NEXT: // MIs[1] Operand 0
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: // MIs[1] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/1, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[1] src2
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/1, /*Op*/2, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src3
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/2, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: GIM_CheckIsSafeToFold, /*InsnID*/1,
+// CHECK-NEXT: // (mul:i32 (add:i32 GPR32:i32:$src1, GPR32:i32:$src2), GPR32:i32:$src3) => (MULADD:i32 GPR32:i32:$src1, GPR32:i32:$src2, GPR32:i32:$src3)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::MULADD,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/2, // src2
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/2, // src3
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable4\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable4, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
// We also get a second rule by commutativity.
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: PredicateBitset ExpectedFeatures = {Feature_HasABit};
-// CHECK-NEXT: if ((AvailableFeatures & ExpectedFeatures) != ExpectedFeatures)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if (!MI0.getOperand(2).isReg())
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if (TRI.isPhysicalRegister(MI0.getOperand(2).getReg()))
-// CHECK-NEXT: return false;
-// CHECK-NEXT: MachineInstr &MI1 = *MRI.getVRegDef(MI0.getOperand(2).getReg());
-// CHECK-NEXT: if (MI1.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_MUL) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src3 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: (((MI1.getOpcode() == TargetOpcode::G_ADD) &&
-// CHECK-NEXT: ((/* Operand 0 */ (MRI.getType(MI1.getOperand(0).getReg()) == (LLT::scalar(32))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI1.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI1.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI1.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI1.getOperand(2).getReg(), MRI, TRI))))))
-// CHECK-NEXT: )))) {
-// CHECK-NEXT: if (!isObviouslySafeToFold(MI1)) return false;
-// CHECK-NEXT: // (mul:i32 GPR32:i32:$src3, (add:i32 GPR32:i32:$src1, GPR32:i32:$src2)) => (MULADD:i32 GPR32:i32:$src1, GPR32:i32:$src2, GPR32:i32:$src3)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::MULADD));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.add(MI1.getOperand(1)/*src1*/);
-// CHECK-NEXT: MIB.add(MI1.getOperand(2)/*src2*/);
-// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src3*/);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, &MI1, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
+// CHECK-LABEL: MatchTable5[] = {
+// CHECK-NEXT: GIM_CheckFeatures, GIFBS_HasA,
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_RecordInsn, /*DefineMI*/1, /*MI*/0, /*OpIdx*/2,
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/1, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_MUL,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src3
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_ADD,
+// CHECK-NEXT: // MIs[1] Operand 0
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: // MIs[1] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/1, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[1] src2
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/1, /*Op*/2, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: GIM_CheckIsSafeToFold, /*InsnID*/1,
+// CHECK-NEXT: // (mul:i32 GPR32:i32:$src3, (add:i32 GPR32:i32:$src1, GPR32:i32:$src2)) => (MULADD:i32 GPR32:i32:$src1, GPR32:i32:$src2, GPR32:i32:$src3)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::MULADD,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/2, // src2
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src3
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable5\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable5, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
def MULADD : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3),
[(set GPR32:$dst,
@@ -225,67 +341,129 @@ def MULADD : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3),
//===- Test another simple pattern with regclass operands. ----------------===//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: PredicateBitset ExpectedFeatures = {Feature_HasABit, Feature_HasBBit, Feature_HasCBit};
-// CHECK-NEXT: if ((AvailableFeatures & ExpectedFeatures) != ExpectedFeatures)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_MUL) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(2).getReg(), MRI, TRI)))))) {
-// CHECK-NEXT: // (mul:i32 GPR32:i32:$src1, GPR32:i32:$src2) => (MUL:i32 GPR32:i32:$src2, GPR32:i32:$src1)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::MUL));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.add(MI0.getOperand(2)/*src2*/);
-// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
+// CHECK-LABEL: MatchTable6[] = {
+// CHECK-NEXT: GIM_CheckFeatures, GIFBS_HasA_HasB_HasC,
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_MUL,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/2, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // (mul:i32 GPR32:i32:$src1, GPR32:i32:$src2) => (MUL:i32 GPR32:i32:$src2, GPR32:i32:$src1)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::MUL,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/2, // src2
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable6\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable6, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
def MUL : I<(outs GPR32:$dst), (ins GPR32:$src2, GPR32:$src1),
[(set GPR32:$dst, (mul GPR32:$src1, GPR32:$src2))]>,
Requires<[HasA, HasB, HasC]>;
+//===- Test a more complex multi-instruction match. -----------------------===//
+
+// CHECK-LABEL: MatchTable7[] = {
+// CHECK-NEXT: GIM_CheckFeatures, GIFBS_HasA,
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_RecordInsn, /*DefineMI*/1, /*MI*/0, /*OpIdx*/1, // MIs[1]
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/1, /*Expected*/3,
+// CHECK-NEXT: GIM_RecordInsn, /*DefineMI*/2, /*MI*/0, /*OpIdx*/2, // MIs[2]
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/2, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_SUB,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_SUB,
+// CHECK-NEXT: // MIs[1] Operand 0
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: // MIs[1] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/1, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[1] src2
+// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/1, /*Op*/2, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/2, TargetOpcode::G_SUB,
+// CHECK-NEXT: // MIs[2] Operand 0
+// CHECK-NEXT: GIM_CheckType, /*MI*/2, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: // MIs[2] src3
+// CHECK-NEXT: GIM_CheckType, /*MI*/2, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/2, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[2] src4
+// CHECK-NEXT: GIM_CheckType, /*MI*/2, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/2, /*Op*/2, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: GIM_CheckIsSafeToFold, /*InsnID*/1,
+// CHECK-NEXT: GIM_CheckIsSafeToFold, /*InsnID*/2,
+// CHECK-NEXT: // (sub:i32 (sub:i32 GPR32:i32:$src1, GPR32:i32:$src2), (sub:i32 GPR32:i32:$src3, GPR32:i32:$src4)) => (INSNBOB:i32 GPR32:i32:$src1, GPR32:i32:$src2, GPR32:i32:$src3, GPR32:i32:$src4)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::INSNBOB,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/2, // src2
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/2, /*OpIdx*/1, // src3
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/2, /*OpIdx*/2, // src4
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable7\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable7, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+
+def INSNBOB : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3, GPR32:$src4),
+ [(set GPR32:$dst,
+ (sub (sub GPR32:$src1, GPR32:$src2), (sub GPR32:$src3, GPR32:$src4)))]>,
+ Requires<[HasA]>;
+
//===- Test a pattern with ComplexPattern operands. -----------------------===//
//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_SUB) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((Renderer0 = selectComplexPattern(MI0.getOperand(2))))))) {
-// CHECK-NEXT: // (sub:i32 GPR32:i32:$src1, complex:i32:$src2) => (INSN1:i32 GPR32:i32:$src1, complex:i32:$src2)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::INSN1));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
-// CHECK-NEXT: Renderer0(MIB);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
+// CHECK-LABEL: MatchTable8[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_SUB,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckComplexPattern, /*MI*/0, /*Op*/2, /*Renderer*/0, GICP_gi_complex,
+// CHECK-NEXT: // (sub:i32 GPR32:i32:$src1, complex:i32:$src2) => (INSN1:i32 GPR32:i32:$src1, complex:i32:$src2)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::INSN1,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_ComplexRenderer, /*InsnID*/0, /*RendererID*/0,
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable8\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable8, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
def INSN1 : I<(outs GPR32:$dst), (ins GPR32:$src1, complex:$src2), []>;
def : Pat<(sub GPR32:$src1, complex:$src2), (INSN1 GPR32:$src1, complex:$src2)>;
@@ -293,32 +471,33 @@ def : Pat<(sub GPR32:$src1, complex:$src2), (INSN1 GPR32:$src1, complex:$src2)>;
//===- Test a simple pattern with a default operand. ----------------------===//
//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_XOR) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: (isOperandImmEqual(MI0.getOperand(2), -2, MRI))))) {
-// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -2:i32) => (XORI:i32 GPR32:i32:$src1)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::XORI));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.addImm(-1);
-// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
+// CHECK-LABEL: MatchTable9[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_XOR,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckConstantInt, /*MI*/0, /*Op*/2, -2
+// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -2:i32) => (XORI:i32 GPR32:i32:$src1)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::XORI,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_AddImm, /*InsnID*/0, /*Imm*/-1,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable9\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable9, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
// The -2 is just to distinguish it from the 'not' case below.
def XORI : I<(outs GPR32:$dst), (ins m1:$src2, GPR32:$src1),
@@ -327,32 +506,33 @@ def XORI : I<(outs GPR32:$dst), (ins m1:$src2, GPR32:$src1),
//===- Test a simple pattern with a default register operand. -------------===//
//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_XOR) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: (isOperandImmEqual(MI0.getOperand(2), -3, MRI))))) {
-// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -3:i32) => (XOR:i32 GPR32:i32:$src1)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::XOR));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.addReg(MyTarget::R0);
-// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
+// CHECK-LABEL: MatchTable10[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_XOR,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckConstantInt, /*MI*/0, /*Op*/2, -3
+// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -3:i32) => (XOR:i32 GPR32:i32:$src1)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::XOR,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_AddRegister, /*InsnID*/0, MyTarget::R0,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable10\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable10, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
// The -3 is just to distinguish it from the 'not' case below and the other default op case above.
def XOR : I<(outs GPR32:$dst), (ins Z:$src2, GPR32:$src1),
@@ -361,33 +541,34 @@ def XOR : I<(outs GPR32:$dst), (ins Z:$src2, GPR32:$src1),
//===- Test a simple pattern with multiple default operands. --------------===//
//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_XOR) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: (isOperandImmEqual(MI0.getOperand(2), -4, MRI))))) {
-// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -4:i32) => (XORlike:i32 GPR32:i32:$src1)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::XORlike));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.addImm(-1);
-// CHECK-NEXT: MIB.addReg(MyTarget::R0);
-// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
+// CHECK-LABEL: MatchTable11[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_XOR,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckConstantInt, /*MI*/0, /*Op*/2, -4
+// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -4:i32) => (XORlike:i32 GPR32:i32:$src1)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::XORlike,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_AddImm, /*InsnID*/0, /*Imm*/-1,
+// CHECK-NEXT: GIR_AddRegister, /*InsnID*/0, MyTarget::R0,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable11\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable11, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
// The -4 is just to distinguish it from the other 'not' cases.
def XORlike : I<(outs GPR32:$dst), (ins m1Z:$src2, GPR32:$src1),
@@ -396,34 +577,35 @@ def XORlike : I<(outs GPR32:$dst), (ins m1Z:$src2, GPR32:$src1),
//===- Test a simple pattern with multiple operands with defaults. --------===//
//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_XOR) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: (isOperandImmEqual(MI0.getOperand(2), -5, MRI))))) {
-// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -5:i32) => (XORManyDefaults:i32 GPR32:i32:$src1)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::XORManyDefaults));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.addImm(-1);
-// CHECK-NEXT: MIB.addReg(MyTarget::R0);
-// CHECK-NEXT: MIB.addReg(MyTarget::R0);
-// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
+// CHECK-LABEL: MatchTable12[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_XOR,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckConstantInt, /*MI*/0, /*Op*/2, -5,
+// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -5:i32) => (XORManyDefaults:i32 GPR32:i32:$src1)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::XORManyDefaults,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_AddImm, /*InsnID*/0, /*Imm*/-1,
+// CHECK-NEXT: GIR_AddRegister, /*InsnID*/0, MyTarget::R0,
+// CHECK-NEXT: GIR_AddRegister, /*InsnID*/0, MyTarget::R0,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src1
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable12\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable12, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
// The -5 is just to distinguish it from the other cases.
def XORManyDefaults : I<(outs GPR32:$dst), (ins m1Z:$src3, Z:$src2, GPR32:$src1),
@@ -434,32 +616,33 @@ def XORManyDefaults : I<(outs GPR32:$dst), (ins m1Z:$src3, Z:$src2, GPR32:$src1)
// This must precede the 3-register variants because constant immediates have
// priority over register banks.
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 3)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_XOR) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* Wm */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: (isOperandImmEqual(MI0.getOperand(2), -1, MRI))))) {
-// CHECK-NEXT: // (xor:i32 GPR32:i32:$Wm, -1:i32) => (ORN:i32 R0:i32, GPR32:i32:$Wm)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::ORN));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: MIB.addReg(MyTarget::R0);
-// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*Wm*/);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
+// CHECK-LABEL: MatchTable13[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_XOR,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Wm
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 2
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckConstantInt, /*MI*/0, /*Op*/2, -1,
+// CHECK-NEXT: // (xor:i32 GPR32:i32:$Wm, -1:i32) => (ORN:i32 R0:i32, GPR32:i32:$Wm)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::ORN,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_AddRegister, /*InsnID*/0, MyTarget::R0,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // Wm
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable13\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable13, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
def ORN : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2), []>;
def : Pat<(not GPR32:$Wm), (ORN R0, GPR32:$Wm)>;
@@ -467,70 +650,72 @@ def : Pat<(not GPR32:$Wm), (ORN R0, GPR32:$Wm)>;
//===- Test a COPY_TO_REGCLASS --------------------------------------------===//
//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 2)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_BITCAST) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::FPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))))
-// CHECK-NEXT: // (bitconvert:i32 FPR32:f32:$src1) => (COPY_TO_REGCLASS:i32 FPR32:f32:$src1, GPR32:i32)
-// CHECK-NEXT: I.setDesc(TII.get(TargetOpcode::COPY));
-// CHECK-NEXT: MachineInstr &NewI = I;
-// CHECK-NEXT: constrainOperandRegToRegClass(NewI, 0, MyTarget::GPR32RegClass, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
+// CHECK-LABEL: MatchTable14[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_BITCAST,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] src1
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/MyTarget::FPR32RegClassID,
+// CHECK-NEXT: // (bitconvert:i32 FPR32:f32:$src1) => (COPY_TO_REGCLASS:i32 FPR32:f32:$src1, GPR32:i32)
+// CHECK-NEXT: GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/ 0, /*Opcode*/TargetOpcode::COPY,
+// CHECK-NEXT: GIR_ConstrainOperandRC, /*InsnID*/0, /*Op*/0, /*RC GPR32*/ 1,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable14\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable14, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
def : Pat<(i32 (bitconvert FPR32:$src1)),
(COPY_TO_REGCLASS FPR32:$src1, GPR32)>;
//===- Test a simple pattern with just a leaf immediate. ------------------===//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 2)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_CONSTANT) &&
-// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
-// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
-// CHECK-NEXT: ((/* Operand 1 */ (MI0.getOperand(1).isCImm() && MI0.getOperand(1).getCImm()->equalsInt(1))))) {
-// CHECK-NEXT: // 1:i32 => (MOV1:i32)
-// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::MOV1));
-// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
-// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
-// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
-// CHECK-NEXT: MIB.addMemOperand(MMO);
-// CHECK-NEXT: I.eraseFromParent();
-// CHECK-NEXT: MachineInstr &NewI = *MIB;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
+// CHECK-LABEL: MatchTable15[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_CONSTANT,
+// CHECK-NEXT: // MIs[0] dst
+// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
+// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
+// CHECK-NEXT: // MIs[0] Operand 1
+// CHECK-NEXT: GIM_CheckLiteralInt, /*MI*/0, /*Op*/1, 1,
+// CHECK-NEXT: // 1:i32 => (MOV1:i32)
+// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::MOV1,
+// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
+// CHECK-NEXT: GIR_MergeMemOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable15\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable15, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
def MOV1 : I<(outs GPR32:$dst), (ins), [(set GPR32:$dst, 1)]>;
//===- Test a pattern with an MBB operand. --------------------------------===//
-// CHECK-LABEL: if ([&]() {
-// CHECK-NEXT: MachineInstr &MI0 = I;
-// CHECK-NEXT: if (MI0.getNumOperands() < 1)
-// CHECK-NEXT: return false;
-// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_BR) &&
-// CHECK-NEXT: ((/* target */ (MI0.getOperand(0).isMBB())))) {
-
-// CHECK-NEXT: // (br (bb:Other):$target) => (BR (bb:Other):$target)
-// CHECK-NEXT: I.setDesc(TII.get(MyTarget::BR));
-// CHECK-NEXT: MachineInstr &NewI = I;
-// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
-// CHECK-NEXT: return true;
-// CHECK-NEXT: }
-// CHECK-NEXT: return false;
-// CHECK-NEXT: }()) { return true; }
+// CHECK-LABEL: MatchTable16[] = {
+// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/1,
+// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_BR,
+// CHECK-NEXT: // MIs[0] target
+// CHECK-NEXT: GIM_CheckIsMBB, /*MI*/0, /*Op*/0,
+// CHECK-NEXT: // (br (bb:Other):$target) => (BR (bb:Other):$target)
+// CHECK-NEXT: GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/ 0, /*Opcode*/MyTarget::BR,
+// CHECK-NEXT: GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
+// CHECK-NEXT: GIR_Done,
+// CHECK-NEXT: };
+// CHECK-NEXT: MIs.resize(1);
+// CHECK-NEXT: DEBUG(dbgs() << "Processing MatchTable16\n");
+// CHECK-NEXT: if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable16, TII, MRI, TRI, RBI, AvailableFeatures)) {
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
def BR : I<(outs), (ins unknown:$target),
[(br bb:$target)]>;
diff --git a/test/TableGen/UnterminatedComment.td b/test/TableGen/UnterminatedComment.td
index f92525a99164..f386e4cef83b 100644
--- a/test/TableGen/UnterminatedComment.td
+++ b/test/TableGen/UnterminatedComment.td
@@ -1,4 +1,4 @@
-// RUN: not llvm-tblgen < %s >& /dev/null
+// RUN: not llvm-tblgen < %s > /dev/null 2>&1
def x;
diff --git a/test/Transforms/ArgumentPromotion/pr33641_remove_arg_dbgvalue.ll b/test/Transforms/ArgumentPromotion/pr33641_remove_arg_dbgvalue.ll
new file mode 100644
index 000000000000..7ce8ab3ac521
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/pr33641_remove_arg_dbgvalue.ll
@@ -0,0 +1,38 @@
+; RUN: opt -argpromotion -verify -dse -S %s -o - | FileCheck %s
+
+; Fix for PR33641. ArgumentPromotion removed the argument to bar but left the call to
+; dbg.value which still used the removed argument.
+
+%p_t = type i16*
+%fun_t = type void (%p_t)*
+
+define void @foo() {
+ %tmp = alloca %fun_t
+ store %fun_t @bar, %fun_t* %tmp
+ ret void
+}
+
+define internal void @bar(%p_t %p) {
+ call void @llvm.dbg.value(metadata %p_t %p, i64 0, metadata !4, metadata !5), !dbg !6
+ ret void
+}
+
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1)
+!1 = !DIFile(filename: "test.c", directory: "")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "bar", unit: !0)
+!4 = !DILocalVariable(name: "p", scope: !3)
+!5 = !DIExpression()
+!6 = !DILocation(line: 1, column: 1, scope: !3)
+
+; The %p argument should be removed, and the use of it in dbg.value should be
+; changed to undef.
+; CHECK: define internal void @bar() {
+; CHECK-NEXT: call void @llvm.dbg.value(metadata i16* undef
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
diff --git a/test/Transforms/CodeGenPrepare/X86/memcmp.ll b/test/Transforms/CodeGenPrepare/X86/memcmp.ll
index 2435cd7d0a83..4b9e7c3956f5 100644
--- a/test/Transforms/CodeGenPrepare/X86/memcmp.ll
+++ b/test/Transforms/CodeGenPrepare/X86/memcmp.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -codegenprepare -mtriple=i686-unknown-unknown -data-layout=e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X32
; RUN: opt -S -codegenprepare -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64
@@ -5,8 +6,8 @@ declare i32 @memcmp(i8* nocapture, i8* nocapture, i64)
define i32 @cmp2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp2(
-; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i16*
-; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* %y to i16*
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]]
; ALL-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
@@ -23,7 +24,7 @@ define i32 @cmp2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp3(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 3)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 3)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 3)
@@ -32,8 +33,8 @@ define i32 @cmp3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp4(
-; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
-; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* %y to i32*
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; ALL-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
@@ -50,7 +51,7 @@ define i32 @cmp4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp5(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 5)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 5)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 5)
@@ -59,7 +60,7 @@ define i32 @cmp5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp6(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 6)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 6)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 6)
@@ -68,7 +69,7 @@ define i32 @cmp6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp7(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 7)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 7)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 7)
@@ -77,12 +78,12 @@ define i32 @cmp7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp8(
-; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 8)
+; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 8)
; X32-NEXT: ret i32 [[CALL]]
;
; X64-LABEL: @cmp8(
-; X64-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i64*
-; X64-NEXT: [[TMP2:%.*]] = bitcast i8* %y to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
@@ -99,7 +100,7 @@ define i32 @cmp8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp9(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 9)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 9)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 9)
@@ -108,7 +109,7 @@ define i32 @cmp9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp10(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 10)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 10)
@@ -117,7 +118,7 @@ define i32 @cmp10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp11(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 11)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
@@ -126,7 +127,7 @@ define i32 @cmp11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp12(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 12)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 12)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 12)
@@ -135,7 +136,7 @@ define i32 @cmp12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp13(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 13)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
@@ -144,7 +145,7 @@ define i32 @cmp13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp14(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 14)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
@@ -153,7 +154,7 @@ define i32 @cmp14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp15(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
@@ -162,7 +163,7 @@ define i32 @cmp15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp16(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 16)
; ALL-NEXT: ret i32 [[CALL]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
@@ -171,8 +172,8 @@ define i32 @cmp16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq2(
-; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i16*
-; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* %y to i16*
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]]
; ALL-NEXT: [[TMP5:%.*]] = icmp ne i16 [[TMP3]], [[TMP4]]
@@ -189,7 +190,7 @@ define i32 @cmp_eq2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq3(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 3)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 3)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -202,8 +203,8 @@ define i32 @cmp_eq3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq4(
-; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
-; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* %y to i32*
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; ALL-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
@@ -220,7 +221,7 @@ define i32 @cmp_eq4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq5(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 5)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 5)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -233,7 +234,7 @@ define i32 @cmp_eq5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq6(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 6)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 6)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -246,7 +247,7 @@ define i32 @cmp_eq6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq7(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 7)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 7)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -259,14 +260,14 @@ define i32 @cmp_eq7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq8(
-; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 8)
+; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 8)
; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
; X64-LABEL: @cmp_eq8(
-; X64-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i64*
-; X64-NEXT: [[TMP2:%.*]] = bitcast i8* %y to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
@@ -283,7 +284,7 @@ define i32 @cmp_eq8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq9(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 9)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 9)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -296,7 +297,7 @@ define i32 @cmp_eq9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq10(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 10)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -309,7 +310,7 @@ define i32 @cmp_eq10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq11(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 11)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -322,7 +323,7 @@ define i32 @cmp_eq11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq12(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 12)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 12)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -335,7 +336,7 @@ define i32 @cmp_eq12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq13(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 13)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -348,7 +349,7 @@ define i32 @cmp_eq13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq14(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 14)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -361,7 +362,7 @@ define i32 @cmp_eq14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq15(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
@@ -374,7 +375,7 @@ define i32 @cmp_eq15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq16(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 16)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
diff --git a/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll b/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll
index 9d6e668167fb..b6b775797826 100644
--- a/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll
+++ b/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll
@@ -4,6 +4,8 @@ target datalayout =
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
+@x = external global [1 x [2 x <4 x float>]]
+
; Can we sink single addressing mode computation to use?
define void @test1(i1 %cond, i64* %base) {
; CHECK-LABEL: @test1
@@ -194,3 +196,25 @@ rare.2:
declare void @slowpath(i32, i32*)
+
+; Make sure we don't end up in an infinite loop after we fail to sink.
+; CHECK-LABEL: define void @test8
+; CHECK: %ptr = getelementptr i8, i8* %aFOO_load_ptr2int_2void, i32 undef
+define void @test8() {
+allocas:
+ %aFOO_load = load float*, float** undef
+ %aFOO_load_ptr2int = ptrtoint float* %aFOO_load to i64
+ %aFOO_load_ptr2int_broadcast_init = insertelement <4 x i64> undef, i64 %aFOO_load_ptr2int, i32 0
+ %aFOO_load_ptr2int_2void = inttoptr i64 %aFOO_load_ptr2int to i8*
+ %ptr = getelementptr i8, i8* %aFOO_load_ptr2int_2void, i32 undef
+ br label %load.i145
+
+load.i145:
+ %ptr.i143 = bitcast i8* %ptr to <4 x float>*
+ %valall.i144 = load <4 x float>, <4 x float>* %ptr.i143, align 4
+ %x_offset = getelementptr [1 x [2 x <4 x float>]], [1 x [2 x <4 x float>]]* @x, i32 0, i64 0
+ br label %pl_loop.i.i122
+
+pl_loop.i.i122:
+ br label %pl_loop.i.i122
+}
diff --git a/test/Transforms/CodeGenPrepare/crash-on-large-allocas.ll b/test/Transforms/CodeGenPrepare/crash-on-large-allocas.ll
new file mode 100644
index 000000000000..3808c0e61c10
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/crash-on-large-allocas.ll
@@ -0,0 +1,16 @@
+; RUN: opt -S -codegenprepare %s -o - | FileCheck %s
+;
+; Ensure that we don't {crash,return a bad value} when given an alloca larger
+; than what a pointer can represent.
+
+target datalayout = "p:16:16"
+
+; CHECK-LABEL: @alloca_overflow_is_unknown(
+define i16 @alloca_overflow_is_unknown() {
+ %i = alloca i8, i32 65537
+ %j = call i16 @llvm.objectsize.i16.p0i8(i8* %i, i1 false, i1 false)
+ ; CHECK: ret i16 -1
+ ret i16 %j
+}
+
+declare i16 @llvm.objectsize.i16.p0i8(i8*, i1, i1)
diff --git a/test/Transforms/ConstantHoisting/ARM/bad-cases.ll b/test/Transforms/ConstantHoisting/ARM/bad-cases.ll
index ffcfb2e56c95..315e69998c62 100644
--- a/test/Transforms/ConstantHoisting/ARM/bad-cases.ll
+++ b/test/Transforms/ConstantHoisting/ARM/bad-cases.ll
@@ -107,3 +107,34 @@ entry:
%ret = add i32 %cast0, %cast1
ret i32 %ret
}
+
+@exception_type = external global i8
+
+; Constants in inline ASM should not be hoisted.
+define i32 @inline_asm_invoke() personality i8* null {
+;CHECK-LABEL: @inline_asm_invoke
+;CHECK-NOT: %const = 214672
+;CHECK: %X = invoke i32 asm "bswap $0", "=r,r"(i32 214672)
+ %X = invoke i32 asm "bswap $0", "=r,r"(i32 214672)
+ to label %L unwind label %lpad
+;CHECK: %Y = invoke i32 asm "bswap $0", "=r,r"(i32 214672)
+ %Y = invoke i32 asm "bswap $0", "=r,r"(i32 214672)
+ to label %L unwind label %lpad
+L:
+ ret i32 %X
+lpad:
+ %lp = landingpad i32
+ cleanup
+ catch i8* @exception_type
+ ret i32 1
+}
+
+define i32 @inline_asm_call() {
+;CHECK-LABEL: @inline_asm_call
+;CHECK-NOT: %const = 214672
+;CHECK: %X = call i32 asm "bswap $0", "=r,r"(i32 214672)
+ %X = call i32 asm "bswap $0", "=r,r"(i32 214672)
+;CHECK: %Y = call i32 asm "bswap $0", "=r,r"(i32 214672)
+ %Y = call i32 asm "bswap $0", "=r,r"(i32 214672)
+ ret i32 %X
+}
diff --git a/test/Transforms/ConstantHoisting/ARM/insertvalue.ll b/test/Transforms/ConstantHoisting/ARM/insertvalue.ll
new file mode 100644
index 000000000000..99fe7fbe22a5
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/ARM/insertvalue.ll
@@ -0,0 +1,31 @@
+; RUN: opt -consthoist -S < %s | FileCheck %s
+target triple = "thumbv6m-none-eabi"
+
+%T = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32 }
+
+; The second operand of insertvalue is able to be hoisted.
+define void @test1(%T %P) {
+; CHECK-LABEL: @test1
+; CHECK: %const = bitcast i32 256 to i32
+; CHECK: %1 = insertvalue %T %P, i32 %const, 256
+; CHECK: %2 = insertvalue %T %P, i32 %const, 256
+ %1 = insertvalue %T %P, i32 256, 256
+ %2 = insertvalue %T %P, i32 256, 256
+ ret void
+}
diff --git a/test/Transforms/ConstantHoisting/X86/ehpad.ll b/test/Transforms/ConstantHoisting/X86/ehpad.ll
index 4f87572f3447..5e345c4515d7 100644
--- a/test/Transforms/ConstantHoisting/X86/ehpad.ll
+++ b/test/Transforms/ConstantHoisting/X86/ehpad.ll
@@ -1,9 +1,6 @@
-; RUN: opt -S -consthoist < %s | FileCheck %s
+; RUN: opt -S -consthoist -consthoist-with-block-frequency=false < %s | FileCheck %s
; RUN: opt -S -consthoist -consthoist-with-block-frequency=true < %s | FileCheck --check-prefix=BFIHOIST %s
-; FIXME: The catchpad doesn't even use the constant, so a better fix would be to
-; insert the bitcast in the catchpad block.
-
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc"
diff --git a/test/Transforms/GVN/PRE/atomic.ll b/test/Transforms/GVN/PRE/atomic.ll
index 509acd613e95..3479bc9a0e33 100644
--- a/test/Transforms/GVN/PRE/atomic.ll
+++ b/test/Transforms/GVN/PRE/atomic.ll
@@ -208,14 +208,14 @@ define void @fence_seq_cst(i32* %P1, i32* %P2) {
ret void
}
-; Can't DSE across a full singlethread fence
+; Can't DSE across a full syncscope("singlethread") fence
define void @fence_seq_cst_st(i32* %P1, i32* %P2) {
; CHECK-LABEL: @fence_seq_cst_st(
; CHECK: store
-; CHECK: fence singlethread seq_cst
+; CHECK: fence syncscope("singlethread") seq_cst
; CHECK: store
store i32 0, i32* %P1, align 4
- fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
store i32 0, i32* %P1, align 4
ret void
}
diff --git a/test/Transforms/GVN/PRE/phi-translate-2.ll b/test/Transforms/GVN/PRE/phi-translate-2.ll
deleted file mode 100644
index 78681e20df5e..000000000000
--- a/test/Transforms/GVN/PRE/phi-translate-2.ll
+++ /dev/null
@@ -1,131 +0,0 @@
-; RUN: opt < %s -gvn -S | FileCheck %s
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-
-@a = common global [100 x i64] zeroinitializer, align 16
-@b = common global [100 x i64] zeroinitializer, align 16
-@g1 = common global i64 0, align 8
-@g2 = common global i64 0, align 8
-@g3 = common global i64 0, align 8
-declare i64 @goo(...) local_unnamed_addr #1
-
-define void @test1(i64 %a, i64 %b, i64 %c, i64 %d) {
-entry:
- %mul = mul nsw i64 %b, %a
- store i64 %mul, i64* @g1, align 8
- %t0 = load i64, i64* @g2, align 8
- %cmp = icmp sgt i64 %t0, 3
- br i1 %cmp, label %if.then, label %if.end
-
-if.then: ; preds = %entry
- %mul2 = mul nsw i64 %d, %c
- store i64 %mul2, i64* @g2, align 8
- br label %if.end
-
-; Check phi-translate works and mul is removed.
-; CHECK-LABEL: @test1(
-; CHECK: if.end:
-; CHECK: %[[MULPHI:.*]] = phi i64 [ {{.*}}, %if.then ], [ %mul, %entry ]
-; CHECK-NOT: = mul
-; CHECK: store i64 %[[MULPHI]], i64* @g3, align 8
-if.end: ; preds = %if.then, %entry
- %b.addr.0 = phi i64 [ %d, %if.then ], [ %b, %entry ]
- %a.addr.0 = phi i64 [ %c, %if.then ], [ %a, %entry ]
- %mul3 = mul nsw i64 %a.addr.0, %b.addr.0
- store i64 %mul3, i64* @g3, align 8
- ret void
-}
-
-define void @test2(i64 %i) {
-entry:
- %arrayidx = getelementptr inbounds [100 x i64], [100 x i64]* @a, i64 0, i64 %i
- %t0 = load i64, i64* %arrayidx, align 8
- %arrayidx1 = getelementptr inbounds [100 x i64], [100 x i64]* @b, i64 0, i64 %i
- %t1 = load i64, i64* %arrayidx1, align 8
- %mul = mul nsw i64 %t1, %t0
- store i64 %mul, i64* @g1, align 8
- %cmp = icmp sgt i64 %mul, 3
- br i1 %cmp, label %if.then, label %if.end
-
-; Check phi-translate works for the phi generated by loadpre. A new mul will be
-; inserted in if.then block.
-; CHECK-LABEL: @test2(
-; CHECK: if.then:
-; CHECK: %[[MUL_THEN:.*]] = mul
-; CHECK: br label %if.end
-if.then: ; preds = %entry
- %call = tail call i64 (...) @goo() #2
- store i64 %call, i64* @g2, align 8
- br label %if.end
-
-; CHECK: if.end:
-; CHECK: %[[MULPHI:.*]] = phi i64 [ %[[MUL_THEN]], %if.then ], [ %mul, %entry ]
-; CHECK-NOT: = mul
-; CHECK: store i64 %[[MULPHI]], i64* @g3, align 8
-if.end: ; preds = %if.then, %entry
- %i.addr.0 = phi i64 [ 3, %if.then ], [ %i, %entry ]
- %arrayidx3 = getelementptr inbounds [100 x i64], [100 x i64]* @a, i64 0, i64 %i.addr.0
- %t2 = load i64, i64* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds [100 x i64], [100 x i64]* @b, i64 0, i64 %i.addr.0
- %t3 = load i64, i64* %arrayidx4, align 8
- %mul5 = mul nsw i64 %t3, %t2
- store i64 %mul5, i64* @g3, align 8
- ret void
-}
-
-; Check phi-translate doesn't go through backedge, which may lead to incorrect
-; pre transformation.
-; CHECK: for.end:
-; CHECK-NOT: %{{.*pre-phi}} = phi
-; CHECK: ret void
-define void @test3(i64 %N, i64* nocapture readonly %a) {
-entry:
- br label %for.cond
-
-for.cond: ; preds = %for.body, %entry
- %i.0 = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %add = add nuw nsw i64 %i.0, 1
- %arrayidx = getelementptr inbounds i64, i64* %a, i64 %add
- %tmp0 = load i64, i64* %arrayidx, align 8
- %cmp = icmp slt i64 %i.0, %N
- br i1 %cmp, label %for.body, label %for.end
-
-for.body: ; preds = %for.cond
- %call = tail call i64 (...) @goo() #2
- %add1 = sub nsw i64 0, %call
- %tobool = icmp eq i64 %tmp0, %add1
- br i1 %tobool, label %for.cond, label %for.end
-
-for.end: ; preds = %for.body, %for.cond
- %i.0.lcssa = phi i64 [ %i.0, %for.body ], [ %i.0, %for.cond ]
- %arrayidx2 = getelementptr inbounds i64, i64* %a, i64 %i.0.lcssa
- %tmp1 = load i64, i64* %arrayidx2, align 8
- store i64 %tmp1, i64* @g1, align 8
- ret void
-}
-
-; It is incorrect to use the value of %andres in last loop iteration
-; to do pre.
-; CHECK-LABEL: @test4(
-; CHECK: for.body:
-; CHECK-NOT: %andres.pre-phi = phi i32
-; CHECK: br i1 %tobool1
-
-define i32 @test4(i32 %cond, i32 %SectionAttrs.0231.ph, i32 *%AttrFlag) {
-for.body.preheader:
- %t514 = load volatile i32, i32* %AttrFlag
- br label %for.body
-
-for.body:
- %t320 = phi i32 [ %t334, %bb343 ], [ %t514, %for.body.preheader ]
- %andres = and i32 %t320, %SectionAttrs.0231.ph
- %tobool1 = icmp eq i32 %andres, 0
- br i1 %tobool1, label %bb343, label %critedge.loopexit
-
-bb343:
- %t334 = load volatile i32, i32* %AttrFlag
- %tobool2 = icmp eq i32 %cond, 0
- br i1 %tobool2, label %critedge.loopexit, label %for.body
-
-critedge.loopexit:
- unreachable
-}
diff --git a/test/Transforms/GVN/PRE/pre-gep-load.ll b/test/Transforms/GVN/PRE/pre-gep-load.ll
index 1b2b4d20d31d..9eec8bb6455b 100644
--- a/test/Transforms/GVN/PRE/pre-gep-load.ll
+++ b/test/Transforms/GVN/PRE/pre-gep-load.ll
@@ -37,7 +37,7 @@ sw.bb2: ; preds = %if.end, %entry
%3 = load double, double* %arrayidx5, align 8
; CHECK: sw.bb2:
; CHECK-NOT: sext
-; CHECK: phi double [
+; CHECK-NEXT: phi double [
; CHECK-NOT: load
%sub6 = fsub double 3.000000e+00, %3
br label %return
diff --git a/test/Transforms/GVN/PRE/pre-load.ll b/test/Transforms/GVN/PRE/pre-load.ll
index ffff2b7f08e5..685df24f62b6 100644
--- a/test/Transforms/GVN/PRE/pre-load.ll
+++ b/test/Transforms/GVN/PRE/pre-load.ll
@@ -72,7 +72,7 @@ block4:
%PRE = load i32, i32* %P3
ret i32 %PRE
; CHECK: block4:
-; CHECK: phi i32 [
+; CHECK-NEXT: phi i32 [
; CHECK-NOT: load
; CHECK: ret i32
}
@@ -104,7 +104,7 @@ block4:
%PRE = load i32, i32* %P3
ret i32 %PRE
; CHECK: block4:
-; CHECK: phi i32 [
+; CHECK-NEXT: phi i32 [
; CHECK-NOT: load
; CHECK: ret i32
}
@@ -263,7 +263,7 @@ block4:
%PRE = load i32, i32* %P3
ret i32 %PRE
; CHECK: block4:
-; CHECK: phi i32 [
+; CHECK-NEXT: phi i32 [
; CHECK-NOT: load
; CHECK: ret i32
}
diff --git a/test/Transforms/IndVarSimplify/canonicalize-cmp.ll b/test/Transforms/IndVarSimplify/canonicalize-cmp.ll
new file mode 100644
index 000000000000..2b939767284a
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/canonicalize-cmp.ll
@@ -0,0 +1,98 @@
+; RUN: opt -S -indvars < %s | FileCheck %s
+
+; Check that we replace signed comparisons between non-negative values with
+; unsigned comparisons if we can.
+
+target datalayout = "n8:16:32:64"
+
+define i32 @test_01(i32 %a, i32 %b, i32* %p) {
+
+; CHECK-LABEL: @test_01(
+; CHECK-NOT: icmp slt
+; CHECK: %cmp1 = icmp ult i32 %iv, 100
+; CHECK: %cmp2 = icmp ult i32 %iv, 100
+; CHECK-NOT: %cmp3
+; CHECK: %exitcond = icmp ne i32 %iv.next, 1000
+
+entry:
+ br label %loop.entry
+
+loop.entry:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.be ]
+ %cmp1 = icmp slt i32 %iv, 100
+ br i1 %cmp1, label %b1, label %b2
+
+b1:
+ store i32 %iv, i32* %p
+ br label %merge
+
+b2:
+ store i32 %a, i32* %p
+ br label %merge
+
+merge:
+ %cmp2 = icmp ult i32 %iv, 100
+ br i1 %cmp2, label %b3, label %b4
+
+b3:
+ store i32 %iv, i32* %p
+ br label %loop.be
+
+b4:
+ store i32 %b, i32* %p
+ br label %loop.be
+
+loop.be:
+ %iv.next = add i32 %iv, 1
+ %cmp3 = icmp slt i32 %iv.next, 1000
+ br i1 %cmp3, label %loop.entry, label %exit
+
+exit:
+ ret i32 %iv
+}
+
+define i32 @test_02(i32 %a, i32 %b, i32* %p) {
+
+; CHECK-LABEL: @test_02(
+; CHECK-NOT: icmp sgt
+; CHECK: %cmp1 = icmp ugt i32 100, %iv
+; CHECK: %cmp2 = icmp ugt i32 100, %iv
+; CHECK-NOT: %cmp3
+; CHECK: %exitcond = icmp ne i32 %iv.next, 1000
+
+entry:
+ br label %loop.entry
+
+loop.entry:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.be ]
+ %cmp1 = icmp sgt i32 100, %iv
+ br i1 %cmp1, label %b1, label %b2
+
+b1:
+ store i32 %iv, i32* %p
+ br label %merge
+
+b2:
+ store i32 %a, i32* %p
+ br label %merge
+
+merge:
+ %cmp2 = icmp ugt i32 100, %iv
+ br i1 %cmp2, label %b3, label %b4
+
+b3:
+ store i32 %iv, i32* %p
+ br label %loop.be
+
+b4:
+ store i32 %b, i32* %p
+ br label %loop.be
+
+loop.be:
+ %iv.next = add i32 %iv, 1
+ %cmp3 = icmp sgt i32 1000, %iv.next
+ br i1 %cmp3, label %loop.entry, label %exit
+
+exit:
+ ret i32 %iv
+}
diff --git a/test/Transforms/IndVarSimplify/eliminate-comparison.ll b/test/Transforms/IndVarSimplify/eliminate-comparison.ll
index 612f01e3cade..a63617e62c0e 100644
--- a/test/Transforms/IndVarSimplify/eliminate-comparison.ll
+++ b/test/Transforms/IndVarSimplify/eliminate-comparison.ll
@@ -111,7 +111,7 @@ return:
; Indvars should not turn the second loop into an infinite one.
; CHECK-LABEL: @func_11(
-; CHECK: %tmp5 = icmp slt i32 %__key6.0, 10
+; CHECK: %tmp5 = icmp ult i32 %__key6.0, 10
; CHECK-NOT: br i1 true, label %noassert68, label %unrolledend
define i32 @func_11() nounwind uwtable {
@@ -163,7 +163,7 @@ declare void @llvm.trap() noreturn nounwind
; In this case the second loop only has a single iteration, fold the header away
; CHECK-LABEL: @func_12(
-; CHECK: %tmp5 = icmp slt i32 %__key6.0, 10
+; CHECK: %tmp5 = icmp ult i32 %__key6.0, 10
; CHECK: br i1 true, label %noassert68, label %unrolledend
define i32 @func_12() nounwind uwtable {
entry:
diff --git a/test/Transforms/IndVarSimplify/strengthen-overflow.ll b/test/Transforms/IndVarSimplify/strengthen-overflow.ll
index 2bafe96e1ccc..6e0538e04d6b 100644
--- a/test/Transforms/IndVarSimplify/strengthen-overflow.ll
+++ b/test/Transforms/IndVarSimplify/strengthen-overflow.ll
@@ -104,5 +104,89 @@ define i32 @test.unsigned.add.1(i32* %array, i32 %length, i32 %init) {
ret i32 42
}
+define hidden void @test.shl.exact.equal() {
+; CHECK-LABEL: @test.shl.exact.equal
+entry:
+ br label %for.body
+
+for.body:
+; CHECK-LABEL: for.body
+ %k.021 = phi i32 [ 1, %entry ], [ %inc, %for.body ]
+ %shl = shl i32 1, %k.021
+ %shr1 = ashr i32 %shl, 1
+; CHECK: %shr1 = ashr exact i32 %shl, 1
+ %shr2 = lshr i32 %shl, 1
+; CHECK: %shr2 = lshr exact i32 %shl, 1
+ %inc = add nuw nsw i32 %k.021, 1
+ %exitcond = icmp eq i32 %inc, 9
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define hidden void @test.shl.exact.greater() {
+; CHECK-LABEL: @test.shl.exact.greater
+entry:
+ br label %for.body
+
+for.body:
+; CHECK-LABEL: for.body
+ %k.021 = phi i32 [ 3, %entry ], [ %inc, %for.body ]
+ %shl = shl i32 1, %k.021
+ %shr1 = ashr i32 %shl, 2
+; CHECK: %shr1 = ashr exact i32 %shl, 2
+ %shr2 = lshr i32 %shl, 2
+; CHECK: %shr2 = lshr exact i32 %shl, 2
+ %inc = add nuw nsw i32 %k.021, 1
+ %exitcond = icmp eq i32 %inc, 9
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define hidden void @test.shl.exact.unbound(i32 %arg) {
+; CHECK-LABEL: @test.shl.exact.unbound
+entry:
+ br label %for.body
+
+for.body:
+; CHECK-LABEL: for.body
+ %k.021 = phi i32 [ 2, %entry ], [ %inc, %for.body ]
+ %shl = shl i32 1, %k.021
+ %shr1 = ashr i32 %shl, 2
+; CHECK: %shr1 = ashr exact i32 %shl, 2
+ %shr2 = lshr i32 %shl, 2
+; CHECK: %shr2 = lshr exact i32 %shl, 2
+ %inc = add nuw nsw i32 %k.021, 1
+ %exitcond = icmp eq i32 %inc, %arg
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define hidden void @test.shl.nonexact() {
+; CHECK-LABEL: @test.shl.nonexact
+entry:
+ br label %for.body
+
+for.body:
+; CHECK-LABEL: for.body
+ %k.021 = phi i32 [ 2, %entry ], [ %inc, %for.body ]
+ %shl = shl i32 1, %k.021
+ %shr1 = ashr i32 %shl, 3
+; CHECK: %shr1 = ashr i32 %shl, 3
+ %shr2 = lshr i32 %shl, 3
+; CHECK: %shr2 = lshr i32 %shl, 3
+ %inc = add nuw nsw i32 %k.021, 1
+ %exitcond = icmp eq i32 %inc, 9
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
!0 = !{i32 0, i32 2}
!1 = !{i32 0, i32 42}
diff --git a/test/Transforms/IndVarSimplify/widen-loop-comp.ll b/test/Transforms/IndVarSimplify/widen-loop-comp.ll
index b87cd0550192..2d24cd732ce8 100644
--- a/test/Transforms/IndVarSimplify/widen-loop-comp.ll
+++ b/test/Transforms/IndVarSimplify/widen-loop-comp.ll
@@ -64,7 +64,7 @@ for.end:
; CHECK-LABEL: @test2
; CHECK: for.body4.us
; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-; CHECK: %cmp2.us = icmp slt i64
+; CHECK: %cmp2.us = icmp ult i64
; CHECK-NOT: %2 = trunc i64 %indvars.iv.next to i32
; CHECK-NOT: %cmp2.us = icmp slt i32
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll b/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
index b566c147e9b8..1eab70754030 100644
--- a/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
@@ -170,4 +170,16 @@ define { i32 addrspace(4)*, i1 } @cmpxchg_group_to_flat_wrong_operand(i32 addrsp
ret { i32 addrspace(4)*, i1 } %ret
}
+; Null pointer in local addr space
+; CHECK-LABEL: @local_nullptr
+; CHECK: icmp ne i8 addrspace(3)* %a, addrspacecast (i8* null to i8 addrspace(3)*)
+; CHECK-NOT: i8 addrspace(3)* null
+define void @local_nullptr(i32 addrspace(1)* nocapture %results, i8 addrspace(3)* %a) {
+entry:
+ %tobool = icmp ne i8 addrspace(3)* %a, addrspacecast (i8* null to i8 addrspace(3)*)
+ %conv = zext i1 %tobool to i32
+ store i32 %conv, i32 addrspace(1)* %results, align 4
+ ret void
+}
+
attributes #0 = { nounwind }
diff --git a/test/Transforms/Inline/ARM/inline-target-attr.ll b/test/Transforms/Inline/ARM/inline-target-attr.ll
new file mode 100644
index 000000000000..5bbecd203528
--- /dev/null
+++ b/test/Transforms/Inline/ARM/inline-target-attr.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -mtriple=arm-unknown-linux-gnu -S -inline | FileCheck %s
+; RUN: opt < %s -mtriple=arm-unknown-linux-gnu -S -passes='cgscc(inline)' | FileCheck %s
+; Check that we only inline when we have compatible target attributes.
+; ARM has implemented a target attribute that will verify that the attribute
+; sets are compatible.
+
+define i32 @foo() #0 {
+entry:
+ %call = call i32 (...) @baz()
+ ret i32 %call
+; CHECK-LABEL: foo
+; CHECK: call i32 (...) @baz()
+}
+declare i32 @baz(...) #0
+
+define i32 @bar() #1 {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+; CHECK-LABEL: bar
+; CHECK: call i32 (...) @baz()
+}
+
+define i32 @qux() #0 {
+entry:
+ %call = call i32 @bar()
+ ret i32 %call
+; CHECK-LABEL: qux
+; CHECK: call i32 @bar()
+}
+
+define i32 @thumb_fn() #2 {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+; CHECK-LABEL: thumb_fn
+; CHECK: call i32 @foo
+}
+
+define i32 @strict_align() #3 {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+; CHECK-LABEL: strict_align
+; CHECK: call i32 (...) @baz()
+}
+
+define i32 @soft_float_fn() #4 {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+; CHECK-LABEL: soft_float_fn
+; CHECK: call i32 @foo
+}
+
+attributes #0 = { "target-cpu"="generic" "target-features"="+dsp,+neon" }
+attributes #1 = { "target-cpu"="generic" "target-features"="+dsp,+neon,+fp16" }
+attributes #2 = { "target-cpu"="generic" "target-features"="+dsp,+neon,+fp16,+thumb-mode" }
+attributes #3 = { "target-cpu"="generic" "target-features"="+dsp,+neon,+strict-align" }
+attributes #4 = { "target-cpu"="generic" "target-features"="+dsp,+neon,+fp16,+soft-float" }
diff --git a/test/Transforms/Inline/ARM/lit.local.cfg b/test/Transforms/Inline/ARM/lit.local.cfg
new file mode 100644
index 000000000000..236e1d344166
--- /dev/null
+++ b/test/Transforms/Inline/ARM/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'ARM' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/Inline/cgscc-incremental-invalidate.ll b/test/Transforms/Inline/cgscc-incremental-invalidate.ll
index 82d321ccf225..164f7a66a6f3 100644
--- a/test/Transforms/Inline/cgscc-incremental-invalidate.ll
+++ b/test/Transforms/Inline/cgscc-incremental-invalidate.ll
@@ -11,17 +11,35 @@
; CHECK: Running analysis: FunctionAnalysisManagerCGSCCProxy on (test1_f, test1_g, test1_h)
; CHECK: Running analysis: DominatorTreeAnalysis on test1_f
; CHECK: Running analysis: DominatorTreeAnalysis on test1_g
-; CHECK: Invalidating all non-preserved analyses for: (test1_f, test1_g, test1_h)
+; CHECK: Invalidating all non-preserved analyses for: (test1_f)
; CHECK: Invalidating all non-preserved analyses for: test1_f
; CHECK: Invalidating analysis: DominatorTreeAnalysis on test1_f
+; CHECK: Invalidating analysis: LoopAnalysis on test1_f
+; CHECK: Invalidating analysis: BranchProbabilityAnalysis on test1_f
+; CHECK: Invalidating analysis: BlockFrequencyAnalysis on test1_f
+; CHECK: Invalidating all non-preserved analyses for: (test1_g, test1_h)
; CHECK: Invalidating all non-preserved analyses for: test1_g
; CHECK: Invalidating analysis: DominatorTreeAnalysis on test1_g
-; CHECK: Invalidating all non-preserved analyses for: test1_h
-; CHECK-NOT: Invalidating anaylsis:
-; CHECK: Running analysis: DominatorTreeAnalysis on test1_h
-; CHECK: Invalidating all non-preserved analyses for: (test1_g, test1_h)
+; CHECK: Invalidating analysis: LoopAnalysis on test1_g
+; CHECK: Invalidating analysis: BranchProbabilityAnalysis on test1_g
+; CHECK: Invalidating analysis: BlockFrequencyAnalysis on test1_g
; CHECK: Invalidating all non-preserved analyses for: test1_h
; CHECK: Invalidating analysis: DominatorTreeAnalysis on test1_h
+; CHECK: Invalidating analysis: LoopAnalysis on test1_h
+; CHECK: Invalidating analysis: BranchProbabilityAnalysis on test1_h
+; CHECK: Invalidating analysis: BlockFrequencyAnalysis on test1_h
+; CHECK-NOT: Invalidating analysis:
+; CHECK: Starting llvm::Function pass manager run.
+; CHECK-NEXT: Running pass: DominatorTreeVerifierPass on test1_g
+; CHECK-NEXT: Running analysis: DominatorTreeAnalysis on test1_g
+; CHECK-NEXT: Finished llvm::Function pass manager run.
+; CHECK-NEXT: Starting llvm::Function pass manager run.
+; CHECK-NEXT: Running pass: DominatorTreeVerifierPass on test1_h
+; CHECK-NEXT: Running analysis: DominatorTreeAnalysis on test1_h
+; CHECK-NEXT: Finished llvm::Function pass manager run.
+; CHECK-NOT: Invalidating analysis:
+; CHECK: Running pass: DominatorTreeVerifierPass on test1_f
+; CHECK-NEXT: Running analysis: DominatorTreeAnalysis on test1_f
; An external function used to control branches.
declare i1 @flag()
@@ -109,3 +127,80 @@ entry:
ret void
; CHECK: ret void
}
+
+; The 'test2_' prefixed code works to carefully trigger forming an SCC with
+; a dominator tree for one of the functions but not the other and without even
+; a function analysis manager proxy for the SCC that things get merged into.
+; Without proper handling when updating the call graph this will find a stale
+; dominator tree.
+
+@test2_global = external global i32, align 4
+
+define void @test2_hoge(i1 (i32*)* %arg) {
+; CHECK-LABEL: define void @test2_hoge(
+bb:
+ %tmp2 = call zeroext i1 %arg(i32* @test2_global)
+; CHECK: call zeroext i1 %arg(
+ br label %bb3
+
+bb3:
+ %tmp5 = call zeroext i1 %arg(i32* @test2_global)
+; CHECK: call zeroext i1 %arg(
+ br i1 %tmp5, label %bb3, label %bb6
+
+bb6:
+ ret void
+}
+
+define zeroext i1 @test2_widget(i32* %arg) {
+; CHECK-LABEL: define zeroext i1 @test2_widget(
+bb:
+ %tmp1 = alloca i8, align 1
+ %tmp2 = alloca i32, align 4
+ call void @test2_quux()
+; CHECK-NOT: call
+;
+; CHECK: call zeroext i1 @test2_widget(i32* @test2_global)
+; CHECK-NEXT: br label %[[NEW_BB:.*]]
+;
+; CHECK: [[NEW_BB]]:
+; CHECK-NEXT: call zeroext i1 @test2_widget(i32* @test2_global)
+;
+; CHECK: {{.*}}:
+
+ call void @test2_hoge.1(i32* %arg)
+; CHECK-NEXT: call void @test2_hoge.1(
+
+ %tmp4 = call zeroext i1 @test2_barney(i32* %tmp2)
+ %tmp5 = zext i1 %tmp4 to i32
+ store i32 %tmp5, i32* %tmp2, align 4
+ %tmp6 = call zeroext i1 @test2_barney(i32* null)
+ call void @test2_ham(i8* %tmp1)
+; CHECK: call void @test2_ham(
+
+ call void @test2_quux()
+; CHECK-NOT: call
+;
+; CHECK: call zeroext i1 @test2_widget(i32* @test2_global)
+; CHECK-NEXT: br label %[[NEW_BB:.*]]
+;
+; CHECK: [[NEW_BB]]:
+; CHECK-NEXT: call zeroext i1 @test2_widget(i32* @test2_global)
+;
+; CHECK: {{.*}}:
+ ret i1 true
+; CHECK-NEXT: ret i1 true
+}
+
+define internal void @test2_quux() {
+; CHECK-NOT: @test2_quux
+bb:
+ call void @test2_hoge(i1 (i32*)* @test2_widget)
+ ret void
+}
+
+declare void @test2_hoge.1(i32*)
+
+declare zeroext i1 @test2_barney(i32*)
+
+declare void @test2_ham(i8*)
diff --git a/test/Transforms/InstCombine/2017-07-07-UMul-ZExt.ll b/test/Transforms/InstCombine/2017-07-07-UMul-ZExt.ll
new file mode 100644
index 000000000000..3c4e08b5b515
--- /dev/null
+++ b/test/Transforms/InstCombine/2017-07-07-UMul-ZExt.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; CHECK: llvm.umul.with.overflow
+define i32 @sterix(i32, i8, i64) {
+entry:
+ %conv = zext i32 %0 to i64
+ %conv1 = sext i8 %1 to i32
+ %mul = mul i32 %conv1, 1945964878
+ %sh_prom = trunc i64 %2 to i32
+ %shr = lshr i32 %mul, %sh_prom
+ %conv2 = zext i32 %shr to i64
+ %mul3 = mul nuw nsw i64 %conv, %conv2
+ %conv6 = and i64 %mul3, 4294967295
+ %tobool = icmp ne i64 %conv6, %mul3
+ br i1 %tobool, label %lor.end, label %lor.rhs
+
+lor.rhs:
+ %and = and i64 %2, %mul3
+ %conv4 = trunc i64 %and to i32
+ %tobool7 = icmp ne i32 %conv4, 0
+ %lnot = xor i1 %tobool7, true
+ br label %lor.end
+
+lor.end:
+ %3 = phi i1 [ true, %entry ], [ %lnot, %lor.rhs ]
+ %conv8 = zext i1 %3 to i32
+ ret i32 %conv8
+}
+
diff --git a/test/Transforms/InstCombine/and-or-not.ll b/test/Transforms/InstCombine/and-or-not.ll
index 1baecb4a13a3..04f7be01eaf5 100644
--- a/test/Transforms/InstCombine/and-or-not.ll
+++ b/test/Transforms/InstCombine/and-or-not.ll
@@ -570,10 +570,8 @@ define i32 @xor_to_xnor1(float %fa, float %fb) {
; CHECK-LABEL: @xor_to_xnor1(
; CHECK-NEXT: [[A:%.*]] = fptosi float [[FA:%.*]] to i32
; CHECK-NEXT: [[B:%.*]] = fptosi float [[FB:%.*]] to i32
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR2_DEMORGAN:%.*]] = and i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR2:%.*]] = xor i32 [[OR2_DEMORGAN]], -1
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR1]], [[OR2]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[XOR]]
;
%a = fptosi float %fa to i32
@@ -591,10 +589,8 @@ define i32 @xor_to_xnor2(float %fa, float %fb) {
; CHECK-LABEL: @xor_to_xnor2(
; CHECK-NEXT: [[A:%.*]] = fptosi float [[FA:%.*]] to i32
; CHECK-NEXT: [[B:%.*]] = fptosi float [[FB:%.*]] to i32
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR2_DEMORGAN:%.*]] = and i32 [[B]], [[A]]
-; CHECK-NEXT: [[OR2:%.*]] = xor i32 [[OR2_DEMORGAN]], -1
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR1]], [[OR2]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[XOR]]
;
%a = fptosi float %fa to i32
@@ -612,10 +608,8 @@ define i32 @xor_to_xnor3(float %fa, float %fb) {
; CHECK-LABEL: @xor_to_xnor3(
; CHECK-NEXT: [[A:%.*]] = fptosi float [[FA:%.*]] to i32
; CHECK-NEXT: [[B:%.*]] = fptosi float [[FB:%.*]] to i32
-; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR1:%.*]] = xor i32 [[OR1_DEMORGAN]], -1
-; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[B]]
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR2]], [[OR1]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[XOR]]
;
%a = fptosi float %fa to i32
@@ -633,10 +627,8 @@ define i32 @xor_to_xnor4(float %fa, float %fb) {
; CHECK-LABEL: @xor_to_xnor4(
; CHECK-NEXT: [[A:%.*]] = fptosi float [[FA:%.*]] to i32
; CHECK-NEXT: [[B:%.*]] = fptosi float [[FB:%.*]] to i32
-; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR1:%.*]] = xor i32 [[OR1_DEMORGAN]], -1
-; CHECK-NEXT: [[OR2:%.*]] = or i32 [[B]], [[A]]
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR2]], [[OR1]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[A]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[XOR]]
;
%a = fptosi float %fa to i32
diff --git a/test/Transforms/InstCombine/bswap-fold.ll b/test/Transforms/InstCombine/bswap-fold.ll
index 91678a91962a..260e2330996e 100644
--- a/test/Transforms/InstCombine/bswap-fold.ll
+++ b/test/Transforms/InstCombine/bswap-fold.ll
@@ -1,35 +1,6 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
-define i1 @test1(i16 %t) {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 %t, 256
-; CHECK-NEXT: ret i1 [[TMP2]]
-;
- %tmp1 = call i16 @llvm.bswap.i16( i16 %t )
- %tmp2 = icmp eq i16 %tmp1, 1
- ret i1 %tmp2
-}
-
-define i1 @test2(i32 %tmp) {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[TMP_UPGRD_1:%.*]] = icmp eq i32 %tmp, 16777216
-; CHECK-NEXT: ret i1 [[TMP_UPGRD_1]]
-;
- %tmp34 = tail call i32 @llvm.bswap.i32( i32 %tmp )
- %tmp.upgrd.1 = icmp eq i32 %tmp34, 1
- ret i1 %tmp.upgrd.1
-}
-
-define i1 @test3(i64 %tmp) {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[TMP_UPGRD_2:%.*]] = icmp eq i64 %tmp, 72057594037927936
-; CHECK-NEXT: ret i1 [[TMP_UPGRD_2]]
-;
- %tmp34 = tail call i64 @llvm.bswap.i64( i64 %tmp )
- %tmp.upgrd.2 = icmp eq i64 %tmp34, 1
- ret i1 %tmp.upgrd.2
-}
-
; rdar://5992453
; A & 255
define i32 @test4(i32 %a) nounwind {
@@ -241,6 +212,136 @@ define i64 @bs_xor64(i64 %a, i64 %b) #0 {
ret i64 %tmp3
}
+define <2 x i32> @bs_and32vec(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: @bs_and32vec(
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
+; CHECK-NEXT: ret <2 x i32> [[TMP2]]
+;
+ %tmp1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
+ %tmp2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
+ %tmp3 = and <2 x i32> %tmp1, %tmp2
+ ret <2 x i32> %tmp3
+}
+
+define <2 x i32> @bs_or32vec(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: @bs_or32vec(
+; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
+; CHECK-NEXT: ret <2 x i32> [[TMP2]]
+;
+ %tmp1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
+ %tmp2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
+ %tmp3 = or <2 x i32> %tmp1, %tmp2
+ ret <2 x i32> %tmp3
+}
+
+define <2 x i32> @bs_xor32vec(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: @bs_xor32vec(
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
+; CHECK-NEXT: ret <2 x i32> [[TMP2]]
+;
+ %tmp1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
+ %tmp2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
+ %tmp3 = xor <2 x i32> %tmp1, %tmp2
+ ret <2 x i32> %tmp3
+}
+
+define <2 x i32> @bs_and32ivec(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: @bs_and32ivec(
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], <i32 -1585053440, i32 -1585053440>
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
+; CHECK-NEXT: ret <2 x i32> [[TMP2]]
+;
+ %tmp1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
+ %tmp2 = and <2 x i32> %tmp1, <i32 100001, i32 100001>
+ ret <2 x i32> %tmp2
+}
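+; The swapped immediate in the checks above comes from folding the constant
+; through the bswap: 100001 = 0x000186A1, and bswap32(0x000186A1) = 0xA1860100,
+; which is -1585053440 as a signed i32.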
+
+define <2 x i32> @bs_or32ivec(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: @bs_or32ivec(
+; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[A:%.*]], <i32 -1585053440, i32 -1585053440>
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
+; CHECK-NEXT: ret <2 x i32> [[TMP2]]
+;
+ %tmp1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
+ %tmp2 = or <2 x i32> %tmp1, <i32 100001, i32 100001>
+ ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @bs_xor32ivec(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: @bs_xor32ivec(
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[A:%.*]], <i32 -1585053440, i32 -1585053440>
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
+; CHECK-NEXT: ret <2 x i32> [[TMP2]]
+;
+ %tmp1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
+ %tmp2 = xor <2 x i32> %tmp1, <i32 100001, i32 100001>
+ ret <2 x i32> %tmp2
+}
+
+define i64 @bs_and64_multiuse1(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: @bs_and64_multiuse1(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], [[TMP2]]
+; CHECK-NEXT: ret i64 [[TMP5]]
+;
+ %tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %tmp2 = tail call i64 @llvm.bswap.i64(i64 %b)
+ %tmp3 = and i64 %tmp1, %tmp2
+ %tmp4 = mul i64 %tmp3, %tmp1 ; to increase use count of the bswaps
+ %tmp5 = mul i64 %tmp4, %tmp2 ; to increase use count of the bswaps
+ ret i64 %tmp5
+}
+
+define i64 @bs_and64_multiuse2(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: @bs_and64_multiuse2(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[A]], [[B:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], [[TMP1]]
+; CHECK-NEXT: ret i64 [[TMP4]]
+;
+ %tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %tmp2 = tail call i64 @llvm.bswap.i64(i64 %b)
+ %tmp3 = and i64 %tmp1, %tmp2
+ %tmp4 = mul i64 %tmp3, %tmp1 ; to increase use count of the bswaps
+ ret i64 %tmp4
+}
+
+define i64 @bs_and64_multiuse3(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: @bs_and64_multiuse3(
+; CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[A:%.*]], [[B]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
+; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], [[TMP2]]
+; CHECK-NEXT: ret i64 [[TMP4]]
+;
+ %tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %tmp2 = tail call i64 @llvm.bswap.i64(i64 %b)
+ %tmp3 = and i64 %tmp1, %tmp2
+ %tmp4 = mul i64 %tmp3, %tmp2 ; to increase use count of the bswaps
+ ret i64 %tmp4
+}
+
+define i64 @bs_and64i_multiuse(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: @bs_and64i_multiuse(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], 1000000001
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: ret i64 [[TMP3]]
+;
+ %tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %tmp2 = and i64 %tmp1, 1000000001
+ %tmp3 = mul i64 %tmp2, %tmp1 ; to increase use count of the bswap
+ ret i64 %tmp3
+}
+
declare i16 @llvm.bswap.i16(i16)
declare i32 @llvm.bswap.i32(i32)
declare i64 @llvm.bswap.i64(i64)
+declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
diff --git a/test/Transforms/InstCombine/cmp-intrinsic.ll b/test/Transforms/InstCombine/cmp-intrinsic.ll
new file mode 100644
index 000000000000..7fc1d12916bf
--- /dev/null
+++ b/test/Transforms/InstCombine/cmp-intrinsic.ll
@@ -0,0 +1,123 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare i16 @llvm.bswap.i16(i16)
+declare i32 @llvm.bswap.i32(i32)
+declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
+declare i33 @llvm.cttz.i33(i33, i1)
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare i8 @llvm.ctpop.i8(i8)
+declare i11 @llvm.ctpop.i11(i11)
+declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>, i1)
+declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1)
+declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
+
+define i1 @bswap_eq_i16(i16 %x) {
+; CHECK-LABEL: @bswap_eq_i16(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 %x, 256
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %bs = call i16 @llvm.bswap.i16(i16 %x)
+ %cmp = icmp eq i16 %bs, 1
+ ret i1 %cmp
+}
+
+define i1 @bswap_ne_i32(i32 %x) {
+; CHECK-LABEL: @bswap_ne_i32(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 %x, 33554432
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %bs = tail call i32 @llvm.bswap.i32(i32 %x)
+ %cmp = icmp ne i32 %bs, 2
+ ret i1 %cmp
+}
+
+define <2 x i1> @bswap_eq_v2i64(<2 x i64> %x) {
+; CHECK-LABEL: @bswap_eq_v2i64(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i64> %x, <i64 216172782113783808, i64 216172782113783808>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %bs = tail call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %x)
+ %cmp = icmp eq <2 x i64> %bs, <i64 3, i64 3>
+ ret <2 x i1> %cmp
+}
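+; bswap is its own inverse, so icmp (bswap X), C folds to icmp X, bswap(C).
+; Hence the constants above: bswap16(1) = 0x0100 = 256, bswap32(2) =
+; 0x02000000 = 33554432, and bswap64(3) = 0x0300000000000000 =
+; 216172782113783808.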
+
+define i1 @ctlz_eq_bitwidth_i32(i32 %x) {
+; CHECK-LABEL: @ctlz_eq_bitwidth_i32(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+ %cmp = icmp eq i32 %lz, 32
+ ret i1 %cmp
+}
+
+define <2 x i1> @ctlz_ne_bitwidth_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: @ctlz_ne_bitwidth_v2i32(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> %a, zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %x = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false)
+ %cmp = icmp ne <2 x i32> %x, <i32 32, i32 32>
+ ret <2 x i1> %cmp
+}
+
+define i1 @cttz_ne_bitwidth_i33(i33 %x) {
+; CHECK-LABEL: @cttz_ne_bitwidth_i33(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i33 %x, 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+ %cmp = icmp ne i33 %tz, 33
+ ret i1 %cmp
+}
+
+define <2 x i1> @cttz_eq_bitwidth_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: @cttz_eq_bitwidth_v2i32(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> %a, zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %x = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %a, i1 false)
+ %cmp = icmp eq <2 x i32> %x, <i32 32, i32 32>
+ ret <2 x i1> %cmp
+}
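+; ctlz/cttz (with the is_zero_undef flag set to false) return the full bit
+; width only for a zero input, so comparing the result against the bit width
+; folds to a compare of the source against zero, as checked above (e.g.
+; cttz.i33 == 33 iff %x == 0).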
+
+define i1 @ctpop_eq_zero_i11(i11 %x) {
+; CHECK-LABEL: @ctpop_eq_zero_i11(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i11 %x, 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %pop = tail call i11 @llvm.ctpop.i11(i11 %x)
+ %cmp = icmp eq i11 %pop, 0
+ ret i1 %cmp
+}
+
+define <2 x i1> @ctpop_ne_zero_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @ctpop_ne_zero_v2i32(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> %x, zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %pop = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %x)
+ %cmp = icmp ne <2 x i32> %pop, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
+define i1 @ctpop_eq_bitwidth_i8(i8 %x) {
+; CHECK-LABEL: @ctpop_eq_bitwidth_i8(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 %x, -1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %pop = tail call i8 @llvm.ctpop.i8(i8 %x)
+ %cmp = icmp eq i8 %pop, 8
+ ret i1 %cmp
+}
+
+define <2 x i1> @ctpop_ne_bitwidth_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @ctpop_ne_bitwidth_v2i32(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> %x, <i32 -1, i32 -1>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %pop = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %x)
+ %cmp = icmp ne <2 x i32> %pop, <i32 32, i32 32>
+ ret <2 x i1> %cmp
+}
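+; ctpop counts set bits, so ctpop(X) == 0 folds to X == 0 and
+; ctpop(X) == bitwidth folds to X == -1 (all bits set), which is why the i8
+; test above compares against -1 rather than 8.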
+
diff --git a/test/Transforms/InstCombine/consecutive-fences.ll b/test/Transforms/InstCombine/consecutive-fences.ll
index 6f1c41277386..8ecb399f39cb 100644
--- a/test/Transforms/InstCombine/consecutive-fences.ll
+++ b/test/Transforms/InstCombine/consecutive-fences.ll
@@ -4,7 +4,7 @@
; CHECK-LABEL: define void @tinkywinky
; CHECK-NEXT: fence seq_cst
-; CHECK-NEXT: fence singlethread acquire
+; CHECK-NEXT: fence syncscope("singlethread") acquire
; CHECK-NEXT: ret void
; CHECK-NEXT: }
@@ -12,21 +12,21 @@ define void @tinkywinky() {
fence seq_cst
fence seq_cst
fence seq_cst
- fence singlethread acquire
- fence singlethread acquire
- fence singlethread acquire
+ fence syncscope("singlethread") acquire
+ fence syncscope("singlethread") acquire
+ fence syncscope("singlethread") acquire
ret void
}
; CHECK-LABEL: define void @dipsy
; CHECK-NEXT: fence seq_cst
-; CHECK-NEXT: fence singlethread seq_cst
+; CHECK-NEXT: fence syncscope("singlethread") seq_cst
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @dipsy() {
fence seq_cst
- fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
ret void
}
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
index 127fde10e9f7..a12f4206b1c6 100644
--- a/test/Transforms/InstCombine/icmp.ll
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -2979,9 +2979,7 @@ declare i32 @llvm.bswap.i32(i32)
define i1 @bswap_ne(i32 %x, i32 %y) {
; CHECK-LABEL: @bswap_ne(
-; CHECK-NEXT: [[SWAPX:%.*]] = call i32 @llvm.bswap.i32(i32 %x)
-; CHECK-NEXT: [[SWAPY:%.*]] = call i32 @llvm.bswap.i32(i32 %y)
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[SWAPX]], [[SWAPY]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 %x, %y
; CHECK-NEXT: ret i1 [[CMP]]
;
%swapx = call i32 @llvm.bswap.i32(i32 %x)
@@ -2994,9 +2992,7 @@ declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
define <8 x i1> @bswap_vec_eq(<8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: @bswap_vec_eq(
-; CHECK-NEXT: [[SWAPX:%.*]] = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %x)
-; CHECK-NEXT: [[SWAPY:%.*]] = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %y)
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i16> [[SWAPX]], [[SWAPY]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i16> %x, %y
; CHECK-NEXT: ret <8 x i1> [[CMP]]
;
%swapx = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %x)
@@ -3009,9 +3005,7 @@ declare i64 @llvm.bitreverse.i64(i64)
define i1 @bitreverse_eq(i64 %x, i64 %y) {
; CHECK-LABEL: @bitreverse_eq(
-; CHECK-NEXT: [[REVX:%.*]] = call i64 @llvm.bitreverse.i64(i64 %x)
-; CHECK-NEXT: [[REVY:%.*]] = call i64 @llvm.bitreverse.i64(i64 %y)
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[REVX]], [[REVY]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 %x, %y
; CHECK-NEXT: ret i1 [[CMP]]
;
%revx = call i64 @llvm.bitreverse.i64(i64 %x)
@@ -3024,9 +3018,7 @@ declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)
define <8 x i1> @bitreverse_vec_ne(<8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: @bitreverse_vec_ne(
-; CHECK-NEXT: [[REVX:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %x)
-; CHECK-NEXT: [[REVY:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %y)
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i16> [[REVX]], [[REVY]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i16> %x, %y
; CHECK-NEXT: ret <8 x i1> [[CMP]]
;
%revx = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %x)
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index c294d79f15ef..8d2f06edcaf3 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -475,66 +475,6 @@ define <2 x i1> @ctlz_knownbits3_vec(<2 x i8> %arg) {
ret <2 x i1> %res
}
-define void @cmp.simplify(i32 %a, i32 %b, i1* %c) {
- %lz = tail call i32 @llvm.ctlz.i32(i32 %a, i1 false) nounwind readnone
- %lz.cmp = icmp eq i32 %lz, 32
- store volatile i1 %lz.cmp, i1* %c
- %tz = tail call i32 @llvm.cttz.i32(i32 %a, i1 false) nounwind readnone
- %tz.cmp = icmp ne i32 %tz, 32
- store volatile i1 %tz.cmp, i1* %c
- %pop0 = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
- %pop0.cmp = icmp eq i32 %pop0, 0
- store volatile i1 %pop0.cmp, i1* %c
- %pop1 = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
- %pop1.cmp = icmp eq i32 %pop1, 32
- store volatile i1 %pop1.cmp, i1* %c
- ret void
-; CHECK: @cmp.simplify
-; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
-; CHECK-NEXT: store volatile i1 %lz.cmp, i1* %c
-; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
-; CHECK-NEXT: store volatile i1 %tz.cmp, i1* %c
-; CHECK-NEXT: %pop0.cmp = icmp eq i32 %b, 0
-; CHECK-NEXT: store volatile i1 %pop0.cmp, i1* %c
-; CHECK-NEXT: %pop1.cmp = icmp eq i32 %b, -1
-; CHECK-NEXT: store volatile i1 %pop1.cmp, i1* %c
-}
-
-define <2 x i1> @ctlz_cmp_vec(<2 x i32> %a) {
-; CHECK-LABEL: @ctlz_cmp_vec(
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> %a, zeroinitializer
-; CHECK-NEXT: ret <2 x i1> [[CMP]]
-;
- %x = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind readnone
- %cmp = icmp eq <2 x i32> %x, <i32 32, i32 32>
- ret <2 x i1> %cmp
-}
-
-define <2 x i1> @cttz_cmp_vec(<2 x i32> %a) {
-; CHECK-LABEL: @cttz_cmp_vec(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> %a, zeroinitializer
-; CHECK-NEXT: ret <2 x i1> [[CMP]]
-;
- %x = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %a, i1 false) nounwind readnone
- %cmp = icmp ne <2 x i32> %x, <i32 32, i32 32>
- ret <2 x i1> %cmp
-}
-
-define void @ctpop_cmp_vec(<2 x i32> %a, <2 x i1>* %b) {
- %pop0 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a) nounwind readnone
- %pop0.cmp = icmp eq <2 x i32> %pop0, zeroinitializer
- store volatile <2 x i1> %pop0.cmp, <2 x i1>* %b
- %pop1 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a) nounwind readnone
- %pop1.cmp = icmp eq <2 x i32> %pop1, < i32 32, i32 32 >
- store volatile <2 x i1> %pop1.cmp, <2 x i1>* %b
- ret void
-; CHECK-LABEL: @ctpop_cmp_vec(
-; CHECK-NEXT: %pop0.cmp = icmp eq <2 x i32> %a, zeroinitializer
-; CHECK-NEXT: store volatile <2 x i1> %pop0.cmp, <2 x i1>* %b
-; CHECK-NEXT: %pop1.cmp = icmp eq <2 x i32> %a, <i32 -1, i32 -1>
-; CHECK-NEXT: store volatile <2 x i1> %pop1.cmp, <2 x i1>* %b
-}
-
define i32 @ctlz_undef(i32 %Value) {
; CHECK-LABEL: @ctlz_undef(
; CHECK-NEXT: ret i32 undef
diff --git a/test/Transforms/InstCombine/or-xor.ll b/test/Transforms/InstCombine/or-xor.ll
index 2164f0df8d27..947971c6c83b 100644
--- a/test/Transforms/InstCombine/or-xor.ll
+++ b/test/Transforms/InstCombine/or-xor.ll
@@ -348,10 +348,8 @@ define i8 @test18(i8 %A, i8 %B) {
; ((x | y) ^ (~x | ~y)) -> ~(x ^ y)
define i32 @test19(i32 %x, i32 %y) {
; CHECK-LABEL: @test19(
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[OR2_DEMORGAN:%.*]] = and i32 [[X]], [[Y]]
-; CHECK-NEXT: [[OR2:%.*]] = xor i32 [[OR2_DEMORGAN]], -1
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR1]], [[OR2]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[XOR]]
;
%noty = xor i32 %y, -1
@@ -365,10 +363,8 @@ define i32 @test19(i32 %x, i32 %y) {
; ((x | y) ^ (~y | ~x)) -> ~(x ^ y)
define i32 @test20(i32 %x, i32 %y) {
; CHECK-LABEL: @test20(
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[OR2_DEMORGAN:%.*]] = and i32 [[Y]], [[X]]
-; CHECK-NEXT: [[OR2:%.*]] = xor i32 [[OR2_DEMORGAN]], -1
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR1]], [[OR2]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[XOR]]
;
%noty = xor i32 %y, -1
@@ -382,10 +378,8 @@ define i32 @test20(i32 %x, i32 %y) {
; ((~x | ~y) ^ (x | y)) -> ~(x ^ y)
define i32 @test21(i32 %x, i32 %y) {
; CHECK-LABEL: @test21(
-; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[OR1:%.*]] = xor i32 [[OR1_DEMORGAN]], -1
-; CHECK-NEXT: [[OR2:%.*]] = or i32 [[X]], [[Y]]
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR2]], [[OR1]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[XOR]]
;
%noty = xor i32 %y, -1
@@ -399,10 +393,8 @@ define i32 @test21(i32 %x, i32 %y) {
; ((~x | ~y) ^ (y | x)) -> ~(x ^ y)
define i32 @test22(i32 %x, i32 %y) {
; CHECK-LABEL: @test22(
-; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[OR1:%.*]] = xor i32 [[OR1_DEMORGAN]], -1
-; CHECK-NEXT: [[OR2:%.*]] = or i32 [[Y]], [[X]]
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR2]], [[OR1]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[XOR]]
;
%noty = xor i32 %y, -1
diff --git a/test/Transforms/InstCombine/pr33689_same_bitwidth.ll b/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
new file mode 100644
index 000000000000..e5dd019b9b51
--- /dev/null
+++ b/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -instcombine %s -o - | FileCheck %s
+
+; All the "useless" instructions should be removed and we shouldn't crash.
+
+target datalayout = "p:16:16"
+
+%i64_t = type i64
+
+@a = external global i16
+@b = external global i16*
+
+define void @f() {
+; CHECK-LABEL: @f(
+; CHECK-NEXT: bb0:
+; CHECK-NEXT: [[TMP12:%.*]] = alloca [2 x i32], align 8
+; CHECK-NEXT: [[TMP12_SUB:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP12]], i16 0, i16 0
+; CHECK-NEXT: br i1 undef, label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint [2 x i32]* [[TMP12]] to i16
+; CHECK-NEXT: store i16 [[TMP8]], i16* @a, align 2
+; CHECK-NEXT: unreachable
+; CHECK: bb2:
+; CHECK-NEXT: [[TMP9:%.*]] = load i16*, i16** @b, align 2
+; CHECK-NEXT: store i16 0, i16* [[TMP9]], align 2
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP12_SUB]], align 8
+; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -1
+; CHECK-NEXT: store i32 [[TMP11]], i32* [[TMP12_SUB]], align 8
+; CHECK-NEXT: ret void
+;
+bb0:
+ %tmp1 = alloca %i64_t
+ %tmp2 = bitcast %i64_t* %tmp1 to i32*
+ %useless3 = bitcast %i64_t* %tmp1 to i16*
+ %useless4 = getelementptr inbounds i16, i16* %useless3, i16 undef
+ %useless5 = bitcast i16* %useless4 to i32*
+ br i1 undef, label %bb1, label %bb2
+
+bb1: ; preds = %bb0
+ %useless6 = insertvalue [1 x i32*] undef, i32* %tmp2, 0
+ %useless7 = insertvalue [1 x i32*] %useless6, i32* null, 0
+ %tmp8 = ptrtoint i32* %tmp2 to i16
+ store i16 %tmp8, i16* @a
+ unreachable
+
+bb2: ; preds = %bb0
+ %tmp9 = load i16*, i16** @b
+ store i16 0, i16* %tmp9
+ %tmp10 = load i32, i32* %tmp2
+ %tmp11 = sub i32 %tmp10, 1
+ store i32 %tmp11, i32* %tmp2
+ ret void
+}
diff --git a/test/Transforms/InstCombine/select-implied.ll b/test/Transforms/InstCombine/select-implied.ll
index 2100e3eae008..2558745c18f3 100644
--- a/test/Transforms/InstCombine/select-implied.ll
+++ b/test/Transforms/InstCombine/select-implied.ll
@@ -121,3 +121,80 @@ end:
declare void @foo(i32)
declare i32 @bar(i32)
+
+; CHECK-LABEL: @test_and
+; CHECK: tpath:
+; CHECK-NOT: select
+; CHECK: ret i32 313
+define i32 @test_and(i32 %a, i32 %b) {
+entry:
+ %cmp1 = icmp ne i32 %a, 0
+ %cmp2 = icmp ne i32 %b, 0
+ %and = and i1 %cmp1, %cmp2
+ br i1 %and, label %tpath, label %end
+
+tpath:
+ %cmp3 = icmp eq i32 %a, 0 ;; <-- implied false
+ %c = select i1 %cmp3, i32 0, i32 313
+ ret i32 %c
+
+end:
+ ret i32 0
+}
+
+; cmp1 and cmp2 are false on the 'fpath' path and thus cmp3 is true.
+; CHECK-LABEL: @test_or1
+; CHECK: fpath:
+; CHECK-NOT: select
+; CHECK: ret i32 37
+define i32 @test_or1(i32 %a, i32 %b) {
+entry:
+ %cmp1 = icmp eq i32 %a, 0
+ %cmp2 = icmp eq i32 %b, 0
+ %or = or i1 %cmp1, %cmp2
+ br i1 %or, label %end, label %fpath
+
+fpath:
+ %cmp3 = icmp ne i32 %a, 0 ;; <-- implied true
+ %c = select i1 %cmp3, i32 37, i32 0
+ ret i32 %c
+
+end:
+ ret i32 0
+}
+
+; LHS ==> RHS by definition (true -> true)
+; CHECK-LABEL: @test6
+; CHECK: taken:
+; CHECK-NOT: select
+; CHECK: call void @foo(i32 10)
+define void @test6(i32 %a, i32 %b) {
+ %cmp1 = icmp eq i32 %a, %b
+ br i1 %cmp1, label %taken, label %end
+
+taken:
+ %c = select i1 %cmp1, i32 10, i32 0
+ call void @foo(i32 %c)
+ br label %end
+
+end:
+ ret void
+}
+
+; LHS ==> RHS by definition (false -> false)
+; CHECK-LABEL: @test7
+; CHECK: taken:
+; CHECK-NOT: select
+; CHECK: call void @foo(i32 11)
+define void @test7(i32 %a, i32 %b) {
+ %cmp1 = icmp eq i32 %a, %b
+ br i1 %cmp1, label %end, label %taken
+
+taken:
+ %c = select i1 %cmp1, i32 0, i32 11
+ call void @foo(i32 %c)
+ br label %end
+
+end:
+ ret void
+}
diff --git a/test/Transforms/InstCombine/select.ll b/test/Transforms/InstCombine/select.ll
index c8f2a50b72ed..acfa053daaf8 100644
--- a/test/Transforms/InstCombine/select.ll
+++ b/test/Transforms/InstCombine/select.ll
@@ -1370,3 +1370,10 @@ define i8 @assume_cond_false(i1 %cond, i8 %x, i8 %y) {
ret i8 %sel
}
+; Test case to make sure we don't consider an all-ones float value for converting the select into a sext.
+define <4 x float> @PR33721(<4 x float> %w) {
+entry:
+ %0 = fcmp ole <4 x float> %w, zeroinitializer
+ %1 = select <4 x i1> %0, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, <4 x float> zeroinitializer
+ ret <4 x float> %1
+}
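+; The vector constant above is 0xFFFFFFFFE0000000, the hex-double spelling of
+; the float whose 32-bit pattern is 0xFFFFFFFF (every bit set); it is a NaN,
+; so it must not be treated like the all-ones integer that a sext of i1 would
+; produce.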
diff --git a/test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll b/test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll
index 5938f9d7321d..715c9413a819 100644
--- a/test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll
+++ b/test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll
@@ -854,3 +854,32 @@ define void @load_factor2_fp128(<4 x fp128>* %ptr) {
%v1 = shufflevector <4 x fp128> %interleaved.vec, <4 x fp128> undef, <2 x i32> <i32 1, i32 3>
ret void
}
+
+define void @load_factor2_wide_pointer(<16 x i32*>* %ptr) {
+; NEON-LABEL: @load_factor2_wide_pointer(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <16 x i32*>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to i8*
+; NEON-NEXT: [[VLDN:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32.p0i8(i8* [[TMP2]], i32 4)
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN]], 1
+; NEON-NEXT: [[TMP4:%.*]] = inttoptr <4 x i32> [[TMP3]] to <4 x i32*>
+; NEON-NEXT: [[TMP5:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN]], 0
+; NEON-NEXT: [[TMP6:%.*]] = inttoptr <4 x i32> [[TMP5]] to <4 x i32*>
+; NEON-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP1]], i32 8
+; NEON-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to i8*
+; NEON-NEXT: [[VLDN1:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32.p0i8(i8* [[TMP8]], i32 4)
+; NEON-NEXT: [[TMP9:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN1]], 1
+; NEON-NEXT: [[TMP10:%.*]] = inttoptr <4 x i32> [[TMP9]] to <4 x i32*>
+; NEON-NEXT: [[TMP11:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN1]], 0
+; NEON-NEXT: [[TMP12:%.*]] = inttoptr <4 x i32> [[TMP11]] to <4 x i32*>
+; NEON-NEXT: [[TMP13:%.*]] = shufflevector <4 x i32*> [[TMP4]], <4 x i32*> [[TMP10]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP14:%.*]] = shufflevector <4 x i32*> [[TMP6]], <4 x i32*> [[TMP12]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @load_factor2_wide_pointer(
+; NO_NEON-NOT: @llvm.arm.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <16 x i32*>, <16 x i32*>* %ptr, align 4
+ %v0 = shufflevector <16 x i32*> %interleaved.vec, <16 x i32*> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %v1 = shufflevector <16 x i32*> %interleaved.vec, <16 x i32*> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret void
+}
diff --git a/test/Transforms/LoopRotate/pr33701.ll b/test/Transforms/LoopRotate/pr33701.ll
new file mode 100644
index 000000000000..ed162b120982
--- /dev/null
+++ b/test/Transforms/LoopRotate/pr33701.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -loop-rotate -verify-dom-info -verify-loop-info -disable-output
+
+define void @func() {
+bb0:
+ br label %bb1
+
+bb1: ; preds = %bb4, %bb0
+ %0 = phi i16 [ %2, %bb4 ], [ 0, %bb0 ]
+ %1 = icmp sle i16 %0, 2
+ br i1 %1, label %bb2, label %bb5
+
+bb2: ; preds = %bb1
+ br i1 undef, label %bb6, label %bb4
+
+bb3: ; No predecessors!
+ br label %bb6
+
+bb4: ; preds = %bb2
+ %2 = add i16 undef, 1
+ br label %bb1
+
+bb5: ; preds = %bb1
+ br label %bb6
+
+bb6: ; preds = %bb5, %bb3, %bb2
+ unreachable
+}
diff --git a/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll b/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll
index dcd068191e10..ea3f60772319 100644
--- a/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll
+++ b/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll
@@ -14,8 +14,8 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; current LSR cost model.
; CHECK-NOT: = ptrtoint i8* undef to i64
; CHECK: .lr.ph
-; CHECK: [[TMP:%[^ ]+]] = add i64 %tmp5, 1
-; CHECK: sub i64 [[TMP]], %tmp6
+; CHECK: [[TMP:%[^ ]+]] = add i64 %tmp{{[0-9]+}}, -1
+; CHECK: sub i64 [[TMP]], %tmp{{[0-9]+}}
; CHECK: ret void
define void @VerifyDiagnosticConsumerTest() unnamed_addr nounwind uwtable align 2 {
bb:
diff --git a/test/Transforms/LoopStrengthReduce/X86/lsr-filtering-scaledreg.ll b/test/Transforms/LoopStrengthReduce/X86/lsr-filtering-scaledreg.ll
new file mode 100644
index 000000000000..4ce6f1a79fbf
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/lsr-filtering-scaledreg.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -loop-reduce -lsr-filter-same-scaled-reg=true -mtriple=x86_64-unknown-linux-gnu -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+%struct.ham = type { i8, i8, [5 x i32], i64, i64, i64 }
+
+@global = external local_unnamed_addr global %struct.ham, align 8
+
+define void @foo() local_unnamed_addr {
+bb:
+ %tmp = load i64, i64* getelementptr inbounds (%struct.ham, %struct.ham* @global, i64 0, i32 3), align 8
+ %tmp1 = and i64 %tmp, 1792
+ %tmp2 = load i64, i64* getelementptr inbounds (%struct.ham, %struct.ham* @global, i64 0, i32 4), align 8
+ %tmp3 = add i64 %tmp1, %tmp2
+ %tmp4 = load i8*, i8** null, align 8
+ %tmp5 = getelementptr inbounds i8, i8* %tmp4, i64 0
+ %tmp6 = sub i64 0, %tmp3
+ %tmp7 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp6
+ %tmp8 = inttoptr i64 0 to i8*
+ br label %bb9
+
+; Without filtering non-optimal formulae with the same ScaledReg and Scale, the strategy
+; to narrow the LSR search space by picking the winner reg will generate only one lsr.iv
+; and a suboptimal result.
+; CHECK-LABEL: @foo(
+; CHECK: bb9:
+; CHECK-NEXT: = phi i8*
+; CHECK-NEXT: = phi i8*
+
+bb9: ; preds = %bb12, %bb
+ %tmp10 = phi i8* [ %tmp7, %bb ], [ %tmp16, %bb12 ]
+ %tmp11 = phi i8* [ %tmp8, %bb ], [ %tmp17, %bb12 ]
+ br i1 false, label %bb18, label %bb12
+
+bb12: ; preds = %bb9
+ %tmp13 = getelementptr inbounds i8, i8* %tmp10, i64 8
+ %tmp14 = bitcast i8* %tmp13 to i64*
+ %tmp15 = load i64, i64* %tmp14, align 1
+ %tmp16 = getelementptr inbounds i8, i8* %tmp10, i64 16
+ %tmp17 = getelementptr inbounds i8, i8* %tmp11, i64 16
+ br label %bb9
+
+bb18: ; preds = %bb9
+ %tmp19 = icmp ugt i8* %tmp11, null
+ %tmp20 = getelementptr inbounds i8, i8* %tmp10, i64 8
+ %tmp21 = getelementptr inbounds i8, i8* %tmp11, i64 8
+ %tmp22 = select i1 %tmp19, i8* %tmp10, i8* %tmp20
+ %tmp23 = select i1 %tmp19, i8* %tmp11, i8* %tmp21
+ br label %bb24
+
+bb24: ; preds = %bb24, %bb18
+ %tmp25 = phi i8* [ %tmp27, %bb24 ], [ %tmp22, %bb18 ]
+ %tmp26 = phi i8* [ %tmp29, %bb24 ], [ %tmp23, %bb18 ]
+ %tmp27 = getelementptr inbounds i8, i8* %tmp25, i64 1
+ %tmp28 = load i8, i8* %tmp25, align 1
+ %tmp29 = getelementptr inbounds i8, i8* %tmp26, i64 1
+ store i8 %tmp28, i8* %tmp26, align 1
+ %tmp30 = icmp eq i8* %tmp29, %tmp5
+ br label %bb24
+}
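+; The two consecutive "phi i8*" checks in bb9 above verify that LSR keeps two
+; separate pointer induction variables instead of collapsing them into the
+; single lsr.iv described as the suboptimal result in the comment before bb9.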
diff --git a/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll b/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll
index 1f31a133e34d..73672e14f78a 100644
--- a/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll
@@ -1,29 +1,52 @@
-; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine -S| FileCheck %s
+; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -S | FileCheck %s -check-prefix=EPILOG-NO-IC
+; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine -S | FileCheck %s -check-prefix=EPILOG
; RUN: opt < %s -loop-unroll -unroll-runtime -unroll-count=2 -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine
+; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=false -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine -S | FileCheck %s -check-prefix=PROLOG
+; RUN: opt < %s -loop-unroll -unroll-runtime -unroll-runtime-epilog=false -unroll-count=2 -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine
-; the second RUN generates an epilog remainder block for all the test
+; the third and fifth RUNs generate an epilog/prolog remainder block for all the test
; cases below (it does not generate a loop).
; test with three exiting and three exit blocks.
; none of the exit blocks have successors
define void @test1(i64 %trip, i1 %cond) {
-; CHECK-LABEL: test1
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[TRIP:%.*]], -1
-; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TRIP]], 7
-; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7
-; CHECK-NEXT: br i1 [[TMP1]], label %exit2.loopexit.unr-lcssa, label [[ENTRY_NEW:%.*]]
-; CHECK: entry.new:
-; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[TRIP]], [[XTRAITER]]
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK-LABEL: loop_latch.epil:
-; CHECK-NEXT: %epil.iter.sub = add i64 %epil.iter, -1
-; CHECK-NEXT: %epil.iter.cmp = icmp eq i64 %epil.iter.sub, 0
-; CHECK-NEXT: br i1 %epil.iter.cmp, label %exit2.loopexit.epilog-lcssa, label %loop_header.epil
-; CHECK-LABEL: loop_latch.7:
-; CHECK-NEXT: %niter.nsub.7 = add i64 %niter, -8
-; CHECK-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.nsub.7, 0
-; CHECK-NEXT: br i1 %niter.ncmp.7, label %exit2.loopexit.unr-lcssa.loopexit, label %loop_header
+; EPILOG: test1(
+; EPILOG-NEXT: entry:
+; EPILOG-NEXT: [[TMP0:%.*]] = add i64 [[TRIP:%.*]], -1
+; EPILOG-NEXT: [[XTRAITER:%.*]] = and i64 [[TRIP]], 7
+; EPILOG-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7
+; EPILOG-NEXT: br i1 [[TMP1]], label %exit2.loopexit.unr-lcssa, label [[ENTRY_NEW:%.*]]
+; EPILOG: entry.new:
+; EPILOG-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[TRIP]], [[XTRAITER]]
+; EPILOG-NEXT: br label [[LOOP_HEADER:%.*]]
+; EPILOG: loop_latch.epil:
+; EPILOG-NEXT: %epil.iter.sub = add i64 %epil.iter, -1
+; EPILOG-NEXT: %epil.iter.cmp = icmp eq i64 %epil.iter.sub, 0
+; EPILOG-NEXT: br i1 %epil.iter.cmp, label %exit2.loopexit.epilog-lcssa, label %loop_header.epil
+; EPILOG: loop_latch.7:
+; EPILOG-NEXT: %niter.nsub.7 = add i64 %niter, -8
+; EPILOG-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.nsub.7, 0
+; EPILOG-NEXT: br i1 %niter.ncmp.7, label %exit2.loopexit.unr-lcssa.loopexit, label %loop_header
+
+; PROLOG: test1(
+; PROLOG-NEXT: entry:
+; PROLOG-NEXT: [[TMP0:%.*]] = add i64 [[TRIP:%.*]], -1
+; PROLOG-NEXT: [[XTRAITER:%.*]] = and i64 [[TRIP]], 7
+; PROLOG-NEXT: [[TMP1:%.*]] = icmp eq i64 [[XTRAITER]], 0
+; PROLOG-NEXT: br i1 [[TMP1]], label %loop_header.prol.loopexit, label %loop_header.prol.preheader
+; PROLOG: loop_header.prol:
+; PROLOG-NEXT: %iv.prol = phi i64 [ 0, %loop_header.prol.preheader ], [ %iv_next.prol, %loop_latch.prol ]
+; PROLOG-NEXT: %prol.iter = phi i64 [ [[XTRAITER]], %loop_header.prol.preheader ], [ %prol.iter.sub, %loop_latch.prol ]
+; PROLOG-NEXT: br i1 %cond, label %loop_latch.prol, label %loop_exiting_bb1.prol
+; PROLOG: loop_latch.prol:
+; PROLOG-NEXT: %iv_next.prol = add i64 %iv.prol, 1
+; PROLOG-NEXT: %prol.iter.sub = add i64 %prol.iter, -1
+; PROLOG-NEXT: %prol.iter.cmp = icmp eq i64 %prol.iter.sub, 0
+; PROLOG-NEXT: br i1 %prol.iter.cmp, label %loop_header.prol.loopexit.unr-lcssa, label %loop_header.prol
+; PROLOG: loop_latch.7:
+; PROLOG-NEXT: %iv_next.7 = add i64 %iv, 8
+; PROLOG-NEXT: %cmp.7 = icmp eq i64 %iv_next.7, %trip
+; PROLOG-NEXT: br i1 %cmp.7, label %exit2.loopexit.unr-lcssa, label %loop_header
entry:
br label %loop_header
@@ -59,17 +82,30 @@ exit2.loopexit:
; %sum.02 and %add. Both of these are incoming values for phi from every exiting
; unrolled block.
define i32 @test2(i32* nocapture %a, i64 %n) {
-; CHECK-LABEL: test2
-; CHECK-LABEL: for.exit2.loopexit:
-; CHECK-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %for.body ], [ 42, %for.exiting_block.1 ], [ %add.1, %for.body.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %for.body.2 ], [ 42, %for.exiting_block.3 ],
-; CHECK-NEXT: br label %for.exit2
-; CHECK-LABEL: for.exit2.loopexit2:
-; CHECK-NEXT: %retval.ph3 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ]
-; CHECK-NEXT: br label %for.exit2
-; CHECK-LABEL: for.exit2:
-; CHECK-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph3, %for.exit2.loopexit2 ]
-; CHECK-NEXT: ret i32 %retval
-; CHECK: %niter.nsub.7 = add i64 %niter, -8
+; EPILOG: test2(
+; EPILOG: for.exit2.loopexit:
+; EPILOG-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %for.body ], [ 42, %for.exiting_block.1 ], [ %add.1, %for.body.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %for.body.2 ], [ 42, %for.exiting_block.3 ],
+; EPILOG-NEXT: br label %for.exit2
+; EPILOG: for.exit2.loopexit2:
+; EPILOG-NEXT: %retval.ph3 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ]
+; EPILOG-NEXT: br label %for.exit2
+; EPILOG: for.exit2:
+; EPILOG-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph3, %for.exit2.loopexit2 ]
+; EPILOG-NEXT: ret i32 %retval
+; EPILOG: %niter.nsub.7 = add i64 %niter, -8
+
+; PROLOG: test2(
+; PROLOG: for.exit2.loopexit:
+; PROLOG-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %for.body ], [ 42, %for.exiting_block.1 ], [ %add.1, %for.body.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %for.body.2 ], [ 42, %for.exiting_block.3 ],
+; PROLOG-NEXT: br label %for.exit2
+; PROLOG: for.exit2.loopexit1:
+; PROLOG-NEXT: %retval.ph2 = phi i32 [ 42, %for.exiting_block.prol ], [ %sum.02.prol, %header.prol ]
+; PROLOG-NEXT: br label %for.exit2
+; PROLOG: for.exit2:
+; PROLOG-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph2, %for.exit2.loopexit1 ]
+; PROLOG-NEXT: ret i32 %retval
+; PROLOG: %indvars.iv.next.7 = add i64 %indvars.iv, 8
+
entry:
br label %header
@@ -102,25 +138,42 @@ for.exit2:
; test with two exiting and three exit blocks.
; the non-latch exiting block has a switch.
define void @test3(i64 %trip, i64 %add) {
-; CHECK-LABEL: test3
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[TRIP:%.*]], -1
-; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TRIP]], 7
-; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7
-; CHECK-NEXT: br i1 [[TMP1]], label %exit2.loopexit.unr-lcssa, label [[ENTRY_NEW:%.*]]
-; CHECK: entry.new:
-; CHECK-NEXT: %unroll_iter = sub i64 [[TRIP]], [[XTRAITER]]
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK-LABEL: loop_header:
-; CHECK-NEXT: %sum = phi i64 [ 0, %entry.new ], [ %sum.next.7, %loop_latch.7 ]
-; CHECK-NEXT: %niter = phi i64 [ %unroll_iter, %entry.new ], [ %niter.nsub.7, %loop_latch.7 ]
-; CHECK-LABEL: loop_exiting_bb1.7:
-; CHECK-NEXT: switch i64 %sum.next.6, label %loop_latch.7
-; CHECK-LABEL: loop_latch.7:
-; CHECK-NEXT: %sum.next.7 = add i64 %sum.next.6, %add
-; CHECK-NEXT: %niter.nsub.7 = add i64 %niter, -8
-; CHECK-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.nsub.7, 0
-; CHECK-NEXT: br i1 %niter.ncmp.7, label %exit2.loopexit.unr-lcssa.loopexit, label %loop_header
+; EPILOG: test3(
+; EPILOG-NEXT: entry:
+; EPILOG-NEXT: [[TMP0:%.*]] = add i64 [[TRIP:%.*]], -1
+; EPILOG-NEXT: [[XTRAITER:%.*]] = and i64 [[TRIP]], 7
+; EPILOG-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7
+; EPILOG-NEXT: br i1 [[TMP1]], label %exit2.loopexit.unr-lcssa, label [[ENTRY_NEW:%.*]]
+; EPILOG: entry.new:
+; EPILOG-NEXT: %unroll_iter = sub i64 [[TRIP]], [[XTRAITER]]
+; EPILOG-NEXT: br label [[LOOP_HEADER:%.*]]
+; EPILOG: loop_header:
+; EPILOG-NEXT: %sum = phi i64 [ 0, %entry.new ], [ %sum.next.7, %loop_latch.7 ]
+; EPILOG-NEXT: %niter = phi i64 [ %unroll_iter, %entry.new ], [ %niter.nsub.7, %loop_latch.7 ]
+; EPILOG: loop_exiting_bb1.7:
+; EPILOG-NEXT: switch i64 %sum.next.6, label %loop_latch.7
+; EPILOG: loop_latch.7:
+; EPILOG-NEXT: %sum.next.7 = add i64 %sum.next.6, %add
+; EPILOG-NEXT: %niter.nsub.7 = add i64 %niter, -8
+; EPILOG-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.nsub.7, 0
+; EPILOG-NEXT: br i1 %niter.ncmp.7, label %exit2.loopexit.unr-lcssa.loopexit, label %loop_header
+
+; PROLOG: test3(
+; PROLOG-NEXT: entry:
+; PROLOG-NEXT: [[TMP0:%.*]] = add i64 [[TRIP:%.*]], -1
+; PROLOG-NEXT: [[XTRAITER:%.*]] = and i64 [[TRIP]], 7
+; PROLOG-NEXT: [[TMP1:%.*]] = icmp eq i64 [[XTRAITER]], 0
+; PROLOG-NEXT: br i1 [[TMP1]], label %loop_header.prol.loopexit, label %loop_header.prol.preheader
+; PROLOG: loop_header:
+; PROLOG-NEXT: %iv = phi i64 [ %iv.unr, %entry.new ], [ %iv_next.7, %loop_latch.7 ]
+; PROLOG-NEXT: %sum = phi i64 [ %sum.unr, %entry.new ], [ %sum.next.7, %loop_latch.7 ]
+; PROLOG: loop_exiting_bb1.7:
+; PROLOG-NEXT: switch i64 %sum.next.6, label %loop_latch.7
+; PROLOG: loop_latch.7:
+; PROLOG-NEXT: %iv_next.7 = add nsw i64 %iv, 8
+; PROLOG-NEXT: %sum.next.7 = add i64 %sum.next.6, %add
+; PROLOG-NEXT: %cmp.7 = icmp eq i64 %iv_next.7, %trip
+; PROLOG-NEXT: br i1 %cmp.7, label %exit2.loopexit.unr-lcssa, label %loop_header
entry:
br label %loop_header
@@ -153,9 +206,13 @@ exit2.loopexit:
; FIXME: Support multiple exiting blocks to the same latch exit block.
define i32 @test4(i32* nocapture %a, i64 %n, i1 %cond) {
-; CHECK-LABEL: test4
-; CHECK-NOT: .unr
-; CHECK-NOT: .epil
+; EPILOG: test4(
+; EPILOG-NOT: .unr
+; EPILOG-NOT: .epil
+
+; PROLOG: test4(
+; PROLOG-NOT: .unr
+; PROLOG-NOT: .prol
entry:
br label %header
@@ -184,21 +241,68 @@ for.exit2:
ret i32 42
}
+; FIXME: Support multiple exiting blocks to the unique exit block.
+define void @unique_exit(i32 %arg) {
+; EPILOG: unique_exit(
+; EPILOG-NOT: .unr
+; EPILOG-NOT: .epil
+
+; PROLOG: unique_exit(
+; PROLOG-NOT: .unr
+; PROLOG-NOT: .prol
+entry:
+ %tmp = icmp sgt i32 undef, %arg
+ br i1 %tmp, label %preheader, label %returnblock
+
+preheader: ; preds = %entry
+ br label %header
+
+LoopExit: ; preds = %header, %latch
+ %tmp2.ph = phi i32 [ %tmp4, %header ], [ -1, %latch ]
+ br label %returnblock
+
+returnblock: ; preds = %LoopExit, %entry
+ %tmp2 = phi i32 [ -1, %entry ], [ %tmp2.ph, %LoopExit ]
+ ret void
+
+header: ; preds = %preheader, %latch
+ %tmp4 = phi i32 [ %inc, %latch ], [ %arg, %preheader ]
+ %inc = add nsw i32 %tmp4, 1
+ br i1 true, label %LoopExit, label %latch
+
+latch: ; preds = %header
+ %cmp = icmp slt i32 %inc, undef
+ br i1 %cmp, label %header, label %LoopExit
+}
+
; two exiting and two exit blocks.
; the non-latch exiting block has duplicate edges to the non-latch exit block.
define i64 @test5(i64 %trip, i64 %add, i1 %cond) {
-; CHECK-LABEL: test5
-; CHECK-LABEL: exit1.loopexit:
-; CHECK-NEXT: %result.ph = phi i64 [ %ivy, %loop_exiting ], [ %ivy, %loop_exiting ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.2, %loop_exiting.2 ],
-; CHECK-NEXT: br label %exit1
-; CHECK-LABEL: exit1.loopexit2:
-; CHECK-NEXT: %ivy.epil = add i64 %iv.epil, %add
-; CHECK-NEXT: br label %exit1
-; CHECK-LABEL: exit1:
-; CHECK-NEXT: %result = phi i64 [ %result.ph, %exit1.loopexit ], [ %ivy.epil, %exit1.loopexit2 ]
-; CHECK-NEXT: ret i64 %result
-; CHECK-LABEL: loop_latch.7:
-; CHECK: %niter.nsub.7 = add i64 %niter, -8
+; EPILOG: test5(
+; EPILOG: exit1.loopexit:
+; EPILOG-NEXT: %result.ph = phi i64 [ %ivy, %loop_exiting ], [ %ivy, %loop_exiting ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.2, %loop_exiting.2 ],
+; EPILOG-NEXT: br label %exit1
+; EPILOG: exit1.loopexit2:
+; EPILOG-NEXT: %ivy.epil = add i64 %iv.epil, %add
+; EPILOG-NEXT: br label %exit1
+; EPILOG: exit1:
+; EPILOG-NEXT: %result = phi i64 [ %result.ph, %exit1.loopexit ], [ %ivy.epil, %exit1.loopexit2 ]
+; EPILOG-NEXT: ret i64 %result
+; EPILOG: loop_latch.7:
+; EPILOG: %niter.nsub.7 = add i64 %niter, -8
+
+; PROLOG: test5(
+; PROLOG: exit1.loopexit:
+; PROLOG-NEXT: %result.ph = phi i64 [ %ivy, %loop_exiting ], [ %ivy, %loop_exiting ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.2, %loop_exiting.2 ],
+; PROLOG-NEXT: br label %exit1
+; PROLOG: exit1.loopexit1:
+; PROLOG-NEXT: %ivy.prol = add i64 %iv.prol, %add
+; PROLOG-NEXT: br label %exit1
+; PROLOG: exit1:
+; PROLOG-NEXT: %result = phi i64 [ %result.ph, %exit1.loopexit ], [ %ivy.prol, %exit1.loopexit1 ]
+; PROLOG-NEXT: ret i64 %result
+; PROLOG: loop_latch.7:
+; PROLOG: %iv_next.7 = add nsw i64 %iv, 8
entry:
br label %loop_header
@@ -230,18 +334,31 @@ latchexit:
; test when exit blocks have successors.
define i32 @test6(i32* nocapture %a, i64 %n, i1 %cond, i32 %x) {
-; CHECK-LABEL: test6
-; CHECK-LABEL: for.exit2.loopexit:
-; CHECK-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %latch ], [ 42, %for.exiting_block.1 ], [ %add.1, %latch.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %latch.2 ],
-; CHECK-NEXT: br label %for.exit2
-; CHECK-LABEL: for.exit2.loopexit2:
-; CHECK-NEXT: %retval.ph3 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ]
-; CHECK-NEXT: br label %for.exit2
-; CHECK-LABEL: for.exit2:
-; CHECK-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph3, %for.exit2.loopexit2 ]
-; CHECK-NEXT: br i1 %cond, label %exit_true, label %exit_false
-; CHECK-LABEL: latch.7:
-; CHECK: %niter.nsub.7 = add i64 %niter, -8
+; EPILOG: test6(
+; EPILOG: for.exit2.loopexit:
+; EPILOG-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %latch ], [ 42, %for.exiting_block.1 ], [ %add.1, %latch.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %latch.2 ],
+; EPILOG-NEXT: br label %for.exit2
+; EPILOG: for.exit2.loopexit2:
+; EPILOG-NEXT: %retval.ph3 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ]
+; EPILOG-NEXT: br label %for.exit2
+; EPILOG: for.exit2:
+; EPILOG-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph3, %for.exit2.loopexit2 ]
+; EPILOG-NEXT: br i1 %cond, label %exit_true, label %exit_false
+; EPILOG: latch.7:
+; EPILOG: %niter.nsub.7 = add i64 %niter, -8
+
+; PROLOG: test6(
+; PROLOG: for.exit2.loopexit:
+; PROLOG-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %latch ], [ 42, %for.exiting_block.1 ], [ %add.1, %latch.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %latch.2 ],
+; PROLOG-NEXT: br label %for.exit2
+; PROLOG: for.exit2.loopexit1:
+; PROLOG-NEXT: %retval.ph2 = phi i32 [ 42, %for.exiting_block.prol ], [ %sum.02.prol, %header.prol ]
+; PROLOG-NEXT: br label %for.exit2
+; PROLOG: for.exit2:
+; PROLOG-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph2, %for.exit2.loopexit1 ]
+; PROLOG-NEXT: br i1 %cond, label %exit_true, label %exit_false
+; PROLOG: latch.7:
+; PROLOG: %indvars.iv.next.7 = add i64 %indvars.iv, 8
entry:
br label %header
@@ -277,3 +394,87 @@ exit_true:
exit_false:
ret i32 %addx
}
+
+; test when a value in the exit block does not have a VMap entry.
+define i32 @test7(i32 %arg, i32 %arg1, i32 %arg2) {
+; EPILOG-NO-IC: test7(
+; EPILOG-NO-IC: loopexit1.loopexit:
+; EPILOG-NO-IC-NEXT: %sext3.ph = phi i32 [ %shft, %header ], [ %shft, %latch ], [ %shft, %latch.1 ], [ %shft, %latch.2 ], [ %shft, %latch.3 ], [ %shft, %latch.4 ], [ %shft, %latch.5 ], [ %shft, %latch.6 ]
+; EPILOG-NO-IC-NEXT: br label %loopexit1
+; EPILOG-NO-IC: loopexit1.loopexit1:
+; EPILOG-NO-IC-NEXT: %sext3.ph2 = phi i32 [ %shft, %header.epil ]
+; EPILOG-NO-IC-NEXT: br label %loopexit1
+; EPILOG-NO-IC: loopexit1:
+; EPILOG-NO-IC-NEXT: %sext3 = phi i32 [ %sext3.ph, %loopexit1.loopexit ], [ %sext3.ph2, %loopexit1.loopexit1 ]
+bb:
+ %tmp = icmp slt i32 undef, 2
+ %sext = sext i32 undef to i64
+ %shft = ashr exact i32 %arg, 16
+ br i1 %tmp, label %loopexit2, label %preheader
+
+preheader: ; preds = %bb2
+ br label %header
+
+header: ; preds = %latch, %preheader
+ %tmp6 = phi i64 [ 1, %preheader ], [ %add, %latch ]
+ br i1 false, label %loopexit1, label %latch
+
+latch: ; preds = %header
+ %add = add nuw nsw i64 %tmp6, 1
+ %tmp9 = icmp slt i64 %add, %sext
+ br i1 %tmp9, label %header, label %latchexit
+
+latchexit: ; preds = %latch
+ unreachable
+
+loopexit2: ; preds = %bb2
+ ret i32 %shft
+
+loopexit1: ; preds = %header
+ %sext3 = phi i32 [ %shft, %header ]
+ ret i32 %sext3
+}
+
+; Nested loops; the inner loop is unrolled.
+; FIXME: we cannot unroll with an epilog remainder currently, because
+; the outer loop does not contain the epilog preheader and epilog exit (while
+; in fact it should). This causes us to choke on LCSSA form being incorrect in
+; the outer loop. However, the exit block where LCSSA fails is in fact still within
+; the outer loop. For now, we just bail out when an outer loop is present and an
+; epilog loop would be generated.
+; The outer loop header is the preheader for the inner loop and the inner header
+; branches back to the outer loop.
+define void @test8() {
+; EPILOG: test8(
+; EPILOG-NOT: niter
+
+; PROLOG: test8(
+; PROLOG: outerloop:
+; PROLOG-NEXT: phi i64 [ 3, %bb ], [ 0, %outerloop.loopexit ]
+; PROLOG: %lcmp.mod = icmp eq i64
+; PROLOG-NEXT: br i1 %lcmp.mod, label %innerH.prol.loopexit, label %innerH.prol.preheader
+; PROLOG: latch.6:
+; PROLOG-NEXT: %tmp4.7 = add nsw i64 %tmp3, 8
+; PROLOG-NEXT: br i1 false, label %outerloop.loopexit.loopexit, label %latch.7
+; PROLOG: latch.7
+; PROLOG-NEXT: %tmp6.7 = icmp ult i64 %tmp4.7, 100
+; PROLOG-NEXT: br i1 %tmp6.7, label %innerH, label %exit.unr-lcssa
+bb:
+ br label %outerloop
+
+outerloop: ; preds = %innerH, %bb
+ %tmp = phi i64 [ 3, %bb ], [ 0, %innerH ]
+ br label %innerH
+
+innerH: ; preds = %latch, %outerloop
+ %tmp3 = phi i64 [ %tmp4, %latch ], [ %tmp, %outerloop ]
+ %tmp4 = add nuw nsw i64 %tmp3, 1
+ br i1 false, label %outerloop, label %latch
+
+latch: ; preds = %innerH
+ %tmp6 = icmp ult i64 %tmp4, 100
+ br i1 %tmp6, label %innerH, label %exit
+
+exit: ; preds = %latch
+ ret void
+}
diff --git a/test/Transforms/LoopUnroll/runtime-loop.ll b/test/Transforms/LoopUnroll/runtime-loop.ll
index 04661314eb1d..878f4e8c78f0 100644
--- a/test/Transforms/LoopUnroll/runtime-loop.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop.ll
@@ -170,6 +170,74 @@ for.end: ; preds = %for.cond.for.end_cr
ret i16 %res.0.lcssa
}
+; don't unroll loops with multiple exit/exiting blocks, unless
+; -runtime-unroll-multi-exit=true
+; single exit, multiple exiting blocks.
+define void @unique_exit(i32 %arg) {
+; PROLOG: unique_exit(
+; PROLOG-NOT: .unr
+
+; EPILOG: unique_exit(
+; EPILOG-NOT: .unr
+entry:
+ %tmp = icmp sgt i32 undef, %arg
+ br i1 %tmp, label %preheader, label %returnblock
+
+preheader: ; preds = %entry
+ br label %header
+
+LoopExit: ; preds = %header, %latch
+ %tmp2.ph = phi i32 [ %tmp4, %header ], [ -1, %latch ]
+ br label %returnblock
+
+returnblock: ; preds = %LoopExit, %entry
+ %tmp2 = phi i32 [ -1, %entry ], [ %tmp2.ph, %LoopExit ]
+ ret void
+
+header: ; preds = %preheader, %latch
+ %tmp4 = phi i32 [ %inc, %latch ], [ %arg, %preheader ]
+ %inc = add nsw i32 %tmp4, 1
+ br i1 true, label %LoopExit, label %latch
+
+latch: ; preds = %header
+ %cmp = icmp slt i32 %inc, undef
+ br i1 %cmp, label %header, label %LoopExit
+}
+
+; multiple exit blocks. don't unroll
+define void @multi_exit(i64 %trip, i1 %cond) {
+; PROLOG: multi_exit(
+; PROLOG-NOT: .unr
+
+; EPILOG: multi_exit(
+; EPILOG-NOT: .unr
+entry:
+ br label %loop_header
+
+loop_header:
+ %iv = phi i64 [ 0, %entry ], [ %iv_next, %loop_latch ]
+ br i1 %cond, label %loop_latch, label %loop_exiting_bb1
+
+loop_exiting_bb1:
+ br i1 false, label %loop_exiting_bb2, label %exit1
+
+loop_exiting_bb2:
+ br i1 false, label %loop_latch, label %exit3
+
+exit3:
+ ret void
+
+loop_latch:
+ %iv_next = add i64 %iv, 1
+ %cmp = icmp ne i64 %iv_next, %trip
+ br i1 %cmp, label %loop_header, label %exit2.loopexit
+
+exit1:
+ ret void
+
+exit2.loopexit:
+ ret void
+}
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.unroll.runtime.disable"}
diff --git a/test/Transforms/LoopVectorize/X86/slm-no-vectorize.ll b/test/Transforms/LoopVectorize/X86/slm-no-vectorize.ll
new file mode 100644
index 000000000000..cd3e89ae7350
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/slm-no-vectorize.ll
@@ -0,0 +1,49 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-unknown-linux -S -mcpu=slm -debug 2>&1 | FileCheck -check-prefix=MSG %s
+; REQUIRES: asserts
+; This test should not be vectorized on the X86/SLM arch.
+; Vectorizing the 64-bit multiply in this case is wrong since
+; it can be done with a lower bit mode (notice that the sources are 16-bit).
+; Also, addq/subq (quad word) has a high cost on the SLM arch.
+; This test shows bad performance (a regression of -70%) if vectorized on the SLM arch.
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @no_vec(i32 %LastIndex, i16* nocapture readonly %InputData, i16 signext %lag, i16 signext %Scale) {
+entry:
+; MSG: LV: Selecting VF: 1.
+ %cmp17 = icmp sgt i32 %LastIndex, 0
+ br i1 %cmp17, label %for.body.lr.ph, label %for.cond.cleanup
+
+for.body.lr.ph: ; preds = %entry
+ %conv5 = sext i16 %Scale to i64
+ %sh_prom = and i64 %conv5, 4294967295
+ %0 = sext i16 %lag to i64
+ %wide.trip.count = zext i32 %LastIndex to i64
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ %conv8 = trunc i64 %add7 to i32
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ %Accumulator.0.lcssa = phi i32 [ 0, %entry ], [ %conv8, %for.cond.cleanup.loopexit ]
+ ret i32 %Accumulator.0.lcssa
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
+ %Accumulator.018 = phi i64 [ 0, %for.body.lr.ph ], [ %add7, %for.body ]
+ %arrayidx = getelementptr inbounds i16, i16* %InputData, i64 %indvars.iv
+ %1 = load i16, i16* %arrayidx, align 2
+ %conv = sext i16 %1 to i64
+ %2 = add nsw i64 %indvars.iv, %0
+ %arrayidx3 = getelementptr inbounds i16, i16* %InputData, i64 %2
+ %3 = load i16, i16* %arrayidx3, align 2
+ %conv4 = sext i16 %3 to i64
+ %mul = mul nsw i64 %conv4, %conv
+ %shr = ashr i64 %mul, %sh_prom
+ %add7 = add i64 %shr, %Accumulator.018
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
+
diff --git a/test/Transforms/LoopVectorize/if-conversion-nest.ll b/test/Transforms/LoopVectorize/if-conversion-nest.ll
index 3a581ebf847e..7f381ae6ad7b 100644
--- a/test/Transforms/LoopVectorize/if-conversion-nest.ll
+++ b/test/Transforms/LoopVectorize/if-conversion-nest.ll
@@ -1,18 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-;CHECK-LABEL: @foo(
-;CHECK: icmp sgt
-;CHECK: icmp sgt
-;CHECK: icmp slt
-;CHECK: select <4 x i1>
-;CHECK: %[[P1:.*]] = select <4 x i1>
-;CHECK: xor <4 x i1>
-;CHECK: and <4 x i1>
-;CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %[[P1]]
-;CHECK: ret
define i32 @foo(i32* nocapture %A, i32* nocapture %B, i32 %n) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP26:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP26]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
+; CHECK: for.body.preheader:
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[MIN_ITERS_CHECKED:%.*]]
+; CHECK: min.iters.checked:
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[N]], 3
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = zext i32 [[TMP3]] to i64
+; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[TMP2]], [[N_MOD_VF]]
+; CHECK-NEXT: [[CMP_ZERO:%.*]] = icmp eq i64 [[N_VEC]], 0
+; CHECK-NEXT: br i1 [[CMP_ZERO]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK: vector.memcheck:
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
+; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP5]], 1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i32, i32* [[B:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt i32* [[SCEVGEP4]], [[A]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[MEMCHECK_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT: br i1 [[MEMCHECK_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to <4 x i32>*
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP8]], align 4, !alias.scope !0, !noalias !3
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
+; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4, !alias.scope !3
+; CHECK-NEXT: [[TMP11:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD6]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], <i32 19, i32 19, i32 19, i32 19>
+; CHECK-NEXT: [[TMP13:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD6]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[TMP14:%.*]] = select <4 x i1> [[TMP13]], <4 x i32> <i32 4, i32 4, i32 4, i32 4>, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+; CHECK-NEXT: [[TMP15:%.*]] = and <4 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>, <4 x i32> <i32 9, i32 9, i32 9, i32 9>
+; CHECK-NEXT: [[TMP16:%.*]] = xor <4 x i1> [[TMP12]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[TMP17:%.*]] = and <4 x i1> [[TMP11]], [[TMP16]]
+; CHECK-NEXT: [[PREDPHI7:%.*]] = select <4 x i1> [[TMP17]], <4 x i32> [[TMP14]], <4 x i32> [[PREDPHI]]
+; CHECK-NEXT: [[TMP18:%.*]] = bitcast i32* [[TMP7]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[PREDPHI7]], <4 x i32>* [[TMP18]], align 4, !alias.scope !0, !noalias !3
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !5
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP3]], 0
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ], [ 0, [[MIN_ITERS_CHECKED]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END14:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[TMP20]], [[TMP21]]
+; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_END14]]
+; CHECK: if.then:
+; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP20]], 19
+; CHECK-NEXT: br i1 [[CMP6]], label [[IF_END14]], label [[IF_ELSE:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: [[CMP10:%.*]] = icmp slt i32 [[TMP21]], 4
+; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP10]], i32 4, i32 5
+; CHECK-NEXT: br label [[IF_END14]]
+; CHECK: if.end14:
+; CHECK-NEXT: [[X_0:%.*]] = phi i32 [ 9, [[FOR_BODY]] ], [ 3, [[IF_THEN]] ], [ [[DOT]], [[IF_ELSE]] ]
+; CHECK-NEXT: store i32 [[X_0]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop !8
+; CHECK: for.end.loopexit:
+; CHECK-NEXT: br label [[FOR_END]]
+; CHECK: for.end:
+; CHECK-NEXT: ret i32 undef
+;
entry:
%cmp26 = icmp sgt i32 %n, 0
br i1 %cmp26, label %for.body, label %for.end
@@ -46,3 +120,4 @@ if.end14:
for.end:
ret i32 undef
}
+
diff --git a/test/Transforms/LoopVectorize/pr33706.ll b/test/Transforms/LoopVectorize/pr33706.ll
new file mode 100644
index 000000000000..b9d0d8a44acc
--- /dev/null
+++ b/test/Transforms/LoopVectorize/pr33706.ll
@@ -0,0 +1,61 @@
+; RUN: opt -S -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 < %s | FileCheck %s
+
+@global = local_unnamed_addr global i32 0, align 4
+@global.1 = local_unnamed_addr global i32 0, align 4
+@global.2 = local_unnamed_addr global float 0x3EF0000000000000, align 4
+
+; CHECK-LABEL: @PR33706
+; CHECK-NOT: <2 x i32>
+define void @PR33706(float* nocapture readonly %arg, float* nocapture %arg1, i32 %arg2) local_unnamed_addr {
+bb:
+ %tmp = load i32, i32* @global.1, align 4
+ %tmp3 = getelementptr inbounds float, float* %arg, i64 190
+ %tmp4 = getelementptr inbounds float, float* %arg1, i64 512
+ %tmp5 = and i32 %tmp, 65535
+ %tmp6 = icmp ugt i32 %arg2, 65536
+ br i1 %tmp6, label %bb7, label %bb9
+
+bb7: ; preds = %bb
+ %tmp8 = load i32, i32* @global, align 4
+ br label %bb27
+
+bb9: ; preds = %bb
+ %tmp10 = udiv i32 65536, %arg2
+ br label %bb11
+
+bb11: ; preds = %bb11, %bb9
+ %tmp12 = phi i32 [ %tmp20, %bb11 ], [ %tmp5, %bb9 ]
+ %tmp13 = phi float* [ %tmp18, %bb11 ], [ %tmp4, %bb9 ]
+ %tmp14 = phi i32 [ %tmp16, %bb11 ], [ %tmp10, %bb9 ]
+ %tmp15 = phi i32 [ %tmp19, %bb11 ], [ %tmp, %bb9 ]
+ %tmp16 = add nsw i32 %tmp14, -1
+ %tmp17 = sitofp i32 %tmp12 to float
+ store float %tmp17, float* %tmp13, align 4
+ %tmp18 = getelementptr inbounds float, float* %tmp13, i64 1
+ %tmp19 = add i32 %tmp15, %arg2
+ %tmp20 = and i32 %tmp19, 65535
+ %tmp21 = icmp eq i32 %tmp16, 0
+ br i1 %tmp21, label %bb22, label %bb11
+
+bb22: ; preds = %bb11
+ %tmp23 = phi float* [ %tmp18, %bb11 ]
+ %tmp24 = phi i32 [ %tmp19, %bb11 ]
+ %tmp25 = phi i32 [ %tmp20, %bb11 ]
+ %tmp26 = ashr i32 %tmp24, 16
+ store i32 %tmp26, i32* @global, align 4
+ br label %bb27
+
+bb27: ; preds = %bb22, %bb7
+ %tmp28 = phi i32 [ %tmp26, %bb22 ], [ %tmp8, %bb7 ]
+ %tmp29 = phi float* [ %tmp23, %bb22 ], [ %tmp4, %bb7 ]
+ %tmp30 = phi i32 [ %tmp25, %bb22 ], [ %tmp5, %bb7 ]
+ %tmp31 = sext i32 %tmp28 to i64
+ %tmp32 = getelementptr inbounds float, float* %tmp3, i64 %tmp31
+ %tmp33 = load float, float* %tmp32, align 4
+ %tmp34 = sitofp i32 %tmp30 to float
+ %tmp35 = load float, float* @global.2, align 4
+ %tmp36 = fmul float %tmp35, %tmp34
+ %tmp37 = fadd float %tmp33, %tmp36
+ store float %tmp37, float* %tmp29, align 4
+ ret void
+}
diff --git a/test/Transforms/LowerTypeTests/Inputs/import-icall.yaml b/test/Transforms/LowerTypeTests/Inputs/import-icall.yaml
index 17b634acd0e1..558aa9aa73f2 100644
--- a/test/Transforms/LowerTypeTests/Inputs/import-icall.yaml
+++ b/test/Transforms/LowerTypeTests/Inputs/import-icall.yaml
@@ -16,4 +16,5 @@ CfiFunctionDefs:
CfiFunctionDecls:
- external
- external_weak
+ - local_decl
...
diff --git a/test/Transforms/LowerTypeTests/import-icall.ll b/test/Transforms/LowerTypeTests/import-icall.ll
index ddeb7fb5c9a2..b4e374720321 100644
--- a/test/Transforms/LowerTypeTests/import-icall.ll
+++ b/test/Transforms/LowerTypeTests/import-icall.ll
@@ -19,6 +19,10 @@ define i8 @use_b() {
ret i8 %x
}
+define void @local_decl() {
+ call void @local_decl()
+ ret void
+}
declare void @external()
declare extern_weak void @external_weak()
@@ -33,6 +37,9 @@ declare extern_weak void @external_weak()
; CHECK: define internal i8 @local_b() {
; CHECK-NEXT: call i8 @local_a()
+; CHECK: define void @local_decl()
+; CHECK-NEXT: call void @local_decl()
+
; CHECK: declare void @external()
; CHECK: declare extern_weak void @external_weak()
; CHECK: declare i8 @local_a()
diff --git a/test/Transforms/NewGVN/pr33720.ll b/test/Transforms/NewGVN/pr33720.ll
new file mode 100644
index 000000000000..3b6c190a4494
--- /dev/null
+++ b/test/Transforms/NewGVN/pr33720.ll
@@ -0,0 +1,91 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -newgvn -S %s | FileCheck %s
+
+@f = external local_unnamed_addr global i64
+@b = external local_unnamed_addr global i64
+@e = external local_unnamed_addr global i64
+
+define void @patatino() {
+; CHECK-LABEL: @patatino(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 undef, label [[IF_END24:%.*]], label [[FOR_COND16:%.*]]
+; CHECK: for.cond2thread-pre-split:
+; CHECK-NEXT: br i1 false, label [[FOR_BODY:%.*]], label [[FOR_COND8_PREHEADER:%.*]]
+; CHECK: for.cond8.preheader:
+; CHECK-NEXT: br i1 undef, label [[L1:%.*]], label %for.cond11thread-pre-split.lr.ph
+; CHECK: for.cond11thread-pre-split.lr.ph:
+; CHECK-NEXT: br label [[L1]]
+; CHECK: for.body:
+; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[K_2:%.*]], 3
+; CHECK-NEXT: [[CONV4:%.*]] = zext i1 [[CMP3]] to i64
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* @f
+; CHECK-NEXT: [[OR:%.*]] = or i64 [[TMP0]], [[CONV4]]
+; CHECK-NEXT: store i64 [[OR]], i64* @f
+; CHECK-NEXT: [[TOBOOL7:%.*]] = icmp ne i64 [[K_2]], 0
+; CHECK-NEXT: br i1 [[TOBOOL7]], label %for.cond2thread-pre-split, label [[LOR_RHS:%.*]]
+; CHECK: lor.rhs:
+; CHECK-NEXT: store i64 1, i64* @b, align 8
+; CHECK-NEXT: br label %for.cond2thread-pre-split
+; CHECK: l1:
+; CHECK-NEXT: [[K_2]] = phi i64 [ undef, [[L1_PREHEADER:%.*]] ], [ 15, [[FOR_COND8_PREHEADER]] ], [ 5, %for.cond11thread-pre-split.lr.ph ]
+; CHECK-NEXT: store i64 7, i64* [[J_3:%.*]]
+; CHECK-NEXT: br label [[FOR_BODY]]
+; CHECK: for.cond16:
+; CHECK-NEXT: [[J_0:%.*]] = phi i64* [ @f, [[ENTRY:%.*]] ], [ undef, [[FOR_COND20:%.*]] ], [ @e, [[FOR_COND16]] ]
+; CHECK-NEXT: br i1 undef, label [[FOR_COND20]], label [[FOR_COND16]]
+; CHECK: for.cond20:
+; CHECK-NEXT: [[J_2:%.*]] = phi i64* [ [[J_0]], [[FOR_COND16]] ], [ undef, [[IF_END24]] ]
+; CHECK-NEXT: br i1 true, label [[IF_END24]], label [[FOR_COND16]]
+; CHECK: if.end24:
+; CHECK-NEXT: [[J_3]] = phi i64* [ [[J_2]], [[FOR_COND20]] ], [ undef, [[ENTRY]] ]
+; CHECK-NEXT: br i1 false, label [[FOR_COND20]], label [[L1_PREHEADER]]
+; CHECK: l1.preheader:
+; CHECK-NEXT: br label [[L1]]
+;
+entry:
+ br i1 undef, label %if.end24, label %for.cond16
+
+for.cond2thread-pre-split:
+ br i1 false, label %for.body, label %for.cond8.preheader
+
+for.cond8.preheader:
+ br i1 undef, label %l1, label %for.cond11thread-pre-split.lr.ph
+
+for.cond11thread-pre-split.lr.ph:
+ br label %l1
+
+for.body:
+ %k.031 = phi i64 [ %k.2, %l1 ], [ 15, %for.cond2thread-pre-split ]
+ %cmp3 = icmp ne i64 %k.031, 3
+ %conv4 = zext i1 %cmp3 to i64
+ %0 = load i64, i64* @f
+ %or = or i64 %0, %conv4
+ store i64 %or, i64* @f
+ %tobool7 = icmp ne i64 %k.031, 0
+ %or.cond = or i1 %tobool7, false
+ br i1 %or.cond, label %for.cond2thread-pre-split, label %lor.rhs
+
+lor.rhs:
+ store i64 1, i64* @b, align 8
+ br label %for.cond2thread-pre-split
+
+l1:
+ %k.2 = phi i64 [ undef, %l1.preheader ], [ 15, %for.cond8.preheader ], [ 5, %for.cond11thread-pre-split.lr.ph ]
+ store i64 7, i64* %j.3
+ br label %for.body
+
+for.cond16:
+ %j.0 = phi i64* [ @f, %entry ], [ %j.2, %for.cond20 ], [ @e, %for.cond16 ]
+ br i1 undef, label %for.cond20, label %for.cond16
+
+for.cond20:
+ %j.2 = phi i64* [ %j.0, %for.cond16 ], [ %j.3, %if.end24 ]
+ br i1 true, label %if.end24, label %for.cond16
+
+if.end24:
+ %j.3 = phi i64* [ %j.2, %for.cond20 ], [ undef, %entry ]
+ br i1 false, label %for.cond20, label %l1.preheader
+
+l1.preheader:
+ br label %l1
+}
diff --git a/test/Transforms/PGOProfile/counter_promo_exit_merge.ll b/test/Transforms/PGOProfile/counter_promo_exit_merge.ll
index f53d37600ce6..85ca1613c8ad 100644
--- a/test/Transforms/PGOProfile/counter_promo_exit_merge.ll
+++ b/test/Transforms/PGOProfile/counter_promo_exit_merge.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -instrprof -do-counter-promotion=true -speculative-counter-promotion -S | FileCheck --check-prefix=PROMO %s
-; RUN: opt < %s --passes=instrprof -do-counter-promotion=true -speculative-counter-promotion -S | FileCheck --check-prefix=PROMO %s
+; RUN: opt < %s -instrprof -do-counter-promotion=true -speculative-counter-promotion-max-exiting=3 -S | FileCheck --check-prefix=PROMO %s
+; RUN: opt < %s --passes=instrprof -do-counter-promotion=true -speculative-counter-promotion-max-exiting=3 -S | FileCheck --check-prefix=PROMO %s
$__llvm_profile_raw_version = comdat any
diff --git a/test/Transforms/PGOProfile/counter_promo_mexits.ll b/test/Transforms/PGOProfile/counter_promo_mexits.ll
index 71e5f066d50f..bb799757a47c 100644
--- a/test/Transforms/PGOProfile/counter_promo_mexits.ll
+++ b/test/Transforms/PGOProfile/counter_promo_mexits.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -pgo-instr-gen -instrprof -do-counter-promotion=true -speculative-counter-promotion -S | FileCheck --check-prefix=PROMO %s
-; RUN: opt < %s --passes=pgo-instr-gen,instrprof -do-counter-promotion=true -speculative-counter-promotion -S | FileCheck --check-prefix=PROMO %s
+; RUN: opt < %s -pgo-instr-gen -instrprof -do-counter-promotion=true -speculative-counter-promotion-max-exiting=3 -S | FileCheck --check-prefix=PROMO %s
+; RUN: opt < %s --passes=pgo-instr-gen,instrprof -do-counter-promotion=true -speculative-counter-promotion-max-exiting=3 -S | FileCheck --check-prefix=PROMO %s
@g = common local_unnamed_addr global i32 0, align 4
diff --git a/test/Transforms/PGOProfile/counter_promo_nest.ll b/test/Transforms/PGOProfile/counter_promo_nest.ll
new file mode 100644
index 000000000000..b7f117b3e949
--- /dev/null
+++ b/test/Transforms/PGOProfile/counter_promo_nest.ll
@@ -0,0 +1,165 @@
+; TEST that counter updates are promoted outside the whole loop nest
+; RUN: opt < %s -pgo-instr-gen -instrprof -do-counter-promotion=true -S | FileCheck --check-prefix=PROMO %s
+; RUN: opt < %s --passes=pgo-instr-gen,instrprof -do-counter-promotion=true -S | FileCheck --check-prefix=PROMO %s
+
+@g = common local_unnamed_addr global i32 0, align 4
+@c = local_unnamed_addr global i32 10, align 4
+
+; Function Attrs: noinline norecurse nounwind uwtable
+define void @bar() local_unnamed_addr #0 {
+bb:
+ %tmp2 = load i32, i32* @g, align 4, !tbaa !2
+ %tmp3 = add nsw i32 %tmp2, 1
+ store i32 %tmp3, i32* @g, align 4, !tbaa !2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind uwtable
+define i32 @main() local_unnamed_addr #1 {
+bb:
+ store i32 0, i32* @g, align 4, !tbaa !2
+ %tmp = load i32, i32* @c, align 4, !tbaa !2
+ %tmp1 = icmp sgt i32 %tmp, 0
+ br i1 %tmp1, label %bb2_1, label %bb84
+
+bb2_1:
+ br label %bb2
+
+bb2: ; preds = %bb39, %bb
+ %tmp3 = phi i32 [ %tmp40, %bb39 ], [ %tmp, %bb2_1 ]
+ %tmp5 = phi i32 [ %tmp43, %bb39 ], [ 0, %bb2_1 ]
+ %tmp7 = icmp sgt i32 %tmp3, 0
+ br i1 %tmp7, label %bb14_1, label %bb39
+
+bb8: ; preds = %bb39
+; PROMO-LABEL: bb8
+; PROMO: load {{.*}} @__profc_main{{.*}}
+; PROMO-NEXT: add
+; PROMO-NEXT: store {{.*}}@__profc_main{{.*}}
+; PROMO-NEXT: load {{.*}} @__profc_main{{.*}}
+; PROMO-NEXT: add
+; PROMO-NEXT: store {{.*}}@__profc_main{{.*}}
+; PROMO-NEXT: load {{.*}} @__profc_main{{.*}}
+; PROMO-NEXT: add
+; PROMO-NEXT: store {{.*}}@__profc_main{{.*}}
+; PROMO-NEXT: load {{.*}} @__profc_main{{.*}}
+; PROMO-NEXT: add
+; PROMO-NEXT: store {{.*}}@__profc_main{{.*}}
+; PROMO-NEXT: load {{.*}} @__profc_main{{.*}}
+; PROMO-NEXT: add
+; PROMO-NEXT: store {{.*}}@__profc_main{{.*}}
+
+ %tmp13 = icmp sgt i32 %tmp40, 0
+ br i1 %tmp13, label %bb45, label %bb84
+
+bb14_1:
+ br label %bb14
+
+bb14: ; preds = %bb29, %bb2
+ %tmp15 = phi i32 [ %tmp30, %bb29 ], [ %tmp3, %bb14_1 ]
+ %tmp16 = phi i64 [ %tmp31, %bb29 ], [ 0, %bb14_1 ]
+ %tmp17 = phi i64 [ %tmp32, %bb29 ], [ 0, %bb14_1 ]
+ %tmp18 = phi i32 [ %tmp33, %bb29 ], [ 0, %bb14_1 ]
+ %tmp19 = icmp sgt i32 %tmp15, 0
+ br i1 %tmp19, label %bb20_split, label %bb29
+
+bb20_split:
+ br label %bb20
+
+bb20: ; preds = %bb20, %bb14
+ %tmp21 = phi i64 [ %tmp23, %bb20 ], [ 0, %bb20_split ]
+ %tmp22 = phi i32 [ %tmp24, %bb20 ], [ 0, %bb20_split ]
+ %tmp23 = add nuw i64 %tmp21, 1
+ tail call void @bar()
+ %tmp24 = add nuw nsw i32 %tmp22, 1
+ %tmp25 = load i32, i32* @c, align 4, !tbaa !2
+ %tmp26 = icmp slt i32 %tmp24, %tmp25
+ br i1 %tmp26, label %bb20, label %bb27
+
+bb27: ; preds = %bb20
+ %tmp28 = add i64 %tmp23, %tmp16
+ br label %bb29
+
+bb29: ; preds = %bb27, %bb14
+ %tmp30 = phi i32 [ %tmp25, %bb27 ], [ %tmp15, %bb14 ]
+ %tmp31 = phi i64 [ %tmp28, %bb27 ], [ %tmp16, %bb14 ]
+ %tmp32 = add nuw i64 %tmp17, 1
+ %tmp33 = add nuw nsw i32 %tmp18, 1
+ %tmp34 = icmp slt i32 %tmp33, %tmp30
+ br i1 %tmp34, label %bb14, label %bb35
+
+bb35: ; preds = %bb29
+ %tmp36 = insertelement <2 x i64> undef, i64 %tmp31, i32 0
+ br label %bb39
+
+bb39: ; preds = %bb35, %bb2
+ %tmp40 = phi i32 [ %tmp30, %bb35 ], [ %tmp3, %bb2 ]
+ %tmp43 = add nuw nsw i32 %tmp5, 1
+ %tmp44 = icmp slt i32 %tmp43, %tmp40
+ br i1 %tmp44, label %bb2, label %bb8
+
+bb45: ; preds = %bb67, %bb8
+ %tmp46 = phi i32 [ %tmp68, %bb67 ], [ %tmp40, %bb8 ]
+ %tmp47 = phi i64 [ %tmp69, %bb67 ], [ 0, %bb8 ]
+ %tmp48 = phi i64 [ %tmp70, %bb67 ], [ 0, %bb8 ]
+ %tmp49 = phi i32 [ %tmp71, %bb67 ], [ 0, %bb8 ]
+ %tmp50 = icmp sgt i32 %tmp46, 0
+ br i1 %tmp50, label %bb57, label %bb67
+
+bb51: ; preds = %bb67
+ %tmp56 = icmp sgt i32 %tmp68, 0
+ br i1 %tmp56, label %bb73, label %bb84
+
+bb57: ; preds = %bb57, %bb45
+ %tmp58 = phi i64 [ %tmp60, %bb57 ], [ 0, %bb45 ]
+ %tmp59 = phi i32 [ %tmp61, %bb57 ], [ 0, %bb45 ]
+ %tmp60 = add nuw i64 %tmp58, 1
+ tail call void @bar()
+ %tmp61 = add nuw nsw i32 %tmp59, 1
+ %tmp62 = load i32, i32* @c, align 4, !tbaa !2
+ %tmp63 = mul nsw i32 %tmp62, 10
+ %tmp64 = icmp slt i32 %tmp61, %tmp63
+ br i1 %tmp64, label %bb57, label %bb65
+
+bb65: ; preds = %bb57
+ %tmp66 = add i64 %tmp60, %tmp47
+ br label %bb67
+
+bb67: ; preds = %bb65, %bb45
+ %tmp68 = phi i32 [ %tmp62, %bb65 ], [ %tmp46, %bb45 ]
+ %tmp69 = phi i64 [ %tmp66, %bb65 ], [ %tmp47, %bb45 ]
+ %tmp70 = add nuw i64 %tmp48, 1
+ %tmp71 = add nuw nsw i32 %tmp49, 1
+ %tmp72 = icmp slt i32 %tmp71, %tmp68
+ br i1 %tmp72, label %bb45, label %bb51
+
+bb73: ; preds = %bb73, %bb51
+ %tmp74 = phi i64 [ %tmp76, %bb73 ], [ 0, %bb51 ]
+ %tmp75 = phi i32 [ %tmp77, %bb73 ], [ 0, %bb51 ]
+ %tmp76 = add nuw i64 %tmp74, 1
+ tail call void @bar()
+ %tmp77 = add nuw nsw i32 %tmp75, 1
+ %tmp78 = load i32, i32* @c, align 4, !tbaa !2
+ %tmp79 = mul nsw i32 %tmp78, 100
+ %tmp80 = icmp slt i32 %tmp77, %tmp79
+ br i1 %tmp80, label %bb73, label %bb81
+
+bb81: ; preds = %bb73
+ br label %bb84
+
+bb84: ; preds = %bb81, %bb51, %bb8, %bb
+ ret i32 0
+}
+
+attributes #0 = { noinline }
+attributes #1 = { norecurse nounwind uwtable }
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{!"clang version 5.0.0 (trunk 307355)"}
+!2 = !{!3, !3, i64 0}
+!3 = !{!"int", !4, i64 0}
+!4 = !{!"omnipotent char", !5, i64 0}
+!5 = !{!"Simple C/C++ TBAA"}
diff --git a/test/Transforms/SimplifyCFG/implied-and-or.ll b/test/Transforms/SimplifyCFG/implied-and-or.ll
new file mode 100644
index 000000000000..e615f302feef
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/implied-and-or.ll
@@ -0,0 +1,183 @@
+; RUN: opt %s -S -simplifycfg | FileCheck %s
+
+declare void @foo()
+declare void @bar()
+
+
+; CHECK-LABEL: @test_and1
+; CHECK: taken:
+; CHECK-NOT: cmp3
+; CHECK: call void @bar()
+; CHECK-NEXT: call void @foo()
+; CHECK: ret
+define void @test_and1(i32 %a, i32 %b) {
+entry:
+ %cmp1 = icmp eq i32 %a, 0
+ %cmp2 = icmp eq i32 %b, 0
+ %and = and i1 %cmp1, %cmp2
+ br i1 %and, label %taken, label %end
+
+taken:
+ call void @bar()
+ %cmp3 = icmp eq i32 %a, 0 ;; <-- implied true
+ br i1 %cmp3, label %if.then, label %end
+
+if.then:
+ call void @foo()
+ br label %end
+
+end:
+ ret void
+}
+
+; We can't infer anything if the result of the 'and' is false
+; CHECK-LABEL: @test_and2
+; CHECK: taken:
+; CHECK: call void @bar()
+; CHECK: %cmp3
+; CHECK: br i1 %cmp3
+; CHECK: if.then:
+; CHECK: call void @foo()
+; CHECK: ret
+define void @test_and2(i32 %a, i32 %b) {
+entry:
+ %cmp1 = icmp eq i32 %a, 0
+ %cmp2 = icmp eq i32 %b, 0
+ %and = and i1 %cmp1, %cmp2
+ br i1 %and, label %end, label %taken
+
+taken:
+ call void @bar()
+ %cmp3 = icmp eq i32 %a, 0
+ br i1 %cmp3, label %if.then, label %end
+
+if.then:
+ call void @foo()
+ br label %end
+
+end:
+ ret void
+}
+
+; CHECK-LABEL: @test_or1
+; CHECK: taken:
+; CHECK-NOT: cmp3
+; CHECK: call void @bar()
+; CHECK-NEXT: call void @foo()
+; CHECK: ret
+define void @test_or1(i32 %a, i32 %b) {
+entry:
+ %cmp1 = icmp eq i32 %a, 0
+ %cmp2 = icmp eq i32 %b, 0
+ %or = or i1 %cmp1, %cmp2
+ br i1 %or, label %end, label %taken
+
+taken:
+ call void @bar()
+ %cmp3 = icmp ne i32 %a, 0 ;; <-- implied true
+ br i1 %cmp3, label %if.then, label %end
+
+if.then:
+ call void @foo()
+ br label %end
+
+end:
+ ret void
+}
+
+; We can't infer anything if the result of the 'or' is true
+; CHECK-LABEL: @test_or2
+; CHECK: call void @bar()
+; CHECK: %cmp3
+; CHECK: br i1 %cmp3
+; CHECK: if.then:
+; CHECK: call void @foo()
+; CHECK: ret
+define void @test_or2(i32 %a, i32 %b) {
+entry:
+ %cmp1 = icmp eq i32 %a, 0
+ %cmp2 = icmp eq i32 %b, 0
+ %or = or i1 %cmp1, %cmp2
+ br i1 %or, label %taken, label %end
+
+taken:
+ call void @bar()
+ %cmp3 = icmp eq i32 %a, 0
+ br i1 %cmp3, label %if.then, label %end
+
+if.then:
+ call void @foo()
+ br label %end
+
+end:
+ ret void
+}
+
+; We can recurse a tree of 'and' or 'or's.
+; CHECK-LABEL: @test_and_recurse1
+; CHECK: taken:
+; CHECK-NEXT: call void @bar()
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: br label %end
+; CHECK: ret
+define void @test_and_recurse1(i32 %a, i32 %b, i32 %c) {
+entry:
+ %cmpa = icmp eq i32 %a, 0
+ %cmpb = icmp eq i32 %b, 0
+ %cmpc = icmp eq i32 %c, 0
+ %and1 = and i1 %cmpa, %cmpb
+ %and2 = and i1 %and1, %cmpc
+ br i1 %and2, label %taken, label %end
+
+taken:
+ call void @bar()
+ %cmp3 = icmp eq i32 %a, 0
+ br i1 %cmp3, label %if.then, label %end
+
+if.then:
+ call void @foo()
+ br label %end
+
+end:
+ ret void
+}
+
+; Check to make sure we don't recurse too deep.
+; CHECK-LABEL: @test_and_recurse2
+; CHECK: taken:
+; CHECK-NEXT: call void @bar()
+; CHECK-NEXT: %cmp3 = icmp eq i32 %a, 0
+; CHECK-NEXT: br i1 %cmp3, label %if.then, label %end
+; CHECK: ret
+define void @test_and_recurse2(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
+ i32 %g, i32 %h) {
+entry:
+ %cmpa = icmp eq i32 %a, 0
+ %cmpb = icmp eq i32 %b, 0
+ %cmpc = icmp eq i32 %c, 0
+ %cmpd = icmp eq i32 %d, 0
+ %cmpe = icmp eq i32 %e, 0
+ %cmpf = icmp eq i32 %f, 0
+ %cmpg = icmp eq i32 %g, 0
+ %cmph = icmp eq i32 %h, 0
+ %and1 = and i1 %cmpa, %cmpb
+ %and2 = and i1 %and1, %cmpc
+ %and3 = and i1 %and2, %cmpd
+ %and4 = and i1 %and3, %cmpe
+ %and5 = and i1 %and4, %cmpf
+ %and6 = and i1 %and5, %cmpg
+ %and7 = and i1 %and6, %cmph
+ br i1 %and7, label %taken, label %end
+
+taken:
+ call void @bar()
+ %cmp3 = icmp eq i32 %a, 0 ; <-- can be implied true
+ br i1 %cmp3, label %if.then, label %end
+
+if.then:
+ call void @foo()
+ br label %end
+
+end:
+ ret void
+}
diff --git a/test/Transforms/SimplifyCFG/sink-common-code.ll b/test/Transforms/SimplifyCFG/sink-common-code.ll
index 0f7bfa8516c9..513da477607b 100644
--- a/test/Transforms/SimplifyCFG/sink-common-code.ll
+++ b/test/Transforms/SimplifyCFG/sink-common-code.ll
@@ -818,6 +818,30 @@ merge:
; CHECK: right:
; CHECK-NEXT: %val1 = call i32 @call_target() [ "deopt"(i32 20) ]
+%T = type {i32, i32}
+
+define i32 @test_insertvalue(i1 zeroext %flag, %T %P) {
+entry:
+ br i1 %flag, label %if.then, label %if.else
+
+if.then:
+ %t1 = insertvalue %T %P, i32 0, 0
+ br label %if.end
+
+if.else:
+ %t2 = insertvalue %T %P, i32 1, 0
+ br label %if.end
+
+if.end:
+ %t = phi %T [%t1, %if.then], [%t2, %if.else]
+ ret i32 1
+}
+
+; CHECK-LABEL: @test_insertvalue
+; CHECK: select
+; CHECK: insertvalue
+; CHECK-NOT: insertvalue
+
; CHECK: ![[TBAA]] = !{![[TYPE:[0-9]]], ![[TYPE]], i64 0}
; CHECK: ![[TYPE]] = !{!"float", ![[TEXT:[0-9]]]}
; CHECK: ![[TEXT]] = !{!"an example type tree"}
diff --git a/test/Transforms/Sink/fence.ll b/test/Transforms/Sink/fence.ll
index aa237d8192b6..09aa565d88f8 100644
--- a/test/Transforms/Sink/fence.ll
+++ b/test/Transforms/Sink/fence.ll
@@ -5,9 +5,9 @@ target triple = "x86_64-unknown-linux-gnu"
define void @test1(i32* ()*) {
entry:
%1 = call i32* %0() #0
- fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
%2 = load i32, i32* %1, align 4
- fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
%3 = icmp eq i32 %2, 0
br i1 %3, label %fail, label %pass
@@ -20,9 +20,9 @@ pass: ; preds = %fail, %top
; CHECK-LABEL: @test1(
; CHECK: %[[call:.*]] = call i32* %0()
-; CHECK: fence singlethread seq_cst
+; CHECK: fence syncscope("singlethread") seq_cst
; CHECK: load i32, i32* %[[call]], align 4
-; CHECK: fence singlethread seq_cst
+; CHECK: fence syncscope("singlethread") seq_cst
attributes #0 = { nounwind readnone }
diff --git a/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll b/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll
new file mode 100644
index 000000000000..661d0739401a
--- /dev/null
+++ b/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll
@@ -0,0 +1,37 @@
+; Test for a bug specific to the new pass manager where we may build a domtree
+; to make more precise AA queries for functions.
+;
+; RUN: opt -aa-pipeline=default -passes='no-op-module' -debug-pass-manager -thinlto-bc -o %t %s
+; RUN: llvm-modextract -b -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=M0 %s
+; RUN: llvm-modextract -b -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=M1 %s
+
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.hoge = type { %struct.widget }
+%struct.widget = type { i32 (...)** }
+
+; M0: @global = local_unnamed_addr global
+; M1-NOT: @global
+@global = local_unnamed_addr global %struct.hoge { %struct.widget { i32 (...)** bitcast (i8** getelementptr inbounds ({ [3 x i8*] }, { [3 x i8*] }* @global.1, i32 0, inrange i32 0, i32 2) to i32 (...)**) } }, align 8
+
+; M0: @global.1 = external unnamed_addr constant
+; M1: @global.1 = linkonce_odr unnamed_addr constant
+@global.1 = linkonce_odr unnamed_addr constant { [3 x i8*] } { [3 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @global.4 to i8*), i8* bitcast (i32 (%struct.widget*)* @quux to i8*)] }, align 8, !type !0
+
+; M0: @global.2 = external global
+; M1-NOT: @global.2
+@global.2 = external global i8*
+
+; M0: @global.3 = linkonce_odr constant
+; M1-NOT: @global.3
+@global.3 = linkonce_odr constant [22 x i8] c"zzzzzzzzzzzzzzzzzzzzz\00"
+
+; M0: @global.4 = linkonce_odr constant
+; M1: @global.4 = external constant
+@global.4 = linkonce_odr constant { i8*, i8* }{ i8* bitcast (i8** getelementptr inbounds (i8*, i8** @global.2, i64 2) to i8*), i8* getelementptr inbounds ([22 x i8], [22 x i8]* @global.3, i32 0, i32 0) }
+
+@llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
+
+declare i32 @quux(%struct.widget*) unnamed_addr
+
+!0 = !{i64 16, !"yyyyyyyyyyyyyyyyyyyyyyyyy"}
diff --git a/test/Unit/lit.cfg b/test/Unit/lit.cfg
index dac0bf829ba6..9da82f5f2c9b 100644
--- a/test/Unit/lit.cfg
+++ b/test/Unit/lit.cfg
@@ -3,6 +3,7 @@
# Configuration file for the 'lit' test runner.
import os
+import subprocess
import lit.formats
@@ -75,8 +76,8 @@ if config.test_exec_root is None:
lit_config.fatal('No site specific configuration available!')
# Get the source and object roots.
- llvm_src_root = lit.util.capture(['llvm-config', '--src-root']).strip()
- llvm_obj_root = lit.util.capture(['llvm-config', '--obj-root']).strip()
+ llvm_src_root = subprocess.check_output(['llvm-config', '--src-root']).strip()
+ llvm_obj_root = subprocess.check_output(['llvm-config', '--obj-root']).strip()
# Validate that we got a tree which points to here.
this_src_root = os.path.join(os.path.dirname(__file__),'..','..')
diff --git a/test/Verifier/2004-05-21-SwitchConstantMismatch.ll b/test/Verifier/2004-05-21-SwitchConstantMismatch.ll
index 339a21cac190..fea290d74c4a 100644
--- a/test/Verifier/2004-05-21-SwitchConstantMismatch.ll
+++ b/test/Verifier/2004-05-21-SwitchConstantMismatch.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
diff --git a/test/Verifier/2007-12-21-InvokeParamAttrs.ll b/test/Verifier/2007-12-21-InvokeParamAttrs.ll
index 709b47b33daa..c62bc0f4e190 100644
--- a/test/Verifier/2007-12-21-InvokeParamAttrs.ll
+++ b/test/Verifier/2007-12-21-InvokeParamAttrs.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
declare void @foo(i8*)
diff --git a/test/Verifier/2008-01-11-VarargAttrs.ll b/test/Verifier/2008-01-11-VarargAttrs.ll
index af97ce647449..d3eb7c72699a 100644
--- a/test/Verifier/2008-01-11-VarargAttrs.ll
+++ b/test/Verifier/2008-01-11-VarargAttrs.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
%struct = type { }
diff --git a/test/Verifier/2009-05-29-InvokeResult1.ll b/test/Verifier/2009-05-29-InvokeResult1.ll
index bb815b3bfe15..38679f4c49fc 100644
--- a/test/Verifier/2009-05-29-InvokeResult1.ll
+++ b/test/Verifier/2009-05-29-InvokeResult1.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
declare i32 @v()
diff --git a/test/Verifier/2009-05-29-InvokeResult2.ll b/test/Verifier/2009-05-29-InvokeResult2.ll
index 900b1d827bf4..92a51d71efe6 100644
--- a/test/Verifier/2009-05-29-InvokeResult2.ll
+++ b/test/Verifier/2009-05-29-InvokeResult2.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
declare i32 @v()
diff --git a/test/Verifier/2009-05-29-InvokeResult3.ll b/test/Verifier/2009-05-29-InvokeResult3.ll
index 050de4669d35..3fff219cab7d 100644
--- a/test/Verifier/2009-05-29-InvokeResult3.ll
+++ b/test/Verifier/2009-05-29-InvokeResult3.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
declare i32 @v()
diff --git a/test/Verifier/byval-1.ll b/test/Verifier/byval-1.ll
index 9bbead086114..9d09a0ffb117 100644
--- a/test/Verifier/byval-1.ll
+++ b/test/Verifier/byval-1.ll
@@ -1,2 +1,2 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
declare void @h(i32 byval %num)
diff --git a/test/Verifier/element-wise-atomic-memory-intrinsics.ll b/test/Verifier/element-wise-atomic-memory-intrinsics.ll
index 470c861c5057..81c8ba16b97d 100644
--- a/test/Verifier/element-wise-atomic-memory-intrinsics.ll
+++ b/test/Verifier/element-wise-atomic-memory-intrinsics.ll
@@ -22,4 +22,46 @@ define void @test_memcpy(i8* %P, i8* %Q, i32 %A, i32 %E) {
ret void
}
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind
+
+define void @test_memmove(i8* %P, i8* %Q, i32 %A, i32 %E) {
+ ; CHECK: element size of the element-wise unordered atomic memory intrinsic must be a constant int
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1, i32 %E)
+ ; CHECK: element size of the element-wise atomic memory intrinsic must be a power of 2
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1, i32 3)
+
+ ; CHECK: constant length must be a multiple of the element size in the element-wise atomic memory intrinsic
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 7, i32 4)
+
+ ; CHECK: incorrect alignment of the destination argument
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* %P, i8* align 4 %Q, i32 1, i32 1)
+ ; CHECK: incorrect alignment of the destination argument
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %P, i8* align 4 %Q, i32 4, i32 4)
+
+ ; CHECK: incorrect alignment of the source argument
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* %Q, i32 1, i32 1)
+ ; CHECK: incorrect alignment of the source argument
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 1 %Q, i32 4, i32 4)
+
+ ret void
+}
+declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind
+
+define void @test_memset(i8* %P, i8 %V, i32 %A, i32 %E) {
+ ; CHECK: element size of the element-wise unordered atomic memory intrinsic must be a constant int
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 1, i32 %E)
+ ; CHECK: element size of the element-wise atomic memory intrinsic must be a power of 2
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 1, i32 3)
+
+ ; CHECK: constant length must be a multiple of the element size in the element-wise atomic memory intrinsic
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 7, i32 4)
+
+ ; CHECK: incorrect alignment of the destination argument
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* %P, i8 %V, i32 1, i32 1)
+ ; CHECK: incorrect alignment of the destination argument
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %P, i8 %V, i32 4, i32 4)
+
+ ret void
+}
+declare void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nocapture, i8, i32, i32) nounwind
+
; CHECK: input module is broken!
diff --git a/test/Verifier/gcread-ptrptr.ll b/test/Verifier/gcread-ptrptr.ll
index 4ed22fa6c24e..f8b21bfb4c93 100644
--- a/test/Verifier/gcread-ptrptr.ll
+++ b/test/Verifier/gcread-ptrptr.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
; PR1633
%meta = type { i8* }
diff --git a/test/Verifier/gcroot-alloca.ll b/test/Verifier/gcroot-alloca.ll
index 8caa4b9f58b5..775bde78250e 100644
--- a/test/Verifier/gcroot-alloca.ll
+++ b/test/Verifier/gcroot-alloca.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
; PR1633
%meta = type { i8* }
diff --git a/test/Verifier/gcroot-meta.ll b/test/Verifier/gcroot-meta.ll
index 1836f61c7ad6..26f7b5156294 100644
--- a/test/Verifier/gcroot-meta.ll
+++ b/test/Verifier/gcroot-meta.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
; PR1633
%meta = type { i8* }
diff --git a/test/Verifier/gcroot-ptrptr.ll b/test/Verifier/gcroot-ptrptr.ll
index b573295e3e94..8d7557d75a49 100644
--- a/test/Verifier/gcroot-ptrptr.ll
+++ b/test/Verifier/gcroot-ptrptr.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
; PR1633
%meta = type { i8* }
diff --git a/test/Verifier/gcwrite-ptrptr.ll b/test/Verifier/gcwrite-ptrptr.ll
index 1f60becc3327..dec1e6bcd334 100644
--- a/test/Verifier/gcwrite-ptrptr.ll
+++ b/test/Verifier/gcwrite-ptrptr.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as < %s >& /dev/null
+; RUN: not llvm-as < %s > /dev/null 2>&1
; PR1633
%meta = type { i8* }
diff --git a/test/lit.cfg b/test/lit.cfg
index ed1ba2d11b1a..8ed9187aea77 100644
--- a/test/lit.cfg
+++ b/test/lit.cfg
@@ -6,6 +6,7 @@ import os
import sys
import re
import platform
+import subprocess
import lit.util
import lit.formats
@@ -150,8 +151,8 @@ if config.test_exec_root is None:
lit_config.fatal('No site specific configuration available!')
# Get the source and object roots.
- llvm_src_root = lit.util.capture(['llvm-config', '--src-root']).strip()
- llvm_obj_root = lit.util.capture(['llvm-config', '--obj-root']).strip()
+ llvm_src_root = subprocess.check_output(['llvm-config', '--src-root']).strip()
+ llvm_obj_root = subprocess.check_output(['llvm-config', '--obj-root']).strip()
# Validate that we got a tree which points to here.
this_src_root = os.path.dirname(config.test_source_root)
diff --git a/test/tools/llvm-cov/threads.c b/test/tools/llvm-cov/threads.c
new file mode 100644
index 000000000000..00a85edb7ce8
--- /dev/null
+++ b/test/tools/llvm-cov/threads.c
@@ -0,0 +1,11 @@
+// Coverage/profile data recycled from the showLineExecutionCounts.cpp test.
+//
+// RUN: llvm-profdata merge %S/Inputs/lineExecutionCounts.proftext -o %t.profdata
+// RUN: llvm-cov show %S/Inputs/lineExecutionCounts.covmapping -j 1 -o %t1.dir -instr-profile %t.profdata -filename-equivalence %S/showLineExecutionCounts.cpp
+// RUN: llvm-cov show %S/Inputs/lineExecutionCounts.covmapping -num-threads 2 -o %t2.dir -instr-profile %t.profdata -filename-equivalence %S/showLineExecutionCounts.cpp
+// RUN: llvm-cov show %S/Inputs/lineExecutionCounts.covmapping -o %t3.dir -instr-profile %t.profdata -filename-equivalence %S/showLineExecutionCounts.cpp
+//
+// RUN: diff %t1.dir/index.txt %t2.dir/index.txt
+// RUN: diff %t1.dir/coverage/tmp/showLineExecutionCounts.cpp.txt %t2.dir/coverage/tmp/showLineExecutionCounts.cpp.txt
+// RUN: diff %t1.dir/index.txt %t3.dir/index.txt
+// RUN: diff %t1.dir/coverage/tmp/showLineExecutionCounts.cpp.txt %t3.dir/coverage/tmp/showLineExecutionCounts.cpp.txt
diff --git a/test/tools/llvm-cov/zeroFunctionFile.c b/test/tools/llvm-cov/zeroFunctionFile.c
index 87b6ecd3abb3..d5b983efb817 100644
--- a/test/tools/llvm-cov/zeroFunctionFile.c
+++ b/test/tools/llvm-cov/zeroFunctionFile.c
@@ -13,7 +13,7 @@ int main() {
// REPORT: 0 0 - 0 0 - 0 0 - 0 0 -
// REPORT-NO: 0%
-// RUN: llvm-cov show %S/Inputs/zeroFunctionFile.covmapping -format html -instr-profile %t.profdata -o %t.dir
+// RUN: llvm-cov show -j 1 %S/Inputs/zeroFunctionFile.covmapping -format html -instr-profile %t.profdata -o %t.dir
// RUN: FileCheck %s -input-file=%t.dir/index.html -check-prefix=HTML
// HTML: <td class='column-entry-green'><pre>- (0/0)
// HTML-NO: 0.00% (0/0)
diff --git a/test/tools/llvm-objdump/ARM/Inputs/reloc-half.obj.macho-arm b/test/tools/llvm-objdump/ARM/Inputs/reloc-half.obj.macho-arm
new file mode 100644
index 000000000000..79d19962e00b
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/Inputs/reloc-half.obj.macho-arm
Binary files differ
diff --git a/test/tools/llvm-objdump/ARM/macho-reloc-half.test b/test/tools/llvm-objdump/ARM/macho-reloc-half.test
new file mode 100644
index 000000000000..888c7f589116
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/macho-reloc-half.test
@@ -0,0 +1,4 @@
+RUN: llvm-objdump -r %p/Inputs/reloc-half.obj.macho-arm | FileCheck %s
+
+CHECK-DAG: 00000004 ARM_RELOC_HALF :upper16:(_stringbuf)
+CHECK-DAG: 00000000 ARM_RELOC_HALF :lower16:(_stringbuf)
diff --git a/test/tools/llvm-objdump/Inputs/test.wasm b/test/tools/llvm-objdump/Inputs/test.wasm
deleted file mode 100644
index d3906eeaf6f8..000000000000
--- a/test/tools/llvm-objdump/Inputs/test.wasm
+++ /dev/null
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/trivial.ll b/test/tools/llvm-objdump/Inputs/trivial.ll
new file mode 100644
index 000000000000..6dd510a12b66
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/trivial.ll
@@ -0,0 +1,19 @@
+; Input used for generating checked-in binaries (trivial.obj.*)
+; llc -mtriple=wasm32-unknown-unknown-wasm trivial.ll -filetype=obj -o trivial.obj.wasm
+
+@.str = private unnamed_addr constant [13 x i8] c"Hello World\0A\00", align 1
+
+define i32 @main() nounwind {
+entry:
+ %call = tail call i32 @puts(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0)) nounwind
+ tail call void bitcast (void (...)* @SomeOtherFunction to void ()*)() nounwind
+ ret i32 0
+}
+
+declare i32 @puts(i8* nocapture) nounwind
+
+declare void @SomeOtherFunction(...)
+
+@var = global i32 0
+@llvm.used = appending global [1 x i8*] [i8* bitcast (i32* @var to i8*)], section "llvm.metadata"
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* null, i8* null }]
diff --git a/test/tools/llvm-objdump/Inputs/trivial.obj.wasm b/test/tools/llvm-objdump/Inputs/trivial.obj.wasm
new file mode 100644
index 000000000000..1f3947ac472e
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/trivial.obj.wasm
Binary files differ
diff --git a/test/tools/llvm-objdump/WebAssembly/symbol-table.test b/test/tools/llvm-objdump/WebAssembly/symbol-table.test
index 8936c7a12e4c..2c49d5d65c5d 100644
--- a/test/tools/llvm-objdump/WebAssembly/symbol-table.test
+++ b/test/tools/llvm-objdump/WebAssembly/symbol-table.test
@@ -1,8 +1,11 @@
-RUN: llvm-objdump -t %p/../Inputs/test.wasm | FileCheck %s
+RUN: llvm-objdump -t %p/../Inputs/trivial.obj.wasm | FileCheck %s
+
+CHECK: SYMBOL TABLE:
+CHECK-NEXT: 00000000 l F IMPORT puts
+CHECK-NEXT: 00000000 l F IMPORT SomeOtherFunction
+CHECK-NEXT: 00000002 g F EXPORT main
+CHECK-NEXT: 00000001 g EXPORT var
+CHECK-NEXT: 00000000 l F name puts
+CHECK-NEXT: 00000001 l F name SomeOtherFunction
+CHECK-NEXT: 00000002 l F name main
-CHECK: SYMBOL TABLE:
-CHECK: 00000000 l F IMPORT bar
-CHECK: 00000000 g F EXPORT baz
-CHECK: 00000001 g F EXPORT quux
-CHECK: 00000000 l F name $import
-CHECK: 00000001 l F name $func0
diff --git a/test/tools/llvm-objdump/wasm.txt b/test/tools/llvm-objdump/wasm.txt
index 4aa40c6c9df8..828fa34b2b46 100644
--- a/test/tools/llvm-objdump/wasm.txt
+++ b/test/tools/llvm-objdump/wasm.txt
@@ -1,24 +1,27 @@
-# RUN: llvm-objdump -h %p/Inputs/test.wasm | FileCheck %s
+# RUN: llvm-objdump -h %p/Inputs/trivial.obj.wasm | FileCheck %s
-# CHECK: Sections:
-# CHECK: Idx Name Size Address Type
-# CHECK: 0 TYPE 0000000f 0000000000000000
-# CHECK: 1 IMPORT 0000000b 0000000000000000
-# CHECK: 2 FUNCTION 00000003 0000000000000000
-# CHECK: 3 TABLE 00000005 0000000000000000
-# CHECK: 4 EXPORT 0000000e 0000000000000000
-# CHECK: 5 ELEM 00000007 0000000000000000
-# CHECK: 6 CODE 0000002a 0000000000000000 TEXT
-# CHECK: 7 name 0000003c 0000000000000000
+# CHECK: Sections:
+# CHECK-NEXT: Idx Name Size Address Type
+# CHECK-NEXT: 0 TYPE 0000000e 0000000000000000
+# CHECK-NEXT: 1 IMPORT 00000024 0000000000000000
+# CHECK-NEXT: 2 FUNCTION 00000002 0000000000000000
+# CHECK-NEXT: 3 TABLE 00000004 0000000000000000
+# CHECK-NEXT: 4 MEMORY 00000003 0000000000000000
+# CHECK-NEXT: 5 GLOBAL 0000000b 0000000000000000
+# CHECK-NEXT: 6 EXPORT 0000000e 0000000000000000
+# CHECK-NEXT: 7 CODE 00000019 0000000000000000 TEXT
+# CHECK-NEXT: 8 DATA 0000001a 0000000000000000 DATA
+# CHECK-NEXT: 9 name 0000002b 0000000000000000
+# CHECK-NEXT: 10 reloc.CODE 00000017 0000000000000000
+# CHECK-NEXT: 11 linking 00000016 0000000000000000
-# RUN: llvm-objdump -p %p/Inputs/test.wasm | FileCheck %s -check-prefix CHECK-HEADER
+# RUN: llvm-objdump -p %p/Inputs/trivial.obj.wasm | FileCheck %s -check-prefix CHECK-HEADER
# CHECK-HEADER: Program Header:
# CHECK-HEADER: Version: 0x1
-# RUN: llvm-objdump -s --section=CODE %p/Inputs/test.wasm | FileCheck %s -check-prefix CHECK-SECTIONS
+# RUN: llvm-objdump -s --section=CODE %p/Inputs/trivial.obj.wasm | FileCheck %s -check-prefix CHECK-SECTIONS
# CHECK-SECTIONS: Contents of section CODE:
-# CHECK-SECTIONS: 0000 02070043 0000803f 0b200201 7d017c10 ...C...?. ..}.|.
-# CHECK-SECTIONS: 0010 001a4100 10011a41 00410111 00001a20 ..A....A.A.....
-# CHECK-SECTIONS: 0020 011a4300 00000021 020b ..C....!..
+# CHECK-SECTIONS: 0000 01170041 80808080 00108080 8080001a ...A............
+# CHECK-SECTIONS: 0010 10818080 80004100 0b ......A..
diff --git a/test/tools/llvm-pdbdump/partial-type-stream.test b/test/tools/llvm-pdbdump/partial-type-stream.test
index 3a853c391450..7c62acce7ad4 100644
--- a/test/tools/llvm-pdbdump/partial-type-stream.test
+++ b/test/tools/llvm-pdbdump/partial-type-stream.test
@@ -17,8 +17,7 @@ DEPS: Types (TPI Stream)
DEPS-NEXT: ============================================================
DEPS-NEXT: Showing 1 records and their dependents (4 records total)
DEPS-NEXT: 0x100E | LF_ARGLIST [size = 8]
-DEPS-NEXT: 0x1017 | LF_CLASS [size = 60]
-DEPS-NEXT: class name: `MembersTest::A`
+DEPS-NEXT: 0x1017 | LF_CLASS [size = 60] `MembersTest::A`
DEPS-NEXT: unique name: `.?AVA@MembersTest@@`
DEPS-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
DEPS-NEXT: options: forward ref | has unique name
diff --git a/test/tools/llvm-profdata/c-general.test b/test/tools/llvm-profdata/c-general.test
index 0ec7c113eb4c..ddb95d1260d8 100644
--- a/test/tools/llvm-profdata/c-general.test
+++ b/test/tools/llvm-profdata/c-general.test
@@ -10,6 +10,7 @@ REGENERATE: $ clang -o a.out -fprofile-instr-generate $CFE_TESTDIR/c-general.c
REGENERATE: $ LLVM_PROFILE_FILE=$TESTDIR/Inputs/c-general.profraw ./a.out
RUN: llvm-profdata show %p/Inputs/c-general.profraw -o - | FileCheck %s
+RUN: llvm-profdata show %p/Inputs/c-general.profraw --topn=3 -o - | FileCheck %s --check-prefix=TOPN
RUN: llvm-profdata show %p/Inputs/c-general.profraw -o - --function=switches | FileCheck %s -check-prefix=SWITCHES -check-prefix=CHECK
SWITCHES-LABEL: Counters:
@@ -22,3 +23,6 @@ SWITCHES-LABEL: Functions shown: 1
CHECK-LABEL: Total functions: 12
CHECK-NEXT: Maximum function count: 1
CHECK-NEXT: Maximum internal block count: 100
+TOPN: boolean_operators, max count = 100
+TOPN-NEXT: simple_loops, max count = 100
+TOPN-NEXT: conditionals, max count = 100
diff --git a/test/tools/llvm-readobj/Inputs/trivial.ll b/test/tools/llvm-readobj/Inputs/trivial.ll
index f79b8b897691..e0e519d064de 100644
--- a/test/tools/llvm-readobj/Inputs/trivial.ll
+++ b/test/tools/llvm-readobj/Inputs/trivial.ll
@@ -1,9 +1,11 @@
-; llc -mtriple=i386-pc-win32 trivial.ll -filetype=obj -o trivial-object-test.coff-i386
-; llc -mtriple=x86_64-pc-win32 trivial.ll -filetype=obj -o trivial-object-test.coff-x86-64
-; llc -mtriple=i386-linux-gnu trivial.ll -filetype=obj -o trivial-object-test.elf-i386 -relocation-model=pic
-; llc -mtriple=x86_64-linux-gnu trivial.ll -filetype=obj -o trivial-object-test.elf-x86-64 -relocation-model=pic
-; llc -mtriple=i386-apple-darwin10 trivial.ll -filetype=obj -o trivial-object-test.macho-i386 -relocation-model=pic
-; llc -mtriple=x86_64-apple-darwin10 trivial.ll -filetype=obj -o trivial-object-test.macho-x86-64 -relocation-model=pic
+; Input used for generating checked-in binaries (trivial.obj.*)
+; llc -mtriple=i386-pc-win32 trivial.ll -filetype=obj -o trivial.obj.coff-i386
+; llc -mtriple=x86_64-pc-win32 trivial.ll -filetype=obj -o trivial.obj.coff-x86-64
+; llc -mtriple=i386-linux-gnu trivial.ll -filetype=obj -o trivial.obj.elf-i386 -relocation-model=pic
+; llc -mtriple=x86_64-linux-gnu trivial.ll -filetype=obj -o trivial.obj.elf-x86-64 -relocation-model=pic
+; llc -mtriple=i386-apple-darwin10 trivial.ll -filetype=obj -o trivial.obj.macho-i386 -relocation-model=pic
+; llc -mtriple=x86_64-apple-darwin10 trivial.ll -filetype=obj -o trivial.obj.macho-x86-64 -relocation-model=pic
+; llc -mtriple=wasm32-unknown-unknown-wasm trivial.ll -filetype=obj -o trivial.obj.wasm
@.str = private unnamed_addr constant [13 x i8] c"Hello World\0A\00", align 1
diff --git a/test/tools/llvm-readobj/Inputs/trivial.obj.wasm b/test/tools/llvm-readobj/Inputs/trivial.obj.wasm
index f14192f1798b..caa702f70015 100644
--- a/test/tools/llvm-readobj/Inputs/trivial.obj.wasm
+++ b/test/tools/llvm-readobj/Inputs/trivial.obj.wasm
Binary files differ
diff --git a/test/tools/llvm-readobj/codeview-linetables.test b/test/tools/llvm-readobj/codeview-linetables.test
index fe68e7efdb05..9256aefe4330 100644
--- a/test/tools/llvm-readobj/codeview-linetables.test
+++ b/test/tools/llvm-readobj/codeview-linetables.test
@@ -41,7 +41,7 @@ MFUN32: ]
MFUN32: Subsection [
MFUN32-NEXT: SubSectionType: Symbols (0xF1)
MFUN32-NEXT: SubSectionSize: 0x4B
-MFUN32: ProcStart {
+MFUN32: GlobalProcIdSym {
MFUN32: CodeSize: 0xA
MFUN32: DisplayName: x
MFUN32: LinkageName: _x
@@ -60,7 +60,7 @@ MFUN32: ]
MFUN32: Subsection [
MFUN32-NEXT: SubSectionType: Symbols (0xF1)
MFUN32-NEXT: SubSectionSize: 0x4B
-MFUN32: ProcStart {
+MFUN32: GlobalProcIdSym {
MFUN32: CodeSize: 0xA
MFUN32: DisplayName: y
MFUN32: LinkageName: _y
@@ -79,7 +79,7 @@ MFUN32: ]
MFUN32: Subsection [
MFUN32-NEXT: SubSectionType: Symbols (0xF1)
MFUN32-NEXT: SubSectionSize: 0x4B
-MFUN32: ProcStart {
+MFUN32: GlobalProcIdSym {
MFUN32: CodeSize: 0x14
MFUN32: DisplayName: f
MFUN32: LinkageName: _f
@@ -193,7 +193,7 @@ MFUN64: ]
MFUN64: Subsection [
MFUN64-NEXT: SubSectionType: Symbols (0xF1)
MFUN64-NEXT: SubSectionSize: 0x4B
-MFUN64: ProcStart {
+MFUN64: GlobalProcIdSym {
MFUN64: CodeSize: 0xE
MFUN64: DisplayName: x
MFUN64: LinkageName: x
@@ -208,7 +208,7 @@ MFUN64-NEXT: ]
MFUN64-NEXT: Subsection [
MFUN64-NEXT: SubSectionType: Symbols (0xF1)
MFUN64-NEXT: SubSectionSize: 0x4B
-MFUN64: ProcStart {
+MFUN64: GlobalProcIdSym {
MFUN64: CodeSize: 0xE
MFUN64: DisplayName: y
MFUN64: LinkageName: y
@@ -223,7 +223,7 @@ MFUN64-NEXT: ]
MFUN64-NEXT: Subsection [
MFUN64-NEXT: SubSectionType: Symbols (0xF1)
MFUN64-NEXT: SubSectionSize: 0x4B
-MFUN64: ProcStart {
+MFUN64: GlobalProcIdSym {
MFUN64: CodeSize: 0x18
MFUN64: DisplayName: f
MFUN64: LinkageName: f
@@ -365,7 +365,7 @@ MFILE32: ]
MFILE32: Subsection [
MFILE32-NEXT: SubSectionType: Symbols (0xF1)
MFILE32-NEXT: SubSectionSize: 0x4B
-MFILE32: ProcStart {
+MFILE32: GlobalProcIdSym {
MFILE32: CodeSize: 0x14
MFILE32: DisplayName: f
MFILE32: LinkageName: _f
@@ -442,7 +442,7 @@ MFILE64: ]
MFILE64: Subsection [
MFILE64-NEXT: SubSectionType: Symbols (0xF1)
MFILE64-NEXT: SubSectionSize: 0x4B
-MFILE64: ProcStart {
+MFILE64: GlobalProcIdSym {
MFILE64: CodeSize: 0x18
MFILE64: DisplayName: f
MFILE64: LinkageName: f
@@ -528,7 +528,7 @@ RUN: | FileCheck %s -check-prefix MCOMDAT
RUN: llvm-readobj -s -codeview -section-symbols %p/Inputs/comdat-function-linetables.obj.coff-2013-i386 \
RUN: | FileCheck %s -check-prefix MCOMDAT
-MCOMDAT: ProcStart {
+MCOMDAT: GlobalProcIdSym {
MCOMDAT: CodeSize: 0x7
MCOMDAT: DisplayName: f
MCOMDAT: LinkageName: ?f@@YAHXZ
@@ -556,7 +556,7 @@ MCOMDAT-NEXT: IsStatement: Yes
MCOMDAT-NEXT: ]
MCOMDAT-NEXT: ]
MCOMDAT-NEXT: ]
-MCOMDAT: ProcStart {
+MCOMDAT: GlobalProcIdSym {
MCOMDAT: CodeSize: 0x7
MCOMDAT: DisplayName: g
MCOMDAT: LinkageName: ?g@@YAHXZ
diff --git a/test/tools/llvm-readobj/file-headers.test b/test/tools/llvm-readobj/file-headers.test
index 6bc9714f2037..65ccd50a2729 100644
--- a/test/tools/llvm-readobj/file-headers.test
+++ b/test/tools/llvm-readobj/file-headers.test
@@ -28,9 +28,6 @@ RUN: llvm-readobj -h %p/Inputs/magic.coff-importlib \
RUN: | FileCheck %s -check-prefix COFF-IMPORTLIB
RUN: llvm-readobj -h %p/Inputs/trivial.obj.elf-lanai \
RUN: | FileCheck %s -check-prefix ELF-LANAI
-# trivial.obj.wasm was generated using the following command:
-# echo "extern int bar, baz; int foo() { return bar + baz + (int)&foo; }" | \
-# ./bin/clang -c -o trivial.obj.wasm -target wasm32-unknown-unknown-wasm -x c -
RUN: llvm-readobj -h %p/Inputs/trivial.obj.wasm \
RUN: | FileCheck %s -check-prefix WASM
diff --git a/test/tools/llvm-readobj/relocations.test b/test/tools/llvm-readobj/relocations.test
index 9c7dcf1d659c..85ccd3cefa1b 100644
--- a/test/tools/llvm-readobj/relocations.test
+++ b/test/tools/llvm-readobj/relocations.test
@@ -289,21 +289,20 @@ MACHO-ARM-NEXT: ]
WASM: Relocations [
WASM-NEXT: Section (8) CODE {
WASM-NEXT: Relocation {
-WASM-NEXT: Type: R_WEBASSEMBLY_TABLE_INDEX_SLEB (1)
-WASM-NEXT: Offset: 0x6
+WASM-NEXT: Type: R_WEBASSEMBLY_GLOBAL_ADDR_SLEB (4)
+WASM-NEXT: Offset: 0x4
WASM-NEXT: Index: 0x0
+WASM-NEXT: Addend: 0
WASM-NEXT: }
WASM-NEXT: Relocation {
-WASM-NEXT: Type: R_WEBASSEMBLY_GLOBAL_ADDR_LEB (3)
-WASM-NEXT: Offset: 0x15
+WASM-NEXT: Type: R_WEBASSEMBLY_FUNCTION_INDEX_LEB (0)
+WASM-NEXT: Offset: 0xA
WASM-NEXT: Index: 0x0
-WASM-NEXT: Addend: 0
WASM-NEXT: }
WASM-NEXT: Relocation {
-WASM-NEXT: Type: R_WEBASSEMBLY_GLOBAL_ADDR_LEB (3)
-WASM-NEXT: Offset: 0x24
+WASM-NEXT: Type: R_WEBASSEMBLY_FUNCTION_INDEX_LEB (0)
+WASM-NEXT: Offset: 0x11
WASM-NEXT: Index: 0x1
-WASM-NEXT: Addend: 0
WASM-NEXT: }
WASM-NEXT: }
WASM-NEXT: ]
diff --git a/test/tools/llvm-readobj/sections.test b/test/tools/llvm-readobj/sections.test
index 1747ee45d4f3..4eda5dae882a 100644
--- a/test/tools/llvm-readobj/sections.test
+++ b/test/tools/llvm-readobj/sections.test
@@ -493,62 +493,75 @@ MACHO-ARM-NEXT: Reserved2: 0x0
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT:]
-WASM: Sections [
-WASM-NEXT: Section {
-WASM-NEXT: Type: TYPE (0x1)
-WASM-NEXT: Size: 5
-WASM-NEXT: Offset: 8
-WASM-NEXT: }
-WASM-NEXT: Section {
-WASM-NEXT: Type: IMPORT (0x2)
-WASM-NEXT: Size: 23
-WASM-NEXT: Offset: 19
-WASM-NEXT: }
-WASM-NEXT: Section {
-WASM-NEXT: Type: FUNCTION (0x3)
-WASM-NEXT: Size: 2
-WASM-NEXT: Offset: 48
-WASM-NEXT: }
-WASM-NEXT: Section {
-WASM-NEXT: Type: TABLE (0x4)
-WASM-NEXT: Size: 4
-WASM-NEXT: Offset: 56
-WASM-NEXT: }
-WASM-NEXT: Section {
-WASM-NEXT: Type: MEMORY (0x5)
-WASM-NEXT: Size: 3
-WASM-NEXT: Offset: 66
-WASM-NEXT: Memories [
-WASM-NEXT: Memory {
-WASM-NEXT: InitialPages: 0
-WASM-NEXT: }
-WASM-NEXT: ]
-WASM-NEXT: }
-WASM-NEXT: Section {
-WASM-NEXT: Type: EXPORT (0x7)
-WASM-NEXT: Size: 7
-WASM-NEXT: Offset: 75
-WASM-NEXT: }
-WASM-NEXT: Section {
-WASM-NEXT: Type: ELEM (0x9)
-WASM-NEXT: Size: 7
-WASM-NEXT: Offset: 88
-WASM-NEXT: }
-WASM-NEXT: Section {
-WASM-NEXT: Type: CODE (0xA)
-WASM-NEXT: Size: 61
-WASM-NEXT: Offset: 101
-WASM-NEXT: }
-WASM-NEXT: Section {
-WASM-NEXT: Type: CUSTOM (0x0)
-WASM-NEXT: Size: 17
-WASM-NEXT: Offset: 168
-WASM-NEXT: Name: name
-WASM-NEXT: }
-WASM-NEXT: Section {
-WASM-NEXT: Type: CUSTOM (0x0)
-WASM-NEXT: Size: 24
-WASM-NEXT: Offset: 191
-WASM-NEXT: Name: reloc.CODE
-WASM-NEXT: }
-WASM-NEXT:]
+WASM: Sections [
+WASM-NEXT: Section {
+WASM-NEXT: Type: TYPE (0x1)
+WASM-NEXT: Size: 14
+WASM-NEXT: Offset: 8
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: IMPORT (0x2)
+WASM-NEXT: Size: 36
+WASM-NEXT: Offset: 28
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: FUNCTION (0x3)
+WASM-NEXT: Size: 2
+WASM-NEXT: Offset: 70
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: TABLE (0x4)
+WASM-NEXT: Size: 4
+WASM-NEXT: Offset: 78
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: MEMORY (0x5)
+WASM-NEXT: Size: 3
+WASM-NEXT: Offset: 88
+WASM-NEXT: Memories [
+WASM-NEXT: Memory {
+WASM-NEXT: InitialPages: 1
+WASM-NEXT: }
+WASM-NEXT: ]
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: GLOBAL (0x6)
+WASM-NEXT: Size: 6
+WASM-NEXT: Offset: 97
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: EXPORT (0x7)
+WASM-NEXT: Size: 8
+WASM-NEXT: Offset: 109
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: CODE (0xA)
+WASM-NEXT: Size: 25
+WASM-NEXT: Offset: 123
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: DATA (0xB)
+WASM-NEXT: Size: 19
+WASM-NEXT: Offset: 154
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: CUSTOM (0x0)
+WASM-NEXT: Size: 43
+WASM-NEXT: Offset: 179
+WASM-NEXT: Name: name
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: CUSTOM (0x0)
+WASM-NEXT: Size: 23
+WASM-NEXT: Offset: 228
+WASM-NEXT: Name: reloc.CODE
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: CUSTOM (0x0)
+WASM-NEXT: Size: 22
+WASM-NEXT: Offset: 257
+WASM-NEXT: Name: linking
+WASM-NEXT: DataSize: 13
+WASM-NEXT: DataAlignment: 1
+WASM-NEXT: }
+WASM-NEXT: ]
diff --git a/test/tools/llvm-readobj/symbols.test b/test/tools/llvm-readobj/symbols.test
index da8a70b031ab..380c6f6a5ee5 100644
--- a/test/tools/llvm-readobj/symbols.test
+++ b/test/tools/llvm-readobj/symbols.test
@@ -73,22 +73,32 @@ ELF-NEXT: }
WASM: Symbols [
WASM-NEXT: Symbol {
-WASM-NEXT: Name: bar
-WASM-NEXT: Type: GLOBAL_IMPORT (0x2)
+WASM-NEXT: Name: puts
+WASM-NEXT: Type: FUNCTION_IMPORT (0x0)
WASM-NEXT: Flags: 0x0
WASM-NEXT: }
WASM-NEXT: Symbol {
-WASM-NEXT: Name: baz
-WASM-NEXT: Type: GLOBAL_IMPORT (0x2)
+WASM-NEXT: Name: SomeOtherFunction
+WASM-NEXT: Type: FUNCTION_IMPORT (0x0)
WASM-NEXT: Flags: 0x0
WASM-NEXT: }
WASM-NEXT: Symbol {
-WASM-NEXT: Name: foo
+WASM-NEXT: Name: main
WASM-NEXT: Type: FUNCTION_EXPORT (0x1)
WASM-NEXT: Flags: 0x0
WASM-NEXT: }
WASM-NEXT: Symbol {
-WASM-NEXT: Name: foo
+WASM-NEXT: Name: puts
+WASM-NEXT: Type: DEBUG_FUNCTION_NAME (0x4)
+WASM-NEXT: Flags: 0x0
+WASM-NEXT: }
+WASM-NEXT: Symbol {
+WASM-NEXT: Name: SomeOtherFunction
+WASM-NEXT: Type: DEBUG_FUNCTION_NAME (0x4)
+WASM-NEXT: Flags: 0x0
+WASM-NEXT: }
+WASM-NEXT: Symbol {
+WASM-NEXT: Name: main
WASM-NEXT: Type: DEBUG_FUNCTION_NAME (0x4)
WASM-NEXT: Flags: 0x0
WASM-NEXT: }
diff --git a/tools/gold/gold-plugin.cpp b/tools/gold/gold-plugin.cpp
index cf207d9dbbb3..6d011bab079d 100644
--- a/tools/gold/gold-plugin.cpp
+++ b/tools/gold/gold-plugin.cpp
@@ -477,7 +477,7 @@ static ld_plugin_status claim_file_hook(const ld_plugin_input_file *file,
std::unique_ptr<InputFile> Obj = std::move(*ObjOrErr);
- Modules.resize(Modules.size() + 1);
+ Modules.emplace_back();
claimed_file &cf = Modules.back();
cf.handle = file->handle;
diff --git a/tools/lli/OrcLazyJIT.cpp b/tools/lli/OrcLazyJIT.cpp
index 2e15894152f9..f1a752e0790d 100644
--- a/tools/lli/OrcLazyJIT.cpp
+++ b/tools/lli/OrcLazyJIT.cpp
@@ -148,18 +148,19 @@ int llvm::runOrcLazyJIT(std::vector<std::unique_ptr<Module>> Ms,
// Add the module, look up main and run it.
for (auto &M : Ms)
- J.addModule(std::shared_ptr<Module>(std::move(M)));
- auto MainSym = J.findSymbol("main");
-
- if (!MainSym) {
+ cantFail(J.addModule(std::shared_ptr<Module>(std::move(M))));
+
+ if (auto MainSym = J.findSymbol("main")) {
+ typedef int (*MainFnPtr)(int, const char*[]);
+ std::vector<const char *> ArgV;
+ for (auto &Arg : Args)
+ ArgV.push_back(Arg.c_str());
+ auto Main = fromTargetAddress<MainFnPtr>(cantFail(MainSym.getAddress()));
+ return Main(ArgV.size(), (const char**)ArgV.data());
+ } else if (auto Err = MainSym.takeError())
+ logAllUnhandledErrors(std::move(Err), llvm::errs(), "");
+ else
errs() << "Could not find main function.\n";
- return 1;
- }
- using MainFnPtr = int (*)(int, const char*[]);
- std::vector<const char *> ArgV;
- for (auto &Arg : Args)
- ArgV.push_back(Arg.c_str());
- auto Main = fromTargetAddress<MainFnPtr>(MainSym.getAddress());
- return Main(ArgV.size(), (const char**)ArgV.data());
+ return 1;
}
diff --git a/tools/lli/OrcLazyJIT.h b/tools/lli/OrcLazyJIT.h
index fc02a10b514e..47a2acc4d7e6 100644
--- a/tools/lli/OrcLazyJIT.h
+++ b/tools/lli/OrcLazyJIT.h
@@ -61,7 +61,8 @@ public:
IndirectStubsManagerBuilder IndirectStubsMgrBuilder,
bool InlineStubs)
: TM(std::move(TM)), DL(this->TM->createDataLayout()),
- CCMgr(std::move(CCMgr)),
+ CCMgr(std::move(CCMgr)),
+ ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
CompileLayer(ObjectLayer, orc::SimpleCompiler(*this->TM)),
IRDumpLayer(CompileLayer, createDebugDumper()),
CODLayer(IRDumpLayer, extractSingleFunction, *this->CCMgr,
@@ -74,10 +75,14 @@ public:
CXXRuntimeOverrides.runDestructors();
// Run any IR destructors.
for (auto &DtorRunner : IRStaticDestructorRunners)
- DtorRunner.runViaLayer(CODLayer);
+ if (auto Err = DtorRunner.runViaLayer(CODLayer)) {
+ // FIXME: OrcLazyJIT should probably take a "shutdownError" callback to
+ // report these errors on.
+ report_fatal_error(std::move(Err));
+ }
}
- void addModule(std::shared_ptr<Module> M) {
+ Error addModule(std::shared_ptr<Module> M) {
if (M->getDataLayout().isDefault())
M->setDataLayout(DL);
@@ -124,21 +129,27 @@ public:
);
// Add the module to the JIT.
- ModulesHandle =
- CODLayer.addModule(std::move(M),
- llvm::make_unique<SectionMemoryManager>(),
- std::move(Resolver));
+ if (auto ModulesHandleOrErr =
+ CODLayer.addModule(std::move(M), std::move(Resolver)))
+ ModulesHandle = std::move(*ModulesHandleOrErr);
+ else
+ return ModulesHandleOrErr.takeError();
+
} else
- CODLayer.addExtraModule(ModulesHandle, std::move(M));
+ if (auto Err = CODLayer.addExtraModule(ModulesHandle, std::move(M)))
+ return Err;
// Run the static constructors, and save the static destructor runner for
// execution when the JIT is torn down.
orc::CtorDtorRunner<CODLayerT> CtorRunner(std::move(CtorNames),
ModulesHandle);
- CtorRunner.runViaLayer(CODLayer);
+ if (auto Err = CtorRunner.runViaLayer(CODLayer))
+ return Err;
IRStaticDestructorRunners.emplace_back(std::move(DtorNames),
ModulesHandle);
+
+ return Error::success();
}
JITSymbol findSymbol(const std::string &Name) {
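Note: the lli/OrcLazyJIT changes above move addModule and the constructor/destructor runners onto LLVM's Error/Expected error model (cantFail, takeError, Error::success). A minimal stand-alone sketch of that idiom follows; computeValue/doWork are invented names, not part of this patch.

// Sketch only: illustrates the Expected<T>/Error handling pattern used above.
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static Expected<int> computeValue(bool Fail) {
  if (Fail)
    return make_error<StringError>("failed", inconvertibleErrorCode());
  return 42;
}

static Error doWork() {
  if (auto V = computeValue(false))
    outs() << *V << "\n";        // success: use the value
  else
    return V.takeError();        // failure: propagate the Error to the caller
  return Error::success();
}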
diff --git a/tools/lli/RemoteJITUtils.h b/tools/lli/RemoteJITUtils.h
index 3c82f73ff072..4e948413865c 100644
--- a/tools/lli/RemoteJITUtils.h
+++ b/tools/lli/RemoteJITUtils.h
@@ -84,7 +84,7 @@ public:
this->MemMgr = std::move(MemMgr);
}
- void setResolver(std::unique_ptr<JITSymbolResolver> Resolver) {
+ void setResolver(std::shared_ptr<JITSymbolResolver> Resolver) {
this->Resolver = std::move(Resolver);
}
@@ -145,7 +145,7 @@ public:
private:
std::unique_ptr<RuntimeDyld::MemoryManager> MemMgr;
- std::unique_ptr<JITSymbolResolver> Resolver;
+ std::shared_ptr<JITSymbolResolver> Resolver;
};
}
diff --git a/tools/lli/lli.cpp b/tools/lli/lli.cpp
index f228a3619457..091ca22b4e82 100644
--- a/tools/lli/lli.cpp
+++ b/tools/lli/lli.cpp
@@ -646,7 +646,7 @@ int main(int argc, char **argv, char * const *envp) {
// else == "if (RemoteMCJIT)"
// Remote target MCJIT doesn't (yet) support static constructors. No reason
- // it couldn't. This is a limitation of the LLI implemantation, not the
+ // it couldn't. This is a limitation of the LLI implementation, not the
// MCJIT itself. FIXME.
// Lanch the remote process and get a channel to it.
diff --git a/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp b/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
index 528247c2dbc3..529bdf5b7d93 100644
--- a/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
+++ b/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
@@ -71,6 +71,10 @@ static cl::opt<bool>
ShowBinaryBlobs("show-binary-blobs",
cl::desc("Print binary blobs using hex escapes"));
+static cl::opt<std::string> CheckHash(
+ "check-hash",
+ cl::desc("Check module hash using the argument as a string table"));
+
namespace {
/// CurStreamTypeType - A type for CurStreamType
@@ -652,13 +656,15 @@ static bool ParseBlock(BitstreamCursor &Stream, BitstreamBlockInfo &BlockInfo,
}
// If we found a module hash, let's verify that it matches!
- if (BlockID == bitc::MODULE_BLOCK_ID && Code == bitc::MODULE_CODE_HASH) {
+ if (BlockID == bitc::MODULE_BLOCK_ID && Code == bitc::MODULE_CODE_HASH &&
+ !CheckHash.empty()) {
if (Record.size() != 5)
outs() << " (invalid)";
else {
// Recompute the hash and compare it to the one in the bitcode
SHA1 Hasher;
StringRef Hash;
+ Hasher.update(CheckHash);
{
int BlockSize = (CurrentRecordPos / 8) - BlockEntryPos;
auto Ptr = Stream.getPointerToByte(BlockEntryPos, BlockSize);
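Note: the -check-hash change above seeds the recomputed module hash with the supplied string table before adding the module block bytes. A small stand-alone sketch of that hashing pattern; the string and block bytes here are invented placeholders.

// Sketch only: made-up inputs showing the seed-then-hash pattern.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/SHA1.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void demoHash() {
  SHA1 Hasher;
  StringRef Strtab = "example-string-table";   // stands in for the -check-hash argument
  uint8_t Block[] = {0xDE, 0xAD, 0xBE, 0xEF};  // stands in for the module block bytes
  Hasher.update(Strtab);
  Hasher.update(ArrayRef<uint8_t>(Block));
  StringRef Digest = Hasher.result();          // raw 20-byte digest
  outs() << "digest length: " << Digest.size() << "\n";
}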
diff --git a/tools/llvm-c-test/echo.cpp b/tools/llvm-c-test/echo.cpp
index 52ce85c57782..966c0083bf87 100644
--- a/tools/llvm-c-test/echo.cpp
+++ b/tools/llvm-c-test/echo.cpp
@@ -765,7 +765,7 @@ static void declare_symbols(LLVMModuleRef Src, LLVMModuleRef M) {
LLVMValueRef Next = nullptr;
if (!Begin) {
if (End != nullptr)
- report_fatal_error("Range has an end but no begining");
+ report_fatal_error("Range has an end but no beginning");
goto FunDecl;
}
@@ -794,7 +794,7 @@ FunDecl:
End = LLVMGetLastFunction(Src);
if (!Begin) {
if (End != nullptr)
- report_fatal_error("Range has an end but no begining");
+ report_fatal_error("Range has an end but no beginning");
return;
}
@@ -844,7 +844,7 @@ static void clone_symbols(LLVMModuleRef Src, LLVMModuleRef M) {
LLVMValueRef Next = nullptr;
if (!Begin) {
if (End != nullptr)
- report_fatal_error("Range has an end but no begining");
+ report_fatal_error("Range has an end but no beginning");
goto FunClone;
}
@@ -885,7 +885,7 @@ FunClone:
End = LLVMGetLastFunction(Src);
if (!Begin) {
if (End != nullptr)
- report_fatal_error("Range has an end but no begining");
+ report_fatal_error("Range has an end but no beginning");
return;
}
diff --git a/tools/llvm-cov/CodeCoverage.cpp b/tools/llvm-cov/CodeCoverage.cpp
index 6179c760d5b2..3cbd6591134b 100644
--- a/tools/llvm-cov/CodeCoverage.cpp
+++ b/tools/llvm-cov/CodeCoverage.cpp
@@ -32,6 +32,7 @@
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/ToolOutputFile.h"
#include <functional>
@@ -705,6 +706,12 @@ int CodeCoverageTool::show(int argc, const char **argv,
"project-title", cl::Optional,
cl::desc("Set project title for the coverage report"));
+ cl::opt<unsigned> NumThreads(
+ "num-threads", cl::init(0),
+ cl::desc("Number of merge threads to use (default: autodetect)"));
+ cl::alias NumThreadsA("j", cl::desc("Alias for --num-threads"),
+ cl::aliasopt(NumThreads));
+
auto Err = commandLineParser(argc, argv);
if (Err)
return Err;
@@ -790,15 +797,19 @@ int CodeCoverageTool::show(int argc, const char **argv,
}
}
- // FIXME: Sink the hardware_concurrency() == 1 check into ThreadPool.
- if (!ViewOpts.hasOutputDirectory() ||
- std::thread::hardware_concurrency() == 1) {
+ // If NumThreads is not specified, auto-detect a good default.
+ if (NumThreads == 0)
+ NumThreads =
+ std::max(1U, std::min(llvm::heavyweight_hardware_concurrency(),
+ unsigned(SourceFiles.size())));
+
+ if (!ViewOpts.hasOutputDirectory() || NumThreads == 1) {
for (const std::string &SourceFile : SourceFiles)
writeSourceFileView(SourceFile, Coverage.get(), Printer.get(),
ShowFilenames);
} else {
// In -output-dir mode, it's safe to use multiple threads to print files.
- ThreadPool Pool;
+ ThreadPool Pool(NumThreads);
for (const std::string &SourceFile : SourceFiles)
Pool.async(&CodeCoverageTool::writeSourceFileView, this, SourceFile,
Coverage.get(), Printer.get(), ShowFilenames);
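Note: the -num-threads defaulting added above clamps the worker count to the smaller of the hardware concurrency and the number of input files, but never below one. A stand-alone sketch of that clamp; the helper name is made up.

// Sketch only: mirrors the -num-threads defaulting logic shown above.
#include "llvm/Support/Threading.h"
#include <algorithm>
#include <cstddef>

unsigned pickThreadCount(unsigned Requested, size_t NumInputs) {
  if (Requested != 0)
    return Requested;            // explicit -num-threads / -j wins
  return std::max(1U, std::min(llvm::heavyweight_hardware_concurrency(),
                               unsigned(NumInputs)));
}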
diff --git a/tools/llvm-lto/llvm-lto.cpp b/tools/llvm-lto/llvm-lto.cpp
index ccc673be4570..87cd13ad70de 100644
--- a/tools/llvm-lto/llvm-lto.cpp
+++ b/tools/llvm-lto/llvm-lto.cpp
@@ -383,7 +383,7 @@ loadAllFilesForIndex(const ModuleSummaryIndex &Index) {
for (auto &ModPath : Index.modulePaths()) {
const auto &Filename = ModPath.first();
- auto CurrentActivity = "loading file '" + Filename + "'";
+ std::string CurrentActivity = ("loading file '" + Filename + "'").str();
auto InputOrErr = MemoryBuffer::getFile(Filename);
error(InputOrErr, "error " + CurrentActivity);
InputBuffers.push_back(std::move(*InputOrErr));
@@ -475,7 +475,7 @@ private:
std::vector<std::unique_ptr<MemoryBuffer>> InputBuffers;
for (unsigned i = 0; i < InputFilenames.size(); ++i) {
auto &Filename = InputFilenames[i];
- StringRef CurrentActivity = "loading file '" + Filename + "'";
+ std::string CurrentActivity = "loading file '" + Filename + "'";
auto InputOrErr = MemoryBuffer::getFile(Filename);
error(InputOrErr, "error " + CurrentActivity);
InputBuffers.push_back(std::move(*InputOrErr));
@@ -710,7 +710,7 @@ private:
std::vector<std::unique_ptr<MemoryBuffer>> InputBuffers;
for (unsigned i = 0; i < InputFilenames.size(); ++i) {
auto &Filename = InputFilenames[i];
- StringRef CurrentActivity = "loading file '" + Filename + "'";
+ std::string CurrentActivity = "loading file '" + Filename + "'";
auto InputOrErr = MemoryBuffer::getFile(Filename);
error(InputOrErr, "error " + CurrentActivity);
InputBuffers.push_back(std::move(*InputOrErr));
diff --git a/tools/llvm-objdump/llvm-objdump.cpp b/tools/llvm-objdump/llvm-objdump.cpp
index be5635a3d4c6..812f1af3ac68 100644
--- a/tools/llvm-objdump/llvm-objdump.cpp
+++ b/tools/llvm-objdump/llvm-objdump.cpp
@@ -1032,7 +1032,7 @@ static std::error_code getRelocationValueString(const MachOObjectFile *Obj,
case MachO::ARM_RELOC_HALF_SECTDIFF: {
// Half relocations steal a bit from the length field to encode
// whether this is an upper16 or a lower16 relocation.
- bool isUpper = Obj->getAnyRelocationLength(RE) >> 1;
+ bool isUpper = (Obj->getAnyRelocationLength(RE) & 0x1) == 1;
if (isUpper)
fmt << ":upper16:(";
diff --git a/tools/llvm-pdbutil/CMakeLists.txt b/tools/llvm-pdbutil/CMakeLists.txt
index 7a3245424efc..bc28e6bdd7ea 100644
--- a/tools/llvm-pdbutil/CMakeLists.txt
+++ b/tools/llvm-pdbutil/CMakeLists.txt
@@ -11,6 +11,7 @@ add_llvm_tool(llvm-pdbutil
Analyze.cpp
BytesOutputStyle.cpp
Diff.cpp
+ DiffPrinter.cpp
DumpOutputStyle.cpp
llvm-pdbutil.cpp
FormatUtil.cpp
diff --git a/tools/llvm-pdbutil/Diff.cpp b/tools/llvm-pdbutil/Diff.cpp
index 9b38ae1d603e..aad4e1bf1427 100644
--- a/tools/llvm-pdbutil/Diff.cpp
+++ b/tools/llvm-pdbutil/Diff.cpp
@@ -9,22 +9,162 @@
#include "Diff.h"
+#include "DiffPrinter.h"
+#include "FormatUtil.h"
#include "StreamUtil.h"
#include "llvm-pdbutil.h"
+#include "llvm/ADT/StringSet.h"
+
+#include "llvm/DebugInfo/PDB/Native/DbiStream.h"
#include "llvm/DebugInfo/PDB/Native/Formatters.h"
#include "llvm/DebugInfo/PDB/Native/InfoStream.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTable.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatAdapters.h"
#include "llvm/Support/FormatProviders.h"
#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Path.h"
using namespace llvm;
using namespace llvm::pdb;
+namespace {
+// Compare and format two stream numbers. Stream numbers are considered
+// identical if they contain the same value, equivalent if they are both
+// the invalid stream or neither is the invalid stream, and different if
+// one is the invalid stream and another isn't.
+struct StreamNumberProvider {
+ static DiffResult compare(uint16_t L, uint16_t R) {
+ if (L == R)
+ return DiffResult::IDENTICAL;
+ bool LP = L != kInvalidStreamIndex;
+ bool RP = R != kInvalidStreamIndex;
+ if (LP != RP)
+ return DiffResult::DIFFERENT;
+ return DiffResult::EQUIVALENT;
+ }
+
+ static std::string format(uint16_t SN, bool Right) {
+ if (SN == kInvalidStreamIndex)
+ return "(not present)";
+ return formatv("{0}", SN).str();
+ }
+};
+
+// Compares and formats two module indices. Modis are considered identical
+// if they are identical, equivalent if they either both contain a value or
+// both don't contain a value, and different if one contains a value and the
+// other doesn't.
+struct ModiProvider {
+ DiffResult compare(Optional<uint32_t> L, Optional<uint32_t> R) {
+ if (L == R)
+ return DiffResult::IDENTICAL;
+ if (L.hasValue() != R.hasValue())
+ return DiffResult::DIFFERENT;
+ return DiffResult::EQUIVALENT;
+ }
+
+ std::string format(Optional<uint32_t> Modi, bool Right) {
+ if (!Modi.hasValue())
+ return "(not present)";
+ return formatv("{0}", *Modi).str();
+ }
+};
+
+// Compares and formats two paths embedded in the PDB, ignoring the beginning
+// of the path if the user specified it as a "root path" on the command line.
+struct BinaryPathProvider {
+ explicit BinaryPathProvider(uint32_t MaxLen) : MaxLen(MaxLen) {}
+
+ DiffResult compare(StringRef L, StringRef R) {
+ if (L == R)
+ return DiffResult::IDENTICAL;
+
+ SmallString<64> LN = removeRoot(L, false);
+ SmallString<64> RN = removeRoot(R, true);
+
+ return (LN.equals_lower(RN)) ? DiffResult::EQUIVALENT
+ : DiffResult::DIFFERENT;
+ }
+
+ std::string format(StringRef S, bool Right) {
+ if (S.empty())
+ return "(empty)";
+
+ SmallString<64> Native = removeRoot(S, Right);
+ return truncateStringFront(Native.str(), MaxLen);
+ }
+
+ SmallString<64> removeRoot(StringRef Path, bool IsRight) const {
+ SmallString<64> Native(Path);
+ auto &RootOpt = IsRight ? opts::diff::RightRoot : opts::diff::LeftRoot;
+ SmallString<64> Root(static_cast<std::string>(RootOpt));
+ // pdb paths always use windows syntax, convert slashes to backslashes.
+ sys::path::native(Root, sys::path::Style::windows);
+ if (sys::path::has_stem(Root, sys::path::Style::windows))
+ sys::path::append(Root, sys::path::Style::windows,
+ sys::path::get_separator(sys::path::Style::windows));
+
+ sys::path::replace_path_prefix(Native, Root, "", sys::path::Style::windows);
+ return Native;
+ }
+ uint32_t MaxLen;
+};
+
+// Compare and format two stream purposes. For general streams, this just
+// compares the description. For module streams it uses the path comparison
+// algorithm taking into consideration the binary root, described above.
+// Formatting stream purposes just prints the stream purpose, except for
+// module streams and named streams, where it prefixes the name / module
+// with an identifier. Example:
+//
+// Named Stream "\names"
+// Module Stream "foo.obj"
+//
+// If a named stream is too long to fit in a column, it is truncated at the
+// end, and if a module is too long to fit in a column, it is truncated at the
+// beginning. Example:
+//
+// Named Stream "\Really Long Str..."
+// Module Stream "...puts\foo.obj"
+//
+struct StreamPurposeProvider {
+ explicit StreamPurposeProvider(uint32_t MaxLen) : MaxLen(MaxLen) {}
+
+ DiffResult compare(const std::pair<StreamPurpose, std::string> &L,
+ const std::pair<StreamPurpose, std::string> &R) {
+ if (L.first != R.first)
+ return DiffResult::DIFFERENT;
+ if (L.first == StreamPurpose::ModuleStream) {
+ BinaryPathProvider PathProvider(MaxLen);
+ return PathProvider.compare(L.second, R.second);
+ }
+ return (L.second == R.second) ? DiffResult::IDENTICAL
+ : DiffResult::DIFFERENT;
+ }
+
+ std::string format(const std::pair<StreamPurpose, std::string> &P,
+ bool Right) {
+ if (P.first == StreamPurpose::Other)
+ return truncateStringBack(P.second, MaxLen);
+ if (P.first == StreamPurpose::NamedStream)
+ return truncateQuotedNameBack("Named Stream", P.second, MaxLen);
+
+ assert(P.first == StreamPurpose::ModuleStream);
+ uint32_t ExtraChars = strlen("Module \"\"");
+ BinaryPathProvider PathProvider(MaxLen - ExtraChars);
+ std::string Result = PathProvider.format(P.second, Right);
+ return formatv("Module \"{0}\"", Result);
+ }
+
+ uint32_t MaxLen;
+};
+} // namespace
+
namespace llvm {
template <> struct format_provider<PdbRaw_FeatureSig> {
static void format(const PdbRaw_FeatureSig &Sig, raw_ostream &Stream,
@@ -49,47 +189,6 @@ template <> struct format_provider<PdbRaw_FeatureSig> {
template <typename R> using ValueOfRange = llvm::detail::ValueOfRange<R>;
-template <typename Range, typename Comp>
-static void set_differences(Range &&R1, Range &&R2,
- SmallVectorImpl<ValueOfRange<Range>> *OnlyLeft,
- SmallVectorImpl<ValueOfRange<Range>> *OnlyRight,
- SmallVectorImpl<ValueOfRange<Range>> *Intersection,
- Comp Comparator) {
-
- std::sort(R1.begin(), R1.end(), Comparator);
- std::sort(R2.begin(), R2.end(), Comparator);
-
- if (OnlyLeft) {
- OnlyLeft->reserve(R1.size());
- auto End = std::set_difference(R1.begin(), R1.end(), R2.begin(), R2.end(),
- OnlyLeft->begin(), Comparator);
- OnlyLeft->set_size(std::distance(OnlyLeft->begin(), End));
- }
- if (OnlyRight) {
- OnlyLeft->reserve(R2.size());
- auto End = std::set_difference(R2.begin(), R2.end(), R1.begin(), R1.end(),
- OnlyRight->begin(), Comparator);
- OnlyRight->set_size(std::distance(OnlyRight->begin(), End));
- }
- if (Intersection) {
- Intersection->reserve(std::min(R1.size(), R2.size()));
- auto End = std::set_intersection(R1.begin(), R1.end(), R2.begin(), R2.end(),
- Intersection->begin(), Comparator);
- Intersection->set_size(std::distance(Intersection->begin(), End));
- }
-}
-
-template <typename Range>
-static void
-set_differences(Range &&R1, Range &&R2,
- SmallVectorImpl<ValueOfRange<Range>> *OnlyLeft,
- SmallVectorImpl<ValueOfRange<Range>> *OnlyRight,
- SmallVectorImpl<ValueOfRange<Range>> *Intersection = nullptr) {
- std::less<ValueOfRange<Range>> Comp;
- set_differences(std::forward<Range>(R1), std::forward<Range>(R2), OnlyLeft,
- OnlyRight, Intersection, Comp);
-}
-
DiffStyle::DiffStyle(PDBFile &File1, PDBFile &File2)
: File1(File1), File2(File2) {}
@@ -136,300 +235,363 @@ Error DiffStyle::dump() {
return Error::success();
}
-template <typename T>
-static bool diffAndPrint(StringRef Label, PDBFile &File1, PDBFile &File2, T V1,
- T V2) {
- if (V1 == V2) {
- outs() << formatv(" {0}: No differences detected!\n", Label);
- return false;
- }
-
- outs().indent(2) << Label << "\n";
- outs().indent(4) << formatv("{0}: {1}\n", File1.getFilePath(), V1);
- outs().indent(4) << formatv("{0}: {1}\n", File2.getFilePath(), V2);
- return true;
-}
-
-template <typename T>
-static bool diffAndPrint(StringRef Label, PDBFile &File1, PDBFile &File2,
- ArrayRef<T> V1, ArrayRef<T> V2) {
- if (V1 == V2) {
- outs() << formatv(" {0}: No differences detected!\n", Label);
- return false;
- }
-
- outs().indent(2) << Label << "\n";
- outs().indent(4) << formatv("{0}: {1}\n", File1.getFilePath(),
- make_range(V1.begin(), V1.end()));
- outs().indent(4) << formatv("{0}: {1}\n", File2.getFilePath(),
- make_range(V2.begin(), V2.end()));
- return true;
-}
-
-template <typename T>
-static bool printSymmetricDifferences(PDBFile &File1, PDBFile &File2,
- T &&OnlyRange1, T &&OnlyRange2,
- StringRef Label) {
- bool HasDiff = false;
- if (!OnlyRange1.empty()) {
- HasDiff = true;
- outs() << formatv(" {0} {1}(s) only in ({2})\n", OnlyRange1.size(), Label,
- File1.getFilePath());
- for (const auto &Item : OnlyRange1)
- outs() << formatv(" {0}\n", Label, Item);
- }
- if (!OnlyRange2.empty()) {
- HasDiff = true;
- outs() << formatv(" {0} {1}(s) only in ({2})\n", OnlyRange2.size(),
- File2.getFilePath());
- for (const auto &Item : OnlyRange2)
- outs() << formatv(" {0}\n", Item);
- }
- return HasDiff;
-}
-
Error DiffStyle::diffSuperBlock() {
- outs() << "MSF Super Block: Searching for differences...\n";
- bool Diffs = false;
-
- Diffs |= diffAndPrint("Block Size", File1, File2, File1.getBlockSize(),
- File2.getBlockSize());
- Diffs |= diffAndPrint("Block Count", File1, File2, File1.getBlockCount(),
- File2.getBlockCount());
- Diffs |= diffAndPrint("Unknown 1", File1, File2, File1.getUnknown1(),
- File2.getUnknown1());
- if (!Diffs)
- outs() << "MSF Super Block: No differences detected...\n";
+ DiffPrinter D(2, "MSF Super Block", 16, 20, opts::diff::PrintResultColumn,
+ opts::diff::PrintValueColumns, outs());
+ D.printExplicit("File", DiffResult::UNSPECIFIED,
+ truncateStringFront(File1.getFilePath(), 18),
+ truncateStringFront(File2.getFilePath(), 18));
+ D.print("Block Size", File1.getBlockSize(), File2.getBlockSize());
+ D.print("Block Count", File1.getBlockCount(), File2.getBlockCount());
+ D.print("Unknown 1", File1.getUnknown1(), File2.getUnknown1());
+ D.print("Directory Size", File1.getNumDirectoryBytes(),
+ File2.getNumDirectoryBytes());
return Error::success();
}
Error DiffStyle::diffStreamDirectory() {
- SmallVector<std::string, 32> P;
- SmallVector<std::string, 32> Q;
+ DiffPrinter D(2, "Stream Directory", 30, 20, opts::diff::PrintResultColumn,
+ opts::diff::PrintValueColumns, outs());
+ D.printExplicit("File", DiffResult::UNSPECIFIED,
+ truncateStringFront(File1.getFilePath(), 18),
+ truncateStringFront(File2.getFilePath(), 18));
+
+ SmallVector<std::pair<StreamPurpose, std::string>, 32> P;
+ SmallVector<std::pair<StreamPurpose, std::string>, 32> Q;
discoverStreamPurposes(File1, P);
discoverStreamPurposes(File2, Q);
- outs() << "Stream Directory: Searching for differences...\n";
-
- bool HasDifferences = false;
+ D.print("Stream Count", File1.getNumStreams(), File2.getNumStreams());
auto PI = to_vector<32>(enumerate(P));
auto QI = to_vector<32>(enumerate(Q));
- typedef decltype(PI) ContainerType;
- typedef typename ContainerType::value_type value_type;
-
- auto Comparator = [](const value_type &I1, const value_type &I2) {
- return I1.value() < I2.value();
- };
-
- decltype(PI) OnlyP;
- decltype(QI) OnlyQ;
- decltype(PI) Common;
-
- set_differences(PI, QI, &OnlyP, &OnlyQ, &Common, Comparator);
-
- if (!OnlyP.empty()) {
- HasDifferences = true;
- outs().indent(2) << formatv("{0} Stream(s) only in ({1})\n", OnlyP.size(),
- File1.getFilePath());
- for (auto &Item : OnlyP) {
- outs().indent(4) << formatv("Stream {0} - {1}\n", Item.index(),
- Item.value());
+ // Scan all streams in the left hand side, looking for ones that are also
+ // in the right. Each time we find one, remove it. When we're done, Q
+ // should contain all the streams that are in the right but not in the left.
+ StreamPurposeProvider StreamProvider(28);
+ for (const auto &P : PI) {
+ typedef decltype(PI) ContainerType;
+ typedef typename ContainerType::value_type value_type;
+
+ auto Iter = llvm::find_if(QI, [P, &StreamProvider](const value_type &V) {
+ DiffResult Result = StreamProvider.compare(P.value(), V.value());
+ return Result == DiffResult::EQUIVALENT ||
+ Result == DiffResult::IDENTICAL;
+ });
+
+ if (Iter == QI.end()) {
+ D.printExplicit(StreamProvider.format(P.value(), false),
+ DiffResult::DIFFERENT, P.index(), "(not present)");
+ continue;
}
- }
- if (!OnlyQ.empty()) {
- HasDifferences = true;
- outs().indent(2) << formatv("{0} Streams(s) only in ({1})\n", OnlyQ.size(),
- File2.getFilePath());
- for (auto &Item : OnlyQ) {
- outs().indent(4) << formatv("Stream {0} - {1}\n", Item.index(),
- Item.value());
- }
+ D.print<EquivalentDiffProvider>(StreamProvider.format(P.value(), false),
+ P.index(), Iter->index());
+ QI.erase(Iter);
}
- if (!Common.empty()) {
- outs().indent(2) << formatv("Found {0} common streams. Searching for "
- "intra-stream differences.\n",
- Common.size());
- bool HasCommonDifferences = false;
- for (const auto &Left : Common) {
- // Left was copied from the first range so its index refers to a stream
- // index in the first file. Find the corresponding stream index in the
- // second file.
- auto Range =
- std::equal_range(QI.begin(), QI.end(), Left,
- [](const value_type &L, const value_type &R) {
- return L.value() < R.value();
- });
- const auto &Right = *Range.first;
- assert(Left.value() == Right.value());
- uint32_t LeftSize = File1.getStreamByteSize(Left.index());
- uint32_t RightSize = File2.getStreamByteSize(Right.index());
- if (LeftSize != RightSize) {
- HasDifferences = true;
- HasCommonDifferences = true;
- outs().indent(4) << formatv("{0} ({1}: {2} bytes, {3}: {4} bytes)\n",
- Left.value(), File1.getFilePath(), LeftSize,
- File2.getFilePath(), RightSize);
- }
- }
- if (!HasCommonDifferences)
- outs().indent(2) << "Common Streams: No differences detected!\n";
+
+ for (const auto &Q : QI) {
+ D.printExplicit(StreamProvider.format(Q.value(), true),
+ DiffResult::DIFFERENT, "(not present)", Q.index());
}
- if (!HasDifferences)
- outs() << "Stream Directory: No differences detected!\n";
return Error::success();
}
Error DiffStyle::diffStringTable() {
+ DiffPrinter D(2, "String Table", 30, 20, opts::diff::PrintResultColumn,
+ opts::diff::PrintValueColumns, outs());
+ D.printExplicit("File", DiffResult::UNSPECIFIED,
+ truncateStringFront(File1.getFilePath(), 18),
+ truncateStringFront(File2.getFilePath(), 18));
+
auto ExpectedST1 = File1.getStringTable();
auto ExpectedST2 = File2.getStringTable();
- outs() << "String Table: Searching for differences...\n";
bool Has1 = !!ExpectedST1;
bool Has2 = !!ExpectedST2;
- if (!(Has1 && Has2)) {
- // If one has a string table and the other doesn't, we can print less
- // output.
- if (Has1 != Has2) {
- if (Has1) {
- outs() << formatv(" {0}: ({1} strings)\n", File1.getFilePath(),
- ExpectedST1->getNameCount());
- outs() << formatv(" {0}: (string table not present)\n",
- File2.getFilePath());
- } else {
- outs() << formatv(" {0}: (string table not present)\n",
- File1.getFilePath());
- outs() << formatv(" {0}: ({1})\n", File2.getFilePath(),
- ExpectedST2->getNameCount());
- }
- }
+ std::string Count1 = Has1 ? llvm::utostr(ExpectedST1->getNameCount())
+ : "(string table not present)";
+ std::string Count2 = Has2 ? llvm::utostr(ExpectedST2->getNameCount())
+ : "(string table not present)";
+ D.print("Number of Strings", Count1, Count2);
+
+ if (!Has1 || !Has2) {
consumeError(ExpectedST1.takeError());
consumeError(ExpectedST2.takeError());
return Error::success();
}
- bool HasDiff = false;
auto &ST1 = *ExpectedST1;
auto &ST2 = *ExpectedST2;
- if (ST1.getByteSize() != ST2.getByteSize()) {
- outs() << " Stream Size\n";
- outs() << formatv(" {0} - {1} byte(s)\n", File1.getFilePath(),
- ST1.getByteSize());
- outs() << formatv(" {0} - {1} byte(s)\n", File2.getFilePath(),
- ST2.getByteSize());
- outs() << formatv(" Difference: {0} bytes\n",
- AbsoluteDifference(ST1.getByteSize(), ST2.getByteSize()));
- HasDiff = true;
- }
- HasDiff |= diffAndPrint("Hash Version", File1, File2, ST1.getHashVersion(),
- ST1.getHashVersion());
- HasDiff |= diffAndPrint("Signature", File1, File2, ST1.getSignature(),
- ST1.getSignature());
+ D.print("Hash Version", ST1.getHashVersion(), ST2.getHashVersion());
+ D.print("Byte Size", ST1.getByteSize(), ST2.getByteSize());
+ D.print("Signature", ST1.getSignature(), ST2.getSignature());
// Both have a valid string table, dive in and compare individual strings.
auto IdList1 = ST1.name_ids();
auto IdList2 = ST2.name_ids();
- std::vector<StringRef> Strings1, Strings2;
- Strings1.reserve(IdList1.size());
- Strings2.reserve(IdList2.size());
+ StringSet<> LS;
+ StringSet<> RS;
+ uint32_t Empty1 = 0;
+ uint32_t Empty2 = 0;
for (auto ID : IdList1) {
auto S = ST1.getStringForID(ID);
if (!S)
return S.takeError();
- Strings1.push_back(*S);
+ if (S->empty())
+ ++Empty1;
+ else
+ LS.insert(*S);
}
for (auto ID : IdList2) {
auto S = ST2.getStringForID(ID);
if (!S)
return S.takeError();
- Strings2.push_back(*S);
+ if (S->empty())
+ ++Empty2;
+ else
+ RS.insert(*S);
+ }
+ D.print("Empty Strings", Empty1, Empty2);
+
+ for (const auto &S : LS) {
+ auto R = RS.find(S.getKey());
+ std::string Truncated = truncateStringMiddle(S.getKey(), 28);
+ uint32_t I = cantFail(ST1.getIDForString(S.getKey()));
+ if (R == RS.end()) {
+ D.printExplicit(Truncated, DiffResult::DIFFERENT, I, "(not present)");
+ continue;
+ }
+
+ uint32_t J = cantFail(ST2.getIDForString(R->getKey()));
+ D.print<EquivalentDiffProvider>(Truncated, I, J);
+ RS.erase(R);
}
- SmallVector<StringRef, 64> OnlyP;
- SmallVector<StringRef, 64> OnlyQ;
- auto End1 = std::remove(Strings1.begin(), Strings1.end(), "");
- auto End2 = std::remove(Strings2.begin(), Strings2.end(), "");
- uint32_t Empty1 = std::distance(End1, Strings1.end());
- uint32_t Empty2 = std::distance(End2, Strings2.end());
- Strings1.erase(End1, Strings1.end());
- Strings2.erase(End2, Strings2.end());
- set_differences(Strings1, Strings2, &OnlyP, &OnlyQ);
- printSymmetricDifferences(File1, File2, OnlyP, OnlyQ, "String");
-
- if (Empty1 != Empty2) {
- PDBFile &MoreF = (Empty1 > Empty2) ? File1 : File2;
- PDBFile &LessF = (Empty1 < Empty2) ? File1 : File2;
- uint32_t Difference = AbsoluteDifference(Empty1, Empty2);
- outs() << formatv(" {0} had {1} more empty strings than {2}\n",
- MoreF.getFilePath(), Difference, LessF.getFilePath());
+ for (const auto &S : RS) {
+ auto L = LS.find(S.getKey());
+ std::string Truncated = truncateStringMiddle(S.getKey(), 28);
+ uint32_t J = cantFail(ST2.getIDForString(S.getKey()));
+ if (L == LS.end()) {
+ D.printExplicit(Truncated, DiffResult::DIFFERENT, "(not present)", J);
+ continue;
+ }
+
+ uint32_t I = cantFail(ST1.getIDForString(L->getKey()));
+ D.print<EquivalentDiffProvider>(Truncated, I, J);
}
- if (!HasDiff)
- outs() << "String Table: No differences detected!\n";
return Error::success();
}
Error DiffStyle::diffFreePageMap() { return Error::success(); }
Error DiffStyle::diffInfoStream() {
+ DiffPrinter D(2, "PDB Stream", 22, 40, opts::diff::PrintResultColumn,
+ opts::diff::PrintValueColumns, outs());
+ D.printExplicit("File", DiffResult::UNSPECIFIED,
+ truncateStringFront(File1.getFilePath(), 38),
+ truncateStringFront(File2.getFilePath(), 38));
+
auto ExpectedInfo1 = File1.getPDBInfoStream();
auto ExpectedInfo2 = File2.getPDBInfoStream();
- outs() << "PDB Stream: Searching for differences...\n";
bool Has1 = !!ExpectedInfo1;
bool Has2 = !!ExpectedInfo2;
if (!(Has1 && Has2)) {
- if (Has1 != Has2)
- outs() << formatv("{0} does not have a PDB Stream!\n",
- Has1 ? File1.getFilePath() : File2.getFilePath());
- consumeError(ExpectedInfo2.takeError());
+ std::string L = Has1 ? "(present)" : "(not present)";
+ std::string R = Has2 ? "(present)" : "(not present)";
+ D.print("Stream", L, R);
+
+ consumeError(ExpectedInfo1.takeError());
consumeError(ExpectedInfo2.takeError());
return Error::success();
}
- bool HasDiff = false;
auto &IS1 = *ExpectedInfo1;
auto &IS2 = *ExpectedInfo2;
- if (IS1.getStreamSize() != IS2.getStreamSize()) {
- outs() << " Stream Size\n";
- outs() << formatv(" {0} - {1} byte(s)\n", File1.getFilePath(),
- IS1.getStreamSize());
- outs() << formatv(" {0} - {1} byte(s)\n", File2.getFilePath(),
- IS2.getStreamSize());
- outs() << formatv(
- " Difference: {0} bytes\n",
- AbsoluteDifference(IS1.getStreamSize(), IS2.getStreamSize()));
- HasDiff = true;
+ D.print("Stream Size", IS1.getStreamSize(), IS2.getStreamSize());
+ D.print("Age", IS1.getAge(), IS2.getAge());
+ D.print("Guid", IS1.getGuid(), IS2.getGuid());
+ D.print("Signature", IS1.getSignature(), IS2.getSignature());
+ D.print("Version", IS1.getVersion(), IS2.getVersion());
+ D.diffUnorderedArray("Feature", IS1.getFeatureSignatures(),
+ IS2.getFeatureSignatures());
+ D.print("Named Stream Size", IS1.getNamedStreamMapByteSize(),
+ IS2.getNamedStreamMapByteSize());
+ StringMap<uint32_t> NSL = IS1.getNamedStreams().getStringMap();
+ StringMap<uint32_t> NSR = IS2.getNamedStreams().getStringMap();
+ D.diffUnorderedMap<EquivalentDiffProvider>("Named Stream", NSL, NSR);
+ return Error::success();
+}
+
+static std::vector<std::pair<uint32_t, DbiModuleDescriptor>>
+getModuleDescriptors(const DbiModuleList &ML) {
+ std::vector<std::pair<uint32_t, DbiModuleDescriptor>> List;
+ List.reserve(ML.getModuleCount());
+ for (uint32_t I = 0; I < ML.getModuleCount(); ++I)
+ List.emplace_back(I, ML.getModuleDescriptor(I));
+ return List;
+}
+
+static void
+diffOneModule(DiffPrinter &D,
+ const std::pair<uint32_t, DbiModuleDescriptor> Item,
+ std::vector<std::pair<uint32_t, DbiModuleDescriptor>> &Other,
+ bool ItemIsRight) {
+ StreamPurposeProvider HeaderProvider(70);
+ std::pair<StreamPurpose, std::string> Header;
+ Header.first = StreamPurpose::ModuleStream;
+ Header.second = Item.second.getModuleName();
+ D.printFullRow(HeaderProvider.format(Header, ItemIsRight));
+
+ const auto *L = &Item;
+
+ BinaryPathProvider PathProvider(28);
+ auto Iter = llvm::find_if(
+ Other, [&PathProvider, ItemIsRight,
+ L](const std::pair<uint32_t, DbiModuleDescriptor> &Other) {
+ const auto *Left = L;
+ const auto *Right = &Other;
+ if (ItemIsRight)
+ std::swap(Left, Right);
+ DiffResult Result = PathProvider.compare(Left->second.getModuleName(),
+ Right->second.getModuleName());
+ return Result == DiffResult::EQUIVALENT ||
+ Result == DiffResult::IDENTICAL;
+ });
+ if (Iter == Other.end()) {
+ // We didn't find this module at all on the other side. Just print one row
+ // and continue.
+ D.print<ModiProvider>("- Modi", Item.first, None);
+ return;
}
- HasDiff |= diffAndPrint("Age", File1, File2, IS1.getAge(), IS2.getAge());
- HasDiff |= diffAndPrint("Guid", File1, File2, IS1.getGuid(), IS2.getGuid());
- HasDiff |= diffAndPrint("Signature", File1, File2, IS1.getSignature(),
- IS2.getSignature());
- HasDiff |=
- diffAndPrint("Version", File1, File2, IS1.getVersion(), IS2.getVersion());
- HasDiff |= diffAndPrint("Features", File1, File2, IS1.getFeatureSignatures(),
- IS2.getFeatureSignatures());
- HasDiff |= diffAndPrint("Named Stream Byte Size", File1, File2,
- IS1.getNamedStreamMapByteSize(),
- IS2.getNamedStreamMapByteSize());
- SmallVector<StringRef, 4> NS1;
- SmallVector<StringRef, 4> NS2;
- for (const auto &X : IS1.getNamedStreams().entries())
- NS1.push_back(X.getKey());
- for (const auto &X : IS2.getNamedStreams().entries())
- NS2.push_back(X.getKey());
- SmallVector<StringRef, 4> OnlyP;
- SmallVector<StringRef, 4> OnlyQ;
- set_differences(NS1, NS2, &OnlyP, &OnlyQ);
- printSymmetricDifferences(File1, File2, OnlyP, OnlyQ, "Named Streams");
- if (!HasDiff)
- outs() << "PDB Stream: No differences detected!\n";
- return Error::success();
+ // We did find this module. Go through and compare each field.
+ const auto *R = &*Iter;
+ if (ItemIsRight)
+ std::swap(L, R);
+
+ D.print<ModiProvider>("- Modi", L->first, R->first);
+ D.print<BinaryPathProvider>("- Obj File Name", L->second.getObjFileName(),
+ R->second.getObjFileName(), PathProvider);
+ D.print<StreamNumberProvider>("- Debug Stream",
+ L->second.getModuleStreamIndex(),
+ R->second.getModuleStreamIndex());
+ D.print("- C11 Byte Size", L->second.getC11LineInfoByteSize(),
+ R->second.getC11LineInfoByteSize());
+ D.print("- C13 Byte Size", L->second.getC13LineInfoByteSize(),
+ R->second.getC13LineInfoByteSize());
+ D.print("- # of files", L->second.getNumberOfFiles(),
+ R->second.getNumberOfFiles());
+ D.print("- Pdb File Path Index", L->second.getPdbFilePathNameIndex(),
+ R->second.getPdbFilePathNameIndex());
+ D.print("- Source File Name Index", L->second.getSourceFileNameIndex(),
+ R->second.getSourceFileNameIndex());
+ D.print("- Symbol Byte Size", L->second.getSymbolDebugInfoByteSize(),
+ R->second.getSymbolDebugInfoByteSize());
+ Other.erase(Iter);
}
-Error DiffStyle::diffDbiStream() { return Error::success(); }
+Error DiffStyle::diffDbiStream() {
+ DiffPrinter D(2, "DBI Stream", 40, 30, opts::diff::PrintResultColumn,
+ opts::diff::PrintValueColumns, outs());
+ D.printExplicit("File", DiffResult::UNSPECIFIED,
+ truncateStringFront(File1.getFilePath(), 28),
+ truncateStringFront(File2.getFilePath(), 28));
+
+ auto ExpectedDbi1 = File1.getPDBDbiStream();
+ auto ExpectedDbi2 = File2.getPDBDbiStream();
+
+ bool Has1 = !!ExpectedDbi1;
+ bool Has2 = !!ExpectedDbi2;
+ if (!(Has1 && Has2)) {
+ std::string L = Has1 ? "(present)" : "(not present)";
+ std::string R = Has2 ? "(present)" : "(not present)";
+ D.print("Stream", L, R);
+
+ consumeError(ExpectedDbi1.takeError());
+ consumeError(ExpectedDbi2.takeError());
+ return Error::success();
+ }
+
+ auto &DL = *ExpectedDbi1;
+ auto &DR = *ExpectedDbi2;
+
+ D.print("Dbi Version", (uint32_t)DL.getDbiVersion(),
+ (uint32_t)DR.getDbiVersion());
+ D.print("Age", DL.getAge(), DR.getAge());
+ D.print("Machine", (uint16_t)DL.getMachineType(),
+ (uint16_t)DR.getMachineType());
+ D.print("Flags", DL.getFlags(), DR.getFlags());
+ D.print("Build Major", DL.getBuildMajorVersion(), DR.getBuildMajorVersion());
+ D.print("Build Minor", DL.getBuildMinorVersion(), DR.getBuildMinorVersion());
+ D.print("Build Number", DL.getBuildNumber(), DR.getBuildNumber());
+ D.print("PDB DLL Version", DL.getPdbDllVersion(), DR.getPdbDllVersion());
+ D.print("PDB DLL RBLD", DL.getPdbDllRbld(), DR.getPdbDllRbld());
+ D.print<StreamNumberProvider>("DBG (FPO)",
+ DL.getDebugStreamIndex(DbgHeaderType::FPO),
+ DR.getDebugStreamIndex(DbgHeaderType::FPO));
+ D.print<StreamNumberProvider>(
+ "DBG (Exception)", DL.getDebugStreamIndex(DbgHeaderType::Exception),
+ DR.getDebugStreamIndex(DbgHeaderType::Exception));
+ D.print<StreamNumberProvider>("DBG (Fixup)",
+ DL.getDebugStreamIndex(DbgHeaderType::Fixup),
+ DR.getDebugStreamIndex(DbgHeaderType::Fixup));
+ D.print<StreamNumberProvider>(
+ "DBG (OmapToSrc)", DL.getDebugStreamIndex(DbgHeaderType::OmapToSrc),
+ DR.getDebugStreamIndex(DbgHeaderType::OmapToSrc));
+ D.print<StreamNumberProvider>(
+ "DBG (OmapFromSrc)", DL.getDebugStreamIndex(DbgHeaderType::OmapFromSrc),
+ DR.getDebugStreamIndex(DbgHeaderType::OmapFromSrc));
+ D.print<StreamNumberProvider>(
+ "DBG (SectionHdr)", DL.getDebugStreamIndex(DbgHeaderType::SectionHdr),
+ DR.getDebugStreamIndex(DbgHeaderType::SectionHdr));
+ D.print<StreamNumberProvider>(
+ "DBG (TokenRidMap)", DL.getDebugStreamIndex(DbgHeaderType::TokenRidMap),
+ DR.getDebugStreamIndex(DbgHeaderType::TokenRidMap));
+ D.print<StreamNumberProvider>("DBG (Xdata)",
+ DL.getDebugStreamIndex(DbgHeaderType::Xdata),
+ DR.getDebugStreamIndex(DbgHeaderType::Xdata));
+ D.print<StreamNumberProvider>("DBG (Pdata)",
+ DL.getDebugStreamIndex(DbgHeaderType::Pdata),
+ DR.getDebugStreamIndex(DbgHeaderType::Pdata));
+ D.print<StreamNumberProvider>("DBG (NewFPO)",
+ DL.getDebugStreamIndex(DbgHeaderType::NewFPO),
+ DR.getDebugStreamIndex(DbgHeaderType::NewFPO));
+ D.print<StreamNumberProvider>(
+ "DBG (SectionHdrOrig)",
+ DL.getDebugStreamIndex(DbgHeaderType::SectionHdrOrig),
+ DR.getDebugStreamIndex(DbgHeaderType::SectionHdrOrig));
+ D.print<StreamNumberProvider>("Globals Stream",
+ DL.getGlobalSymbolStreamIndex(),
+ DR.getGlobalSymbolStreamIndex());
+ D.print<StreamNumberProvider>("Publics Stream",
+ DL.getPublicSymbolStreamIndex(),
+ DR.getPublicSymbolStreamIndex());
+ D.print<StreamNumberProvider>("Symbol Records", DL.getSymRecordStreamIndex(),
+ DR.getSymRecordStreamIndex());
+ D.print("Has CTypes", DL.hasCTypes(), DR.hasCTypes());
+ D.print("Is Incrementally Linked", DL.isIncrementallyLinked(),
+ DR.isIncrementallyLinked());
+ D.print("Is Stripped", DL.isStripped(), DR.isStripped());
+ const DbiModuleList &ML = DL.modules();
+ const DbiModuleList &MR = DR.modules();
+ D.print("Module Count", ML.getModuleCount(), MR.getModuleCount());
+ D.print("Source File Count", ML.getSourceFileCount(),
+ MR.getSourceFileCount());
+ auto MDL = getModuleDescriptors(ML);
+ auto MDR = getModuleDescriptors(MR);
+ // Scan all module descriptors from the left, and look for corresponding
+ // module descriptors on the right.
+ for (const auto &L : MDL)
+ diffOneModule(D, L, MDR, false);
+
+ for (const auto &R : MDR)
+ diffOneModule(D, R, MDL, true);
+
+ return Error::success();
+}
Error DiffStyle::diffSectionContribs() { return Error::success(); }
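Note: the stream-directory, string-table, and module diffs above all use the same match-and-erase pattern: for each left item, find an equivalent right item and remove it, so whatever remains on the right afterwards is right-only. A generic stand-alone sketch of that pattern; the types and names are hypothetical.

// Sketch only: the match-and-erase comparison used by the diff routines above.
#include <algorithm>
#include <vector>

template <typename T, typename Pred>
void matchAndReport(const std::vector<T> &Left, std::vector<T> &Right, Pred Same) {
  for (const T &L : Left) {
    auto It = std::find_if(Right.begin(), Right.end(),
                           [&](const T &R) { return Same(L, R); });
    if (It == Right.end()) {
      // Present only on the left; report a one-sided row here.
      continue;
    }
    // Present on both sides; compare individual fields here.
    Right.erase(It);
  }
  // Anything still in Right exists only on the right; report it here.
}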
diff --git a/tools/llvm-pdbutil/DiffPrinter.cpp b/tools/llvm-pdbutil/DiffPrinter.cpp
new file mode 100644
index 000000000000..dd61cc182593
--- /dev/null
+++ b/tools/llvm-pdbutil/DiffPrinter.cpp
@@ -0,0 +1,147 @@
+
+#include "DiffPrinter.h"
+
+#include "llvm/Support/FormatAdapters.h"
+
+using namespace llvm;
+using namespace llvm::pdb;
+
+namespace {
+struct Colorize {
+ Colorize(raw_ostream &OS, DiffResult Result) : OS(OS) {
+ if (!OS.has_colors())
+ return;
+ switch (Result) {
+ case DiffResult::IDENTICAL:
+ OS.changeColor(raw_ostream::Colors::GREEN, false);
+ break;
+ case DiffResult::EQUIVALENT:
+ OS.changeColor(raw_ostream::Colors::YELLOW, true);
+ break;
+ default:
+ OS.changeColor(raw_ostream::Colors::RED, false);
+ break;
+ }
+ }
+
+ ~Colorize() {
+ if (OS.has_colors())
+ OS.resetColor();
+ }
+
+ raw_ostream &OS;
+};
+}
+
+DiffPrinter::DiffPrinter(uint32_t Indent, StringRef Header,
+ uint32_t PropertyWidth, uint32_t FieldWidth,
+ bool Result, bool Fields, raw_ostream &Stream)
+ : PrintResult(Result), PrintValues(Fields), Indent(Indent),
+ PropertyWidth(PropertyWidth), FieldWidth(FieldWidth), OS(Stream) {
+ printHeaderRow();
+ printFullRow(Header);
+}
+
+DiffPrinter::~DiffPrinter() {}
+
+uint32_t DiffPrinter::tableWidth() const {
+ // `|`
+ uint32_t W = 1;
+
+ // `<width>|`
+ W += PropertyWidth + 1;
+
+ if (PrintResult) {
+ // ` I |`
+ W += 4;
+ }
+
+ if (PrintValues) {
+ // `<width>|<width>|`
+ W += 2 * (FieldWidth + 1);
+ }
+ return W;
+}
+
+void DiffPrinter::printFullRow(StringRef Text) {
+ newLine();
+ printValue(Text, DiffResult::UNSPECIFIED, AlignStyle::Center,
+ tableWidth() - 2, true);
+ printSeparatorRow();
+}
+
+void DiffPrinter::printSeparatorRow() {
+ newLine();
+ OS << formatv("{0}", fmt_repeat('-', PropertyWidth));
+ if (PrintResult) {
+ OS << '+';
+ OS << formatv("{0}", fmt_repeat('-', 3));
+ }
+ if (PrintValues) {
+ OS << '+';
+ OS << formatv("{0}", fmt_repeat('-', FieldWidth));
+ OS << '+';
+ OS << formatv("{0}", fmt_repeat('-', FieldWidth));
+ }
+ OS << '|';
+}
+
+void DiffPrinter::printHeaderRow() {
+ newLine('-');
+ OS << formatv("{0}", fmt_repeat('-', tableWidth() - 1));
+}
+
+void DiffPrinter::newLine(char InitialChar) {
+ OS << "\n";
+ OS.indent(Indent) << InitialChar;
+}
+
+void DiffPrinter::printExplicit(StringRef Property, DiffResult C,
+ StringRef Left, StringRef Right) {
+ newLine();
+ printValue(Property, DiffResult::UNSPECIFIED, AlignStyle::Right,
+ PropertyWidth, true);
+ printResult(C);
+ printValue(Left, C, AlignStyle::Center, FieldWidth, false);
+ printValue(Right, C, AlignStyle::Center, FieldWidth, false);
+ printSeparatorRow();
+}
+
+void DiffPrinter::printResult(DiffResult Result) {
+ if (!PrintResult)
+ return;
+ switch (Result) {
+ case DiffResult::DIFFERENT:
+ printValue("D", Result, AlignStyle::Center, 3, true);
+ break;
+ case DiffResult::EQUIVALENT:
+ printValue("E", Result, AlignStyle::Center, 3, true);
+ break;
+ case DiffResult::IDENTICAL:
+ printValue("I", Result, AlignStyle::Center, 3, true);
+ break;
+ case DiffResult::UNSPECIFIED:
+ printValue(" ", Result, AlignStyle::Center, 3, true);
+ break;
+ }
+}
+
+void DiffPrinter::printValue(StringRef Value, DiffResult C, AlignStyle Style,
+ uint32_t Width, bool Force) {
+ if (!Force && !PrintValues)
+ return;
+
+ if (Style == AlignStyle::Right)
+ --Width;
+
+ std::string FormattedItem =
+ formatv("{0}", fmt_align(Value, Style, Width)).str();
+ if (C != DiffResult::UNSPECIFIED) {
+ Colorize Color(OS, C);
+ OS << FormattedItem;
+ } else
+ OS << FormattedItem;
+ if (Style == AlignStyle::Right)
+ OS << ' ';
+ OS << '|';
+}
diff --git a/tools/llvm-pdbutil/DiffPrinter.h b/tools/llvm-pdbutil/DiffPrinter.h
new file mode 100644
index 000000000000..475747d8dc11
--- /dev/null
+++ b/tools/llvm-pdbutil/DiffPrinter.h
@@ -0,0 +1,172 @@
+//===- DiffPrinter.h ------------------------------------------ *- C++ --*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_LLVMPDBDUMP_DIFFPRINTER_H
+#define LLVM_TOOLS_LLVMPDBDUMP_DIFFPRINTER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <list>
+#include <unordered_set>
+
+namespace std {
+template <> struct hash<llvm::pdb::PdbRaw_FeatureSig> {
+ typedef llvm::pdb::PdbRaw_FeatureSig argument_type;
+ typedef std::size_t result_type;
+ result_type operator()(argument_type Item) const {
+ return std::hash<uint32_t>{}(uint32_t(Item));
+ }
+};
+} // namespace std
+
+namespace llvm {
+namespace pdb {
+
+class PDBFile;
+
+enum class DiffResult { UNSPECIFIED, IDENTICAL, EQUIVALENT, DIFFERENT };
+
+struct IdenticalDiffProvider {
+ template <typename T, typename U>
+ DiffResult compare(const T &Left, const U &Right) {
+ return (Left == Right) ? DiffResult::IDENTICAL : DiffResult::DIFFERENT;
+ }
+
+ template <typename T> std::string format(const T &Item, bool Right) {
+ return formatv("{0}", Item).str();
+ }
+};
+
+struct EquivalentDiffProvider {
+ template <typename T, typename U>
+ DiffResult compare(const T &Left, const U &Right) {
+ return (Left == Right) ? DiffResult::IDENTICAL : DiffResult::EQUIVALENT;
+ }
+
+ template <typename T> std::string format(const T &Item, bool Right) {
+ return formatv("{0}", Item).str();
+ }
+};
+
+class DiffPrinter {
+public:
+ DiffPrinter(uint32_t Indent, StringRef Header, uint32_t PropertyWidth,
+ uint32_t FieldWidth, bool Result, bool Values,
+ raw_ostream &Stream);
+ ~DiffPrinter();
+
+ template <typename T, typename U> struct Identical {};
+
+ template <typename Provider = IdenticalDiffProvider, typename T, typename U>
+ void print(StringRef Property, const T &Left, const U &Right,
+ Provider P = Provider()) {
+ std::string L = P.format(Left, false);
+ std::string R = P.format(Right, true);
+
+ DiffResult Result = P.compare(Left, Right);
+ printExplicit(Property, Result, L, R);
+ }
+
+ void printExplicit(StringRef Property, DiffResult C, StringRef Left,
+ StringRef Right);
+
+ template <typename T, typename U>
+ void printExplicit(StringRef Property, DiffResult C, const T &Left,
+ const U &Right) {
+ std::string L = formatv("{0}", Left).str();
+ std::string R = formatv("{0}", Right).str();
+ printExplicit(Property, C, StringRef(L), StringRef(R));
+ }
+
+ template <typename T, typename U>
+ void diffUnorderedArray(StringRef Property, ArrayRef<T> Left,
+ ArrayRef<U> Right) {
+ std::unordered_set<T> LS(Left.begin(), Left.end());
+ std::unordered_set<U> RS(Right.begin(), Right.end());
+ std::string Count1 = formatv("{0} element(s)", Left.size());
+ std::string Count2 = formatv("{0} element(s)", Right.size());
+ print(std::string(Property) + "s (set)", Count1, Count2);
+ for (const auto &L : LS) {
+ auto Iter = RS.find(L);
+ std::string Text = formatv("{0}", L).str();
+ if (Iter == RS.end()) {
+ print(Property, Text, "(not present)");
+ continue;
+ }
+ print(Property, Text, Text);
+ RS.erase(Iter);
+ }
+ for (const auto &R : RS) {
+ auto Iter = LS.find(R);
+ std::string Text = formatv("{0}", R).str();
+ if (Iter == LS.end()) {
+ print(Property, "(not present)", Text);
+ continue;
+ }
+ print(Property, Text, Text);
+ }
+ }
+
+ template <typename ValueProvider = IdenticalDiffProvider, typename T,
+ typename U>
+ void diffUnorderedMap(StringRef Property, const StringMap<T> &Left,
+ const StringMap<U> &Right,
+ ValueProvider P = ValueProvider()) {
+ StringMap<U> RightCopy(Right);
+
+ std::string Count1 = formatv("{0} element(s)", Left.size());
+ std::string Count2 = formatv("{0} element(s)", Right.size());
+ print(std::string(Property) + "s (map)", Count1, Count2);
+
+ for (const auto &L : Left) {
+ auto Iter = RightCopy.find(L.getKey());
+ if (Iter == RightCopy.end()) {
+ printExplicit(L.getKey(), DiffResult::DIFFERENT, L.getValue(),
+ "(not present)");
+ continue;
+ }
+
+ print(L.getKey(), L.getValue(), Iter->getValue(), P);
+ RightCopy.erase(Iter);
+ }
+
+ for (const auto &R : RightCopy) {
+ printExplicit(R.getKey(), DiffResult::DIFFERENT, "(not present)",
+ R.getValue());
+ }
+ }
+
+ void printFullRow(StringRef Text);
+
+private:
+ uint32_t tableWidth() const;
+
+ void printHeaderRow();
+ void printSeparatorRow();
+ void newLine(char InitialChar = '|');
+ void printValue(StringRef Value, DiffResult C, AlignStyle Style,
+ uint32_t Width, bool Force);
+ void printResult(DiffResult Result);
+
+ bool PrintResult;
+ bool PrintValues;
+ uint32_t Indent;
+ uint32_t PropertyWidth;
+ uint32_t FieldWidth;
+ raw_ostream &OS;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif
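Note: a minimal usage sketch of the DiffPrinter interface introduced above; the property names and values are invented for illustration.

// Sketch only: invented properties/values, assumes the header above.
#include "DiffPrinter.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::pdb;

void demoDiffPrinter() {
  DiffPrinter D(2, "Example", 20, 16, /*Result=*/true, /*Values=*/true, outs());
  D.print("Block Size", 4096u, 4096u);                      // equal values -> IDENTICAL row
  D.print<EquivalentDiffProvider>("Stream Index", 5u, 7u);  // unequal values -> EQUIVALENT row
  D.printExplicit("Only Left", DiffResult::DIFFERENT, "x", "(not present)");
}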
diff --git a/tools/llvm-pdbutil/DumpOutputStyle.cpp b/tools/llvm-pdbutil/DumpOutputStyle.cpp
index a1f919b4dd06..0642d841fd9f 100644
--- a/tools/llvm-pdbutil/DumpOutputStyle.cpp
+++ b/tools/llvm-pdbutil/DumpOutputStyle.cpp
@@ -418,6 +418,13 @@ Error DumpOutputStyle::dumpModules() {
P.formatLine(" debug stream: {0}, # files: {1}, has ec info: {2}",
Modi.getModuleStreamIndex(), Modi.getNumberOfFiles(),
Modi.hasECInfo());
+ StringRef PdbFilePath =
+ Err(Stream.getECName(Modi.getPdbFilePathNameIndex()));
+ StringRef SrcFilePath =
+ Err(Stream.getECName(Modi.getSourceFileNameIndex()));
+ P.formatLine(" pdb file ni: {0} `{1}`, src file ni: {2} `{3}`",
+ Modi.getPdbFilePathNameIndex(), PdbFilePath,
+ Modi.getSourceFileNameIndex(), SrcFilePath);
}
return Error::success();
}
diff --git a/tools/llvm-pdbutil/FormatUtil.cpp b/tools/llvm-pdbutil/FormatUtil.cpp
index 1bbe2724f0ab..02030272dd4d 100644
--- a/tools/llvm-pdbutil/FormatUtil.cpp
+++ b/tools/llvm-pdbutil/FormatUtil.cpp
@@ -16,6 +16,58 @@
using namespace llvm;
using namespace llvm::pdb;
+std::string llvm::pdb::truncateStringBack(StringRef S, uint32_t MaxLen) {
+ if (MaxLen == 0 || S.size() <= MaxLen || S.size() <= 3)
+ return S;
+
+ assert(MaxLen >= 3);
+ uint32_t FinalLen = std::min<size_t>(S.size(), MaxLen - 3);
+ S = S.take_front(FinalLen);
+ return std::string(S) + std::string("...");
+}
+
+std::string llvm::pdb::truncateStringMiddle(StringRef S, uint32_t MaxLen) {
+ if (MaxLen == 0 || S.size() <= MaxLen || S.size() <= 3)
+ return S;
+
+ assert(MaxLen >= 3);
+ uint32_t FinalLen = std::min<size_t>(S.size(), MaxLen - 3);
+ StringRef Front = S.take_front(FinalLen / 2);
+ StringRef Back = S.take_back(Front.size());
+ return std::string(Front) + std::string("...") + std::string(Back);
+}
+
+std::string llvm::pdb::truncateStringFront(StringRef S, uint32_t MaxLen) {
+ if (MaxLen == 0 || S.size() <= MaxLen || S.size() <= 3)
+ return S;
+
+ assert(MaxLen >= 3);
+ S = S.take_back(MaxLen - 3);
+ return std::string("...") + std::string(S);
+}
+
+std::string llvm::pdb::truncateQuotedNameFront(StringRef Label, StringRef Name,
+ uint32_t MaxLen) {
+ uint32_t RequiredExtraChars = Label.size() + 1 + 2;
+ if (MaxLen == 0 || RequiredExtraChars + Name.size() <= MaxLen)
+ return formatv("{0} \"{1}\"", Label, Name).str();
+
+ assert(MaxLen >= RequiredExtraChars);
+ std::string TN = truncateStringFront(Name, MaxLen - RequiredExtraChars);
+ return formatv("{0} \"{1}\"", Label, TN).str();
+}
+
+std::string llvm::pdb::truncateQuotedNameBack(StringRef Label, StringRef Name,
+ uint32_t MaxLen) {
+ uint32_t RequiredExtraChars = Label.size() + 1 + 2;
+ if (MaxLen == 0 || RequiredExtraChars + Name.size() <= MaxLen)
+ return formatv("{0} \"{1}\"", Label, Name).str();
+
+ assert(MaxLen >= RequiredExtraChars);
+ std::string TN = truncateStringBack(Name, MaxLen - RequiredExtraChars);
+ return formatv("{0} \"{1}\"", Label, TN).str();
+}
+
std::string llvm::pdb::typesetItemList(ArrayRef<std::string> Opts,
uint32_t IndentLevel, uint32_t GroupSize,
StringRef Sep) {
diff --git a/tools/llvm-pdbutil/FormatUtil.h b/tools/llvm-pdbutil/FormatUtil.h
index 3db2dbacc57b..df32ed9360fb 100644
--- a/tools/llvm-pdbutil/FormatUtil.h
+++ b/tools/llvm-pdbutil/FormatUtil.h
@@ -22,6 +22,14 @@
namespace llvm {
namespace pdb {
+std::string truncateStringBack(StringRef S, uint32_t MaxLen);
+std::string truncateStringMiddle(StringRef S, uint32_t MaxLen);
+std::string truncateStringFront(StringRef S, uint32_t MaxLen);
+std::string truncateQuotedNameFront(StringRef Label, StringRef Name,
+ uint32_t MaxLen);
+std::string truncateQuotedNameBack(StringRef Label, StringRef Name,
+ uint32_t MaxLen);
+
#define PUSH_MASKED_FLAG(Enum, Mask, TheOpt, Value, Text) \
if (Enum::TheOpt == (Value & Mask)) \
Opts.push_back(Text);
@@ -33,7 +41,7 @@ namespace pdb {
case Enum::X: \
return Ret;
-template <typename T> static std::string formatUnknownEnum(T Value) {
+template <typename T> std::string formatUnknownEnum(T Value) {
return formatv("unknown ({0})",
static_cast<typename std::underlying_type<T>::type>(Value))
.str();
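Note: the truncation helpers added above trim a string to a column budget by dropping characters from the back, the middle, or the front. A small sketch of their outputs for a 12-character budget; the inputs are invented.

// Sketch only: invented inputs showing the new FormatUtil truncation helpers.
#include "FormatUtil.h"
#include <string>
using namespace llvm::pdb;

void demoTruncate() {
  std::string A = truncateStringBack("Named Stream Example", 12);            // "Named Str..."
  std::string B = truncateStringMiddle("abcdefghijklmno", 12);               // "abcd...lmno"
  std::string C = truncateStringFront("c:\\src\\llvm\\tools\\foo.obj", 12);  // "...s\foo.obj"
  (void)A; (void)B; (void)C;
}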
diff --git a/tools/llvm-pdbutil/MinimalTypeDumper.cpp b/tools/llvm-pdbutil/MinimalTypeDumper.cpp
index 1af53e35ed11..9621320ea99a 100644
--- a/tools/llvm-pdbutil/MinimalTypeDumper.cpp
+++ b/tools/llvm-pdbutil/MinimalTypeDumper.cpp
@@ -299,7 +299,7 @@ Error MinimalTypeDumpVisitor::visitKnownRecord(CVType &CVR,
Error MinimalTypeDumpVisitor::visitKnownRecord(CVType &CVR,
ClassRecord &Class) {
- P.formatLine("class name: `{0}`", Class.Name);
+ P.format(" `{0}`", Class.Name);
if (Class.hasUniqueName())
P.formatLine("unique name: `{0}`", Class.UniqueName);
P.formatLine("vtable: {0}, base list: {1}, field list: {2}",
@@ -311,7 +311,7 @@ Error MinimalTypeDumpVisitor::visitKnownRecord(CVType &CVR,
Error MinimalTypeDumpVisitor::visitKnownRecord(CVType &CVR,
UnionRecord &Union) {
- P.formatLine("class name: `{0}`", Union.Name);
+ P.format(" `{0}`", Union.Name);
if (Union.hasUniqueName())
P.formatLine("unique name: `{0}`", Union.UniqueName);
P.formatLine("field list: {0}", Union.FieldList);
@@ -321,7 +321,7 @@ Error MinimalTypeDumpVisitor::visitKnownRecord(CVType &CVR,
}
Error MinimalTypeDumpVisitor::visitKnownRecord(CVType &CVR, EnumRecord &Enum) {
- P.formatLine("name: `{0}`", Enum.Name);
+ P.format(" `{0}`", Enum.Name);
if (Enum.hasUniqueName())
P.formatLine("unique name: `{0}`", Enum.UniqueName);
P.formatLine("field list: {0}, underlying type: {1}", Enum.FieldList,
diff --git a/tools/llvm-pdbutil/StreamUtil.cpp b/tools/llvm-pdbutil/StreamUtil.cpp
index 81aa256b5002..4d352004dec3 100644
--- a/tools/llvm-pdbutil/StreamUtil.cpp
+++ b/tools/llvm-pdbutil/StreamUtil.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "StreamUtil.h"
+#include "FormatUtil.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
@@ -18,11 +19,12 @@
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/DebugInfo/PDB/Native/TpiStream.h"
-namespace llvm {
-namespace pdb {
-void discoverStreamPurposes(PDBFile &File,
- SmallVectorImpl<std::string> &Purposes) {
+using namespace llvm;
+using namespace llvm::pdb;
+void llvm::pdb::discoverStreamPurposes(
+ PDBFile &File,
+ SmallVectorImpl<std::pair<StreamPurpose, std::string>> &Purposes) {
// It's OK if we fail to load some of these streams, we still attempt to print
// what we can.
auto Dbi = File.getPDBDbiStream();
@@ -52,74 +54,72 @@ void discoverStreamPurposes(PDBFile &File,
Purposes.resize(StreamCount);
for (uint16_t StreamIdx = 0; StreamIdx < StreamCount; ++StreamIdx) {
- std::string Value;
+ std::pair<StreamPurpose, std::string> Value;
if (StreamIdx == OldMSFDirectory)
- Value = "Old MSF Directory";
+ Value = std::make_pair(StreamPurpose::Other, "Old MSF Directory");
else if (StreamIdx == StreamPDB)
- Value = "PDB Stream";
+ Value = std::make_pair(StreamPurpose::Other, "PDB Stream");
else if (StreamIdx == StreamDBI)
- Value = "DBI Stream";
+ Value = std::make_pair(StreamPurpose::Other, "DBI Stream");
else if (StreamIdx == StreamTPI)
- Value = "TPI Stream";
+ Value = std::make_pair(StreamPurpose::Other, "TPI Stream");
else if (StreamIdx == StreamIPI)
- Value = "IPI Stream";
+ Value = std::make_pair(StreamPurpose::Other, "IPI Stream");
else if (Dbi && StreamIdx == Dbi->getGlobalSymbolStreamIndex())
- Value = "Global Symbol Hash";
+ Value = std::make_pair(StreamPurpose::Other, "Global Symbol Hash");
else if (Dbi && StreamIdx == Dbi->getPublicSymbolStreamIndex())
- Value = "Public Symbol Hash";
+ Value = std::make_pair(StreamPurpose::Other, "Public Symbol Hash");
else if (Dbi && StreamIdx == Dbi->getSymRecordStreamIndex())
- Value = "Public Symbol Records";
+ Value = std::make_pair(StreamPurpose::Other, "Public Symbol Records");
else if (Tpi && StreamIdx == Tpi->getTypeHashStreamIndex())
- Value = "TPI Hash";
+ Value = std::make_pair(StreamPurpose::Other, "TPI Hash");
else if (Tpi && StreamIdx == Tpi->getTypeHashStreamAuxIndex())
- Value = "TPI Aux Hash";
+ Value = std::make_pair(StreamPurpose::Other, "TPI Aux Hash");
else if (Ipi && StreamIdx == Ipi->getTypeHashStreamIndex())
- Value = "IPI Hash";
+ Value = std::make_pair(StreamPurpose::Other, "IPI Hash");
else if (Ipi && StreamIdx == Ipi->getTypeHashStreamAuxIndex())
- Value = "IPI Aux Hash";
+ Value = std::make_pair(StreamPurpose::Other, "IPI Aux Hash");
else if (Dbi &&
StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::Exception))
- Value = "Exception Data";
+ Value = std::make_pair(StreamPurpose::Other, "Exception Data");
else if (Dbi && StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::Fixup))
- Value = "Fixup Data";
+ Value = std::make_pair(StreamPurpose::Other, "Fixup Data");
else if (Dbi && StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::FPO))
- Value = "FPO Data";
+ Value = std::make_pair(StreamPurpose::Other, "FPO Data");
else if (Dbi &&
StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::NewFPO))
- Value = "New FPO Data";
+ Value = std::make_pair(StreamPurpose::Other, "New FPO Data");
else if (Dbi &&
StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::OmapFromSrc))
- Value = "Omap From Source Data";
+ Value = std::make_pair(StreamPurpose::Other, "Omap From Source Data");
else if (Dbi &&
StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::OmapToSrc))
- Value = "Omap To Source Data";
+ Value = std::make_pair(StreamPurpose::Other, "Omap To Source Data");
else if (Dbi && StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::Pdata))
- Value = "Pdata";
+ Value = std::make_pair(StreamPurpose::Other, "Pdata");
else if (Dbi &&
StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::SectionHdr))
- Value = "Section Header Data";
+ Value = std::make_pair(StreamPurpose::Other, "Section Header Data");
else if (Dbi &&
StreamIdx ==
Dbi->getDebugStreamIndex(DbgHeaderType::SectionHdrOrig))
- Value = "Section Header Original Data";
+ Value =
+ std::make_pair(StreamPurpose::Other, "Section Header Original Data");
else if (Dbi &&
StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::TokenRidMap))
- Value = "Token Rid Data";
+ Value = std::make_pair(StreamPurpose::Other, "Token Rid Data");
else if (Dbi && StreamIdx == Dbi->getDebugStreamIndex(DbgHeaderType::Xdata))
- Value = "Xdata";
+ Value = std::make_pair(StreamPurpose::Other, "Xdata");
else {
auto ModIter = ModStreams.find(StreamIdx);
auto NSIter = NamedStreams.find(StreamIdx);
if (ModIter != ModStreams.end()) {
- Value = "Module \"";
- Value += ModIter->second.getModuleName();
- Value += "\"";
+ Value = std::make_pair(StreamPurpose::ModuleStream,
+ ModIter->second.getModuleName());
} else if (NSIter != NamedStreams.end()) {
- Value = "Named Stream \"";
- Value += NSIter->second;
- Value += "\"";
+ Value = std::make_pair(StreamPurpose::NamedStream, NSIter->second);
} else {
- Value = "???";
+ Value = std::make_pair(StreamPurpose::Other, "???");
}
}
Purposes[StreamIdx] = Value;
@@ -135,5 +135,18 @@ void discoverStreamPurposes(PDBFile &File,
if (!Info)
consumeError(Info.takeError());
}
-}
+
+void llvm::pdb::discoverStreamPurposes(PDBFile &File,
+ SmallVectorImpl<std::string> &Purposes) {
+ SmallVector<std::pair<StreamPurpose, std::string>, 24> SP;
+ discoverStreamPurposes(File, SP);
+ Purposes.reserve(SP.size());
+ for (const auto &P : SP) {
+ if (P.first == StreamPurpose::NamedStream)
+ Purposes.push_back(formatv("Named Stream \"{0}\"", P.second));
+ else if (P.first == StreamPurpose::ModuleStream)
+ Purposes.push_back(formatv("Module \"{0}\"", P.second));
+ else
+ Purposes.push_back(P.second);
+ }
}
diff --git a/tools/llvm-pdbutil/StreamUtil.h b/tools/llvm-pdbutil/StreamUtil.h
index b5c0beba44fe..f49c0a0eceb6 100644
--- a/tools/llvm-pdbutil/StreamUtil.h
+++ b/tools/llvm-pdbutil/StreamUtil.h
@@ -17,8 +17,13 @@
namespace llvm {
namespace pdb {
class PDBFile;
+enum class StreamPurpose { NamedStream, ModuleStream, Other };
+
void discoverStreamPurposes(PDBFile &File,
SmallVectorImpl<std::string> &Purposes);
+void discoverStreamPurposes(
+ PDBFile &File,
+ SmallVectorImpl<std::pair<StreamPurpose, std::string>> &Purposes);
}
}
diff --git a/tools/llvm-pdbutil/llvm-pdbutil.cpp b/tools/llvm-pdbutil/llvm-pdbutil.cpp
index ad11ad498000..6aa08ff3cd87 100644
--- a/tools/llvm-pdbutil/llvm-pdbutil.cpp
+++ b/tools/llvm-pdbutil/llvm-pdbutil.cpp
@@ -284,9 +284,32 @@ cl::opt<bool> NoEnumDefs("no-enum-definitions",
}
namespace diff {
-cl::list<std::string> InputFilenames(cl::Positional,
- cl::desc("<first> <second>"),
- cl::OneOrMore, cl::sub(DiffSubcommand));
+cl::opt<bool> PrintValueColumns(
+ "values", cl::init(true),
+ cl::desc("Print one column for each PDB with the field value"),
+ cl::Optional, cl::sub(DiffSubcommand));
+cl::opt<bool>
+ PrintResultColumn("result", cl::init(false),
+ cl::desc("Print a column with the result status"),
+ cl::Optional, cl::sub(DiffSubcommand));
+
+cl::opt<std::string> LeftRoot(
+ "left-bin-root", cl::Optional,
+ cl::desc("Treats the specified path as the root of the tree containing "
+ "binaries referenced by the left PDB. The root is stripped from "
+ "embedded paths when doing equality comparisons."),
+ cl::sub(DiffSubcommand));
+cl::opt<std::string> RightRoot(
+ "right-bin-root", cl::Optional,
+ cl::desc("Treats the specified path as the root of the tree containing "
+ "binaries referenced by the right PDB. The root is stripped from "
+             "embedded paths when doing equality comparisons."),
+ cl::sub(DiffSubcommand));
+
+cl::opt<std::string> Left(cl::Positional, cl::desc("<left>"),
+ cl::sub(DiffSubcommand));
+cl::opt<std::string> Right(cl::Positional, cl::desc("<right>"),
+ cl::sub(DiffSubcommand));
}
cl::OptionCategory FileOptions("Module & File Options");
@@ -399,7 +422,7 @@ cl::opt<bool> DumpTypeExtras("type-extras",
cl::cat(TypeOptions), cl::sub(DumpSubcommand));
cl::list<uint32_t> DumpTypeIndex(
- "type-index", cl::ZeroOrMore,
+ "type-index", cl::ZeroOrMore, cl::CommaSeparated,
cl::desc("only dump types with the specified hexadecimal type index"),
cl::cat(TypeOptions), cl::sub(DumpSubcommand));
@@ -415,7 +438,7 @@ cl::opt<bool> DumpIdExtras("id-extras",
cl::desc("dump id hashes and index offsets"),
cl::cat(TypeOptions), cl::sub(DumpSubcommand));
cl::list<uint32_t> DumpIdIndex(
- "id-index", cl::ZeroOrMore,
+ "id-index", cl::ZeroOrMore, cl::CommaSeparated,
cl::desc("only dump ids with the specified hexadecimal type index"),
cl::cat(TypeOptions), cl::sub(DumpSubcommand));
@@ -1079,6 +1102,11 @@ int main(int argc_, const char *argv_[]) {
if (opts::pdb2yaml::DumpModules)
opts::pdb2yaml::DbiStream = true;
}
+ if (opts::DiffSubcommand) {
+ if (!opts::diff::PrintResultColumn && !opts::diff::PrintValueColumns) {
+ llvm::errs() << "WARNING: No diff columns specified\n";
+ }
+ }
llvm::sys::InitializeCOMRAII COM(llvm::sys::COMThreadingMode::MultiThreaded);
@@ -1137,11 +1165,7 @@ int main(int argc_, const char *argv_[]) {
std::for_each(opts::bytes::InputFilenames.begin(),
opts::bytes::InputFilenames.end(), dumpBytes);
} else if (opts::DiffSubcommand) {
- if (opts::diff::InputFilenames.size() != 2) {
- errs() << "diff subcommand expects exactly 2 arguments.\n";
- exit(1);
- }
- diff(opts::diff::InputFilenames[0], opts::diff::InputFilenames[1]);
+ diff(opts::diff::Left, opts::diff::Right);
} else if (opts::MergeSubcommand) {
if (opts::merge::InputFilenames.size() < 2) {
errs() << "merge subcommand requires at least 2 input files.\n";
diff --git a/tools/llvm-pdbutil/llvm-pdbutil.h b/tools/llvm-pdbutil/llvm-pdbutil.h
index 9ee5866bbeff..4e92e639a127 100644
--- a/tools/llvm-pdbutil/llvm-pdbutil.h
+++ b/tools/llvm-pdbutil/llvm-pdbutil.h
@@ -168,6 +168,13 @@ extern llvm::cl::opt<bool> DumpModuleFiles;
extern llvm::cl::list<ModuleSubsection> DumpModuleSubsections;
extern llvm::cl::opt<bool> DumpModuleSyms;
} // namespace pdb2yaml
+
+namespace diff {
+extern llvm::cl::opt<bool> PrintValueColumns;
+extern llvm::cl::opt<bool> PrintResultColumn;
+extern llvm::cl::opt<std::string> LeftRoot;
+extern llvm::cl::opt<std::string> RightRoot;
+} // namespace diff
}
#endif
diff --git a/tools/llvm-profdata/llvm-profdata.cpp b/tools/llvm-profdata/llvm-profdata.cpp
index e9bc2de82bdf..eee242107dab 100644
--- a/tools/llvm-profdata/llvm-profdata.cpp
+++ b/tools/llvm-profdata/llvm-profdata.cpp
@@ -159,14 +159,20 @@ static void loadInput(const WeightedFile &Input, WriterContext *WC) {
for (auto &I : *Reader) {
const StringRef FuncName = I.Name;
- if (Error E = WC->Writer.addRecord(std::move(I), Input.Weight)) {
+ bool Reported = false;
+ WC->Writer.addRecord(std::move(I), Input.Weight, [&](Error E) {
+ if (Reported) {
+ consumeError(std::move(E));
+ return;
+ }
+ Reported = true;
// Only show hint the first time an error occurs.
instrprof_error IPE = InstrProfError::take(std::move(E));
std::unique_lock<std::mutex> ErrGuard{WC->ErrLock};
bool firstTime = WC->WriterErrorCodes.insert(IPE).second;
handleMergeWriterError(make_error<InstrProfError>(IPE), Input.Filename,
FuncName, firstTime);
- }
+ });
}
if (Reader->hasError())
WC->Err = Reader->getError();
@@ -174,8 +180,15 @@ static void loadInput(const WeightedFile &Input, WriterContext *WC) {
/// Merge the \p Src writer context into \p Dst.
static void mergeWriterContexts(WriterContext *Dst, WriterContext *Src) {
- if (Error E = Dst->Writer.mergeRecordsFromWriter(std::move(Src->Writer)))
+ bool Reported = false;
+ Dst->Writer.mergeRecordsFromWriter(std::move(Src->Writer), [&](Error E) {
+ if (Reported) {
+ consumeError(std::move(E));
+ return;
+ }
+ Reported = true;
Dst->Err = std::move(E);
+ });
}
static void mergeInstrProfile(const WeightedFileVector &Inputs,
@@ -499,8 +512,8 @@ static void showValueSitesStats(raw_fd_ostream &OS, uint32_t VK,
}
static int showInstrProfile(const std::string &Filename, bool ShowCounts,
- bool ShowIndirectCallTargets, bool ShowMemOPSizes,
- bool ShowDetailedSummary,
+ uint32_t TopN, bool ShowIndirectCallTargets,
+ bool ShowMemOPSizes, bool ShowDetailedSummary,
std::vector<uint32_t> DetailedSummaryCutoffs,
bool ShowAllFunctions,
const std::string &ShowFunction, bool TextFormat,
@@ -519,6 +532,17 @@ static int showInstrProfile(const std::string &Filename, bool ShowCounts,
size_t ShownFunctions = 0;
int NumVPKind = IPVK_Last - IPVK_First + 1;
std::vector<ValueSitesStats> VPStats(NumVPKind);
+
+ auto MinCmp = [](const std::pair<std::string, uint64_t> &v1,
+ const std::pair<std::string, uint64_t> &v2) {
+ return v1.second > v2.second;
+ };
+
+ std::priority_queue<std::pair<std::string, uint64_t>,
+ std::vector<std::pair<std::string, uint64_t>>,
+ decltype(MinCmp)>
+ HottestFuncs(MinCmp);
+
for (const auto &Func : *Reader) {
bool Show =
ShowAllFunctions || (!ShowFunction.empty() &&
@@ -528,13 +552,28 @@ static int showInstrProfile(const std::string &Filename, bool ShowCounts,
if (doTextFormatDump) {
InstrProfSymtab &Symtab = Reader->getSymtab();
- InstrProfWriter::writeRecordInText(Func, Symtab, OS);
+ InstrProfWriter::writeRecordInText(Func.Name, Func.Hash, Func, Symtab,
+ OS);
continue;
}
assert(Func.Counts.size() > 0 && "function missing entry counter");
Builder.addRecord(Func);
+ if (TopN) {
+ uint64_t FuncMax = 0;
+ for (size_t I = 0, E = Func.Counts.size(); I < E; ++I)
+ FuncMax = std::max(FuncMax, Func.Counts[I]);
+
+ if (HottestFuncs.size() == TopN) {
+ if (HottestFuncs.top().second < FuncMax) {
+ HottestFuncs.pop();
+ HottestFuncs.emplace(std::make_pair(std::string(Func.Name), FuncMax));
+ }
+ } else
+ HottestFuncs.emplace(std::make_pair(std::string(Func.Name), FuncMax));
+ }
+
if (Show) {
if (!ShownFunctions)
@@ -592,6 +631,18 @@ static int showInstrProfile(const std::string &Filename, bool ShowCounts,
OS << "Maximum function count: " << PS->getMaxFunctionCount() << "\n";
OS << "Maximum internal block count: " << PS->getMaxInternalCount() << "\n";
+ if (TopN) {
+ std::vector<std::pair<std::string, uint64_t>> SortedHottestFuncs;
+ while (!HottestFuncs.empty()) {
+ SortedHottestFuncs.emplace_back(HottestFuncs.top());
+ HottestFuncs.pop();
+ }
+ OS << "Top " << TopN
+ << " functions with the largest internal block counts: \n";
+ for (auto &hotfunc : llvm::reverse(SortedHottestFuncs))
+ OS << " " << hotfunc.first << ", max count = " << hotfunc.second << "\n";
+ }
+
if (ShownFunctions && ShowIndirectCallTargets) {
OS << "Statistics for indirect call sites profile:\n";
showValueSitesStats(OS, IPVK_IndirectCallTarget,
@@ -675,6 +726,9 @@ static int show_main(int argc, const char *argv[]) {
cl::desc("Profile kind:"), cl::init(instr),
cl::values(clEnumVal(instr, "Instrumentation profile (default)"),
clEnumVal(sample, "Sample profile")));
+ cl::opt<uint32_t> TopNFunctions(
+ "topn", cl::init(0),
+ cl::desc("Show the list of functions with the largest internal counts"));
cl::ParseCommandLineOptions(argc, argv, "LLVM profile data summary\n");
@@ -692,10 +746,10 @@ static int show_main(int argc, const char *argv[]) {
std::vector<uint32_t> Cutoffs(DetailedSummaryCutoffs.begin(),
DetailedSummaryCutoffs.end());
if (ProfileKind == instr)
- return showInstrProfile(Filename, ShowCounts, ShowIndirectCallTargets,
- ShowMemOPSizes, ShowDetailedSummary,
- DetailedSummaryCutoffs, ShowAllFunctions,
- ShowFunction, TextFormat, OS);
+ return showInstrProfile(Filename, ShowCounts, TopNFunctions,
+ ShowIndirectCallTargets, ShowMemOPSizes,
+ ShowDetailedSummary, DetailedSummaryCutoffs,
+ ShowAllFunctions, ShowFunction, TextFormat, OS);
else
return showSampleProfile(Filename, ShowCounts, ShowAllFunctions,
ShowFunction, OS);
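The --topn change above keeps the N hottest functions with a bounded min-heap: the comparator puts the smallest maximum count on top, so once the heap holds N entries a new function only displaces the current minimum. A minimal standalone sketch of that pattern follows; the names (keepTopN, FuncCount) are illustrative and are not part of the patch.

// Illustrative sketch of the keep-top-N pattern; not llvm-profdata code.
#include <cstdint>
#include <queue>
#include <string>
#include <utility>
#include <vector>

using FuncCount = std::pair<std::string, uint64_t>;

std::vector<FuncCount> keepTopN(const std::vector<FuncCount> &All, size_t N) {
  // Min-heap by count: the coldest of the kept functions sits on top.
  auto MinCmp = [](const FuncCount &A, const FuncCount &B) {
    return A.second > B.second;
  };
  std::priority_queue<FuncCount, std::vector<FuncCount>, decltype(MinCmp)>
      Heap(MinCmp);
  for (const auto &FC : All) {
    if (Heap.size() < N)
      Heap.push(FC);
    else if (!Heap.empty() && Heap.top().second < FC.second) {
      Heap.pop();
      Heap.push(FC);
    }
  }
  // Drain the heap; entries come out coldest-first, so reverse for display.
  std::vector<FuncCount> Result;
  for (; !Heap.empty(); Heap.pop())
    Result.push_back(Heap.top());
  return Result;
}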
diff --git a/tools/llvm-readobj/COFFDumper.cpp b/tools/llvm-readobj/COFFDumper.cpp
index e5ff3e4186de..9fb3267e2f9d 100644
--- a/tools/llvm-readobj/COFFDumper.cpp
+++ b/tools/llvm-readobj/COFFDumper.cpp
@@ -1637,7 +1637,11 @@ static StringRef getBaseRelocTypeName(uint8_t Type) {
case COFF::IMAGE_REL_BASED_HIGHADJ: return "HIGHADJ";
case COFF::IMAGE_REL_BASED_ARM_MOV32T: return "ARM_MOV32(T)";
case COFF::IMAGE_REL_BASED_DIR64: return "DIR64";
- default: return "unknown (" + llvm::utostr(Type) + ")";
+ default: {
+ static std::string Result;
+ Result = "unknown (" + llvm::utostr(Type) + ")";
+ return Result;
+ }
}
}
diff --git a/tools/llvm-readobj/WasmDumper.cpp b/tools/llvm-readobj/WasmDumper.cpp
index 14603f8a2b09..266226d59ee8 100644
--- a/tools/llvm-readobj/WasmDumper.cpp
+++ b/tools/llvm-readobj/WasmDumper.cpp
@@ -153,6 +153,12 @@ void WasmDumper::printSections() {
switch (WasmSec.Type) {
case wasm::WASM_SEC_CUSTOM:
W.printString("Name", WasmSec.Name);
+ if (WasmSec.Name == "linking") {
+ const wasm::WasmLinkingData &LinkingData = Obj->linkingData();
+ W.printNumber("DataSize", LinkingData.DataSize);
+ if (LinkingData.DataAlignment)
+ W.printNumber("DataAlignment", LinkingData.DataAlignment);
+ }
break;
case wasm::WASM_SEC_MEMORY:
ListScope Group(W, "Memories");
diff --git a/tools/llvm-shlib/CMakeLists.txt b/tools/llvm-shlib/CMakeLists.txt
index 3ebede00cc43..907345a94023 100644
--- a/tools/llvm-shlib/CMakeLists.txt
+++ b/tools/llvm-shlib/CMakeLists.txt
@@ -37,7 +37,7 @@ endif()
add_llvm_library(LLVM SHARED DISABLE_LLVM_LINK_LLVM_DYLIB SONAME ${SOURCES})
list(REMOVE_DUPLICATES LIB_NAMES)
-if(("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") OR (MINGW) OR ("${CMAKE_SYSTEM_NAME}" STREQUAL "FreeBSD") OR ("${CMAKE_SYSTEM_NAME}" STREQUAL "DragonFly")) # FIXME: It should be "GNU ld for elf"
+if(("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") OR (MINGW) OR (HAIKU) OR ("${CMAKE_SYSTEM_NAME}" STREQUAL "FreeBSD") OR ("${CMAKE_SYSTEM_NAME}" STREQUAL "DragonFly")) # FIXME: It should be "GNU ld for elf"
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/simple_version_script.map.in
${LLVM_LIBRARY_DIR}/tools/llvm-shlib/simple_version_script.map)
diff --git a/tools/llvm-stress/llvm-stress.cpp b/tools/llvm-stress/llvm-stress.cpp
index 3cf8b37bc2e2..3945da7020b0 100644
--- a/tools/llvm-stress/llvm-stress.cpp
+++ b/tools/llvm-stress/llvm-stress.cpp
@@ -385,10 +385,10 @@ struct ConstModifier: public Modifier {
if (Ty->isVectorTy()) {
switch (getRandom() % 2) {
- case 0: if (Ty->getScalarType()->isIntegerTy())
+ case 0: if (Ty->isIntOrIntVectorTy())
return PT->push_back(ConstantVector::getAllOnesValue(Ty));
break;
- case 1: if (Ty->getScalarType()->isIntegerTy())
+ case 1: if (Ty->isIntOrIntVectorTy())
return PT->push_back(ConstantVector::getNullValue(Ty));
}
}
@@ -531,8 +531,7 @@ struct CastModifier: public Modifier {
}
// Both types are integers:
- if (VTy->getScalarType()->isIntegerTy() &&
- DestTy->getScalarType()->isIntegerTy()) {
+ if (VTy->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy()) {
if (VSize > DestSize) {
return PT->push_back(
new TruncInst(V, DestTy, "Tr", BB->getTerminator()));
@@ -546,8 +545,7 @@ struct CastModifier: public Modifier {
}
// Fp to int.
- if (VTy->getScalarType()->isFloatingPointTy() &&
- DestTy->getScalarType()->isIntegerTy()) {
+ if (VTy->isFPOrFPVectorTy() && DestTy->isIntOrIntVectorTy()) {
if (getRandom() & 1)
return PT->push_back(
new FPToSIInst(V, DestTy, "FC", BB->getTerminator()));
@@ -555,8 +553,7 @@ struct CastModifier: public Modifier {
}
// Int to fp.
- if (VTy->getScalarType()->isIntegerTy() &&
- DestTy->getScalarType()->isFloatingPointTy()) {
+ if (VTy->isIntOrIntVectorTy() && DestTy->isFPOrFPVectorTy()) {
if (getRandom() & 1)
return PT->push_back(
new SIToFPInst(V, DestTy, "FC", BB->getTerminator()));
@@ -565,8 +562,7 @@ struct CastModifier: public Modifier {
}
// Both floats.
- if (VTy->getScalarType()->isFloatingPointTy() &&
- DestTy->getScalarType()->isFloatingPointTy()) {
+ if (VTy->isFPOrFPVectorTy() && DestTy->isFPOrFPVectorTy()) {
if (VSize > DestSize) {
return PT->push_back(
new FPTruncInst(V, DestTy, "Tr", BB->getTerminator()));
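The llvm-stress cleanups above rely on Type::isIntOrIntVectorTy() (and its FP analogue) being shorthand for querying the scalar type, so behaviour is unchanged for both scalar and vector types. A small sketch against the public IR API, assuming it is built against an LLVM of this vintage:

// Illustrative check only; not part of llvm-stress.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

int main() {
  llvm::LLVMContext Ctx;
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  llvm::Type *V4I32 = llvm::VectorType::get(I32, 4);
  // The predicate form and the explicit scalar-type query agree.
  assert(I32->isIntOrIntVectorTy() == I32->getScalarType()->isIntegerTy());
  assert(V4I32->isIntOrIntVectorTy() == V4I32->getScalarType()->isIntegerTy());
  return 0;
}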
diff --git a/tools/obj2yaml/wasm2yaml.cpp b/tools/obj2yaml/wasm2yaml.cpp
index 1df6afcf3c46..a1da4b6a748c 100644
--- a/tools/obj2yaml/wasm2yaml.cpp
+++ b/tools/obj2yaml/wasm2yaml.cpp
@@ -236,9 +236,10 @@ ErrorOr<WasmYAML::Object *> WasmDumper::dump() {
auto DataSec = make_unique<WasmYAML::DataSection>();
for (auto &Segment : Obj.dataSegments()) {
WasmYAML::DataSegment Seg;
- Seg.Index = Segment.Index;
- Seg.Offset = Segment.Offset;
- Seg.Content = yaml::BinaryRef(Segment.Content);
+ Seg.SectionOffset = Segment.SectionOffset;
+ Seg.MemoryIndex = Segment.Data.MemoryIndex;
+ Seg.Offset = Segment.Data.Offset;
+ Seg.Content = yaml::BinaryRef(Segment.Data.Content);
DataSec->Segments.push_back(Seg);
}
S = std::move(DataSec);
diff --git a/tools/opt-viewer/CMakeLists.txt b/tools/opt-viewer/CMakeLists.txt
new file mode 100644
index 000000000000..19b606933082
--- /dev/null
+++ b/tools/opt-viewer/CMakeLists.txt
@@ -0,0 +1,13 @@
+set (files
+ "opt-diff.py"
+ "opt-stats.py"
+ "opt-viewer.py"
+ "optpmap.py"
+ "optrecord.py"
+ "style.css")
+
+foreach (file ${files})
+ install(PROGRAMS ${file}
+ DESTINATION share/opt-viewer
+ COMPONENT opt-viewer)
+endforeach (file)
diff --git a/utils/opt-viewer/opt-diff.py b/tools/opt-viewer/opt-diff.py
index 9e921f8488d3..9e921f8488d3 100755
--- a/utils/opt-viewer/opt-diff.py
+++ b/tools/opt-viewer/opt-diff.py
diff --git a/utils/opt-viewer/opt-stats.py b/tools/opt-viewer/opt-stats.py
index a7e598fdfd02..a7e598fdfd02 100755
--- a/utils/opt-viewer/opt-stats.py
+++ b/tools/opt-viewer/opt-stats.py
diff --git a/utils/opt-viewer/opt-viewer.py b/tools/opt-viewer/opt-viewer.py
index 5e5daf7feb0d..e6dd6a0286fe 100755
--- a/utils/opt-viewer/opt-viewer.py
+++ b/tools/opt-viewer/opt-viewer.py
@@ -178,7 +178,10 @@ def map_remarks(all_remarks):
for arg in remark.Args:
caller = arg.get('Caller')
if caller:
- context.caller_loc[caller] = arg['DebugLoc']
+ try:
+ context.caller_loc[caller] = arg['DebugLoc']
+ except KeyError:
+ pass
def generate_report(all_remarks,
diff --git a/utils/opt-viewer/optpmap.py b/tools/opt-viewer/optpmap.py
index 01e848e03976..01e848e03976 100644
--- a/utils/opt-viewer/optpmap.py
+++ b/tools/opt-viewer/optpmap.py
diff --git a/utils/opt-viewer/optrecord.py b/tools/opt-viewer/optrecord.py
index 61ed9626cffa..61ed9626cffa 100644
--- a/utils/opt-viewer/optrecord.py
+++ b/tools/opt-viewer/optrecord.py
diff --git a/utils/opt-viewer/style.css b/tools/opt-viewer/style.css
index 595c3e46847d..595c3e46847d 100644
--- a/utils/opt-viewer/style.css
+++ b/tools/opt-viewer/style.css
diff --git a/tools/opt/NewPMDriver.cpp b/tools/opt/NewPMDriver.cpp
index 58e9caeff0fb..94242d795aae 100644
--- a/tools/opt/NewPMDriver.cpp
+++ b/tools/opt/NewPMDriver.cpp
@@ -48,6 +48,102 @@ static cl::opt<std::string>
"pipeline for handling managed aliasing queries"),
cl::Hidden);
+/// @{ These options accept textual pipeline descriptions which will be
+/// inserted into default pipelines at the respective extension points
+static cl::opt<std::string> PeepholeEPPipeline(
+ "passes-ep-peephole",
+ cl::desc("A textual description of the function pass pipeline inserted at "
+ "the Peephole extension points into default pipelines"),
+ cl::Hidden);
+static cl::opt<std::string> LateLoopOptimizationsEPPipeline(
+ "passes-ep-late-loop-optimizations",
+ cl::desc(
+ "A textual description of the loop pass pipeline inserted at "
+ "the LateLoopOptimizations extension point into default pipelines"),
+ cl::Hidden);
+static cl::opt<std::string> LoopOptimizerEndEPPipeline(
+ "passes-ep-loop-optimizer-end",
+ cl::desc("A textual description of the loop pass pipeline inserted at "
+ "the LoopOptimizerEnd extension point into default pipelines"),
+ cl::Hidden);
+static cl::opt<std::string> ScalarOptimizerLateEPPipeline(
+ "passes-ep-scalar-optimizer-late",
+ cl::desc("A textual description of the function pass pipeline inserted at "
+ "the ScalarOptimizerLate extension point into default pipelines"),
+ cl::Hidden);
+static cl::opt<std::string> CGSCCOptimizerLateEPPipeline(
+ "passes-ep-cgscc-optimizer-late",
+ cl::desc("A textual description of the cgscc pass pipeline inserted at "
+ "the CGSCCOptimizerLate extension point into default pipelines"),
+ cl::Hidden);
+static cl::opt<std::string> VectorizerStartEPPipeline(
+ "passes-ep-vectorizer-start",
+ cl::desc("A textual description of the function pass pipeline inserted at "
+ "the VectorizerStart extension point into default pipelines"),
+ cl::Hidden);
+/// @}
+
+template <typename PassManagerT>
+bool tryParsePipelineText(PassBuilder &PB, StringRef PipelineText) {
+ if (PipelineText.empty())
+ return false;
+
+ // Verify the pipeline is parseable:
+ PassManagerT PM;
+ if (PB.parsePassPipeline(PM, PipelineText))
+ return true;
+
+ errs() << "Could not parse pipeline '" << PipelineText
+         << "'. I'm going to ignore it.\n";
+ return false;
+}
+
+/// If one of the EPPipeline command line options was given, register callbacks
+/// for parsing and inserting the given pipeline
+static void registerEPCallbacks(PassBuilder &PB, bool VerifyEachPass,
+ bool DebugLogging) {
+ if (tryParsePipelineText<FunctionPassManager>(PB, PeepholeEPPipeline))
+ PB.registerPeepholeEPCallback([&PB, VerifyEachPass, DebugLogging](
+ FunctionPassManager &PM, PassBuilder::OptimizationLevel Level) {
+ PB.parsePassPipeline(PM, PeepholeEPPipeline, VerifyEachPass,
+ DebugLogging);
+ });
+ if (tryParsePipelineText<LoopPassManager>(PB,
+ LateLoopOptimizationsEPPipeline))
+ PB.registerLateLoopOptimizationsEPCallback(
+ [&PB, VerifyEachPass, DebugLogging](
+ LoopPassManager &PM, PassBuilder::OptimizationLevel Level) {
+ PB.parsePassPipeline(PM, LateLoopOptimizationsEPPipeline,
+ VerifyEachPass, DebugLogging);
+ });
+ if (tryParsePipelineText<LoopPassManager>(PB, LoopOptimizerEndEPPipeline))
+ PB.registerLoopOptimizerEndEPCallback([&PB, VerifyEachPass, DebugLogging](
+ LoopPassManager &PM, PassBuilder::OptimizationLevel Level) {
+ PB.parsePassPipeline(PM, LoopOptimizerEndEPPipeline, VerifyEachPass,
+ DebugLogging);
+ });
+ if (tryParsePipelineText<FunctionPassManager>(PB,
+ ScalarOptimizerLateEPPipeline))
+ PB.registerScalarOptimizerLateEPCallback(
+ [&PB, VerifyEachPass, DebugLogging](
+ FunctionPassManager &PM, PassBuilder::OptimizationLevel Level) {
+ PB.parsePassPipeline(PM, ScalarOptimizerLateEPPipeline,
+ VerifyEachPass, DebugLogging);
+ });
+ if (tryParsePipelineText<CGSCCPassManager>(PB, CGSCCOptimizerLateEPPipeline))
+ PB.registerCGSCCOptimizerLateEPCallback([&PB, VerifyEachPass, DebugLogging](
+ CGSCCPassManager &PM, PassBuilder::OptimizationLevel Level) {
+ PB.parsePassPipeline(PM, CGSCCOptimizerLateEPPipeline, VerifyEachPass,
+ DebugLogging);
+ });
+ if (tryParsePipelineText<FunctionPassManager>(PB, VectorizerStartEPPipeline))
+ PB.registerVectorizerStartEPCallback([&PB, VerifyEachPass, DebugLogging](
+ FunctionPassManager &PM, PassBuilder::OptimizationLevel Level) {
+ PB.parsePassPipeline(PM, VectorizerStartEPPipeline, VerifyEachPass,
+ DebugLogging);
+ });
+}
+
bool llvm::runPassPipeline(StringRef Arg0, Module &M, TargetMachine *TM,
tool_output_file *Out,
tool_output_file *ThinLTOLinkOut,
@@ -56,7 +152,9 @@ bool llvm::runPassPipeline(StringRef Arg0, Module &M, TargetMachine *TM,
bool ShouldPreserveAssemblyUseListOrder,
bool ShouldPreserveBitcodeUseListOrder,
bool EmitSummaryIndex, bool EmitModuleHash) {
+ bool VerifyEachPass = VK == VK_VerifyEachPass;
PassBuilder PB(TM);
+ registerEPCallbacks(PB, VerifyEachPass, DebugPM);
// Specially handle the alias analysis manager so that we can register
// a custom pipeline of AA passes with it.
@@ -85,8 +183,7 @@ bool llvm::runPassPipeline(StringRef Arg0, Module &M, TargetMachine *TM,
if (VK > VK_NoVerifier)
MPM.addPass(VerifierPass());
- if (!PB.parsePassPipeline(MPM, PassPipeline, VK == VK_VerifyEachPass,
- DebugPM)) {
+ if (!PB.parsePassPipeline(MPM, PassPipeline, VerifyEachPass, DebugPM)) {
errs() << Arg0 << ": unable to parse pass pipeline description.\n";
return false;
}
diff --git a/tools/sanstats/sanstats.cpp b/tools/sanstats/sanstats.cpp
index b2216eab119e..4463c0f0e48c 100644
--- a/tools/sanstats/sanstats.cpp
+++ b/tools/sanstats/sanstats.cpp
@@ -76,8 +76,11 @@ const char *ReadModule(char SizeofPtr, const char *Begin, const char *End) {
if (Begin == End)
return nullptr;
+ // As the instrumentation tracks the return address and not
+  // the address of the call to `__sanitizer_stats_report`, we
+ // remove one from the address to get the correct DI.
if (Expected<DILineInfo> LineInfo =
- Symbolizer.symbolizeCode(Filename, Addr)) {
+ Symbolizer.symbolizeCode(Filename, Addr - 1)) {
llvm::outs() << LineInfo->FileName << ':' << LineInfo->Line << ' '
<< LineInfo->FunctionName << ' ';
} else {
diff --git a/tools/yaml2obj/yaml2wasm.cpp b/tools/yaml2obj/yaml2wasm.cpp
index 110700d40c32..059ec5f9edcd 100644
--- a/tools/yaml2obj/yaml2wasm.cpp
+++ b/tools/yaml2obj/yaml2wasm.cpp
@@ -338,7 +338,7 @@ int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::DataSection &Section) {
encodeULEB128(Section.Segments.size(), OS);
for (auto &Segment : Section.Segments) {
- encodeULEB128(Segment.Index, OS);
+ encodeULEB128(Segment.MemoryIndex, OS);
writeInitExpr(Segment.Offset, OS);
encodeULEB128(Segment.Content.binary_size(), OS);
Segment.Content.writeAsBinary(OS);
diff --git a/unittests/ADT/APFloatTest.cpp b/unittests/ADT/APFloatTest.cpp
index 338f2b8a5a66..69da089f172b 100644
--- a/unittests/ADT/APFloatTest.cpp
+++ b/unittests/ADT/APFloatTest.cpp
@@ -552,7 +552,7 @@ TEST(APFloatTest, MaxNum) {
EXPECT_EQ(2.0, maxnum(f1, f2).convertToDouble());
EXPECT_EQ(2.0, maxnum(f2, f1).convertToDouble());
EXPECT_EQ(1.0, maxnum(f1, nan).convertToDouble());
- EXPECT_EQ(1.0, minnum(nan, f1).convertToDouble());
+ EXPECT_EQ(1.0, maxnum(nan, f1).convertToDouble());
}
TEST(APFloatTest, Denormal) {
@@ -745,7 +745,7 @@ TEST(APFloatTest, fromZeroDecimalLargeExponentString) {
EXPECT_EQ(0.0, APFloat(APFloat::IEEEdouble(), "000.0000e1234").convertToDouble());
EXPECT_EQ(0.0, APFloat(APFloat::IEEEdouble(), "000.0000e-1234").convertToDouble());
- EXPECT_EQ(0.0, APFloat(APFloat::IEEEdouble(), StringRef("0e1234\02", 6)).convertToDouble());
+ EXPECT_EQ(0.0, APFloat(APFloat::IEEEdouble(), StringRef("0e1234" "\0" "2", 6)).convertToDouble());
}
TEST(APFloatTest, fromZeroHexadecimalString) {
@@ -1059,11 +1059,11 @@ TEST(APFloatTest, StringDecimalDeath) {
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("\0", 1)), "Invalid character in significand");
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("1\0", 2)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("1\02", 3)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("1\02e1", 5)), "Invalid character in significand");
+ EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("1" "\0" "2", 3)), "Invalid character in significand");
+ EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("1" "\0" "2e1", 5)), "Invalid character in significand");
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("1e\0", 3)), "Invalid character in exponent");
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("1e1\0", 4)), "Invalid character in exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("1e1\02", 5)), "Invalid character in exponent");
+ EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("1e1" "\0" "2", 5)), "Invalid character in exponent");
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), "1.0f"), "Invalid character in significand");
@@ -1149,11 +1149,11 @@ TEST(APFloatTest, StringHexadecimalDeath) {
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x\0", 3)), "Invalid character in significand");
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x1\0", 4)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x1\02", 5)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x1\02p1", 7)), "Invalid character in significand");
+ EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x1" "\0" "2", 5)), "Invalid character in significand");
+ EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x1" "\0" "2p1", 7)), "Invalid character in significand");
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x1p\0", 5)), "Invalid character in exponent");
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x1p1\0", 6)), "Invalid character in exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x1p1\02", 7)), "Invalid character in exponent");
+ EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), StringRef("0x1p1" "\0" "2", 7)), "Invalid character in exponent");
EXPECT_DEATH(APFloat(APFloat::IEEEdouble(), "0x1p0f"), "Invalid character in exponent");
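The string-literal rewrites above matter because "\02" is a single octal escape (the character with value 2), not a NUL byte followed by '2'; splitting the literal into "1" "\0" "2" yields the intended embedded NUL. A quick standalone check of the two spellings, independent of APFloat:

// Standalone illustration of the escape-sequence difference; not test code.
static_assert(sizeof("1\02") == 3, "'1', '\\002', terminator");
static_assert(sizeof("1" "\0" "2") == 4, "'1', '\\0', '2', terminator");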
diff --git a/unittests/ADT/FunctionRefTest.cpp b/unittests/ADT/FunctionRefTest.cpp
index 075d9a070df7..b7ef7d79e5f9 100644
--- a/unittests/ADT/FunctionRefTest.cpp
+++ b/unittests/ADT/FunctionRefTest.cpp
@@ -14,6 +14,20 @@ using namespace llvm;
namespace {
+// Ensure that there is a default constructor and we can test for a null
+// function_ref.
+TEST(FunctionRefTest, Null) {
+ function_ref<int()> F;
+ EXPECT_FALSE(F);
+
+ auto L = [] { return 1; };
+ F = L;
+ EXPECT_TRUE(F);
+
+ F = {};
+ EXPECT_FALSE(F);
+}
+
// Ensure that copies of a function_ref copy the underlying state rather than
// causing one function_ref to chain to the next.
TEST(FunctionRefTest, Copy) {
diff --git a/unittests/Analysis/AliasAnalysisTest.cpp b/unittests/Analysis/AliasAnalysisTest.cpp
index 84a04257bc27..9a864b77a9d8 100644
--- a/unittests/Analysis/AliasAnalysisTest.cpp
+++ b/unittests/Analysis/AliasAnalysisTest.cpp
@@ -180,10 +180,11 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
auto *CmpXChg1 = new AtomicCmpXchgInst(
Addr, ConstantInt::get(IntType, 0), ConstantInt::get(IntType, 1),
- AtomicOrdering::Monotonic, AtomicOrdering::Monotonic, CrossThread, BB);
+ AtomicOrdering::Monotonic, AtomicOrdering::Monotonic,
+ SyncScope::System, BB);
auto *AtomicRMW =
new AtomicRMWInst(AtomicRMWInst::Xchg, Addr, ConstantInt::get(IntType, 1),
- AtomicOrdering::Monotonic, CrossThread, BB);
+ AtomicOrdering::Monotonic, SyncScope::System, BB);
ReturnInst::Create(C, nullptr, BB);
diff --git a/unittests/Analysis/CGSCCPassManagerTest.cpp b/unittests/Analysis/CGSCCPassManagerTest.cpp
index ab5d1862c23e..d46d9535fa4b 100644
--- a/unittests/Analysis/CGSCCPassManagerTest.cpp
+++ b/unittests/Analysis/CGSCCPassManagerTest.cpp
@@ -680,6 +680,7 @@ TEST_F(CGSCCPassManagerTest, TestSCCPassCanPreserveFunctionAnalysis) {
LazyCallGraph &, CGSCCUpdateResult &) {
PreservedAnalyses PA;
PA.preserve<LazyCallGraphAnalysis>();
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
PA.preserve<TestFunctionAnalysis>();
return PA;
}));
@@ -719,12 +720,14 @@ TEST_F(CGSCCPassManagerTest,
CGPM1.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM1)));
MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
- // Now run a module pass that preserves the LazyCallGraph and proxy but not
+ // Now run a module pass that preserves the LazyCallGraph and proxies but not
// the Function analysis.
MPM.addPass(LambdaModulePass([&](Module &M, ModuleAnalysisManager &) {
PreservedAnalyses PA;
PA.preserve<LazyCallGraphAnalysis>();
PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ PA.preserve<FunctionAnalysisManagerModuleProxy>();
return PA;
}));
@@ -741,7 +744,7 @@ TEST_F(CGSCCPassManagerTest,
EXPECT_EQ(2 * 6, FunctionAnalysisRuns);
}
-// Check that by marking the function pass and FAM proxy as preserved, this
+// Check that by marking the function pass and proxies as preserved, this
// propagates all the way through.
TEST_F(CGSCCPassManagerTest,
TestModulePassCanPreserveFunctionAnalysisNestedInCGSCC) {
@@ -765,6 +768,7 @@ TEST_F(CGSCCPassManagerTest,
PreservedAnalyses PA;
PA.preserve<LazyCallGraphAnalysis>();
PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
PA.preserve<FunctionAnalysisManagerModuleProxy>();
PA.preserve<TestFunctionAnalysis>();
return PA;
@@ -1014,6 +1018,9 @@ TEST_F(CGSCCPassManagerTest, TestIndirectAnalysisInvalidation) {
FunctionCount += IndirectResult.SCCDep.FunctionCount;
return PreservedAnalyses::all();
}));
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(
+ RequireAnalysisPass<TestIndirectFunctionAnalysis, Function>()));
+
// Next, invalidate
// - both analyses for the (f) and (x) SCCs,
// - just the underlying (indirect) analysis for (g) SCC, and
@@ -1026,14 +1033,16 @@ TEST_F(CGSCCPassManagerTest, TestIndirectAnalysisInvalidation) {
auto &IndirectResult = DoublyIndirectResult.IDep;
FunctionCount += IndirectResult.SCCDep.FunctionCount;
auto PA = PreservedAnalyses::none();
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ PA.preserveSet<AllAnalysesOn<Function>>();
if (C.getName() == "(g)")
PA.preserve<TestSCCAnalysis>();
else if (C.getName() == "(h3, h1, h2)")
PA.preserve<TestIndirectSCCAnalysis>();
return PA;
}));
- // Finally, use the analysis again on each function, forcing re-computation
- // for all of them.
+ // Finally, use the analysis again on each SCC (and function), forcing
+ // re-computation for all of them.
CGPM.addPass(
LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
LazyCallGraph &CG, CGSCCUpdateResult &) {
@@ -1043,6 +1052,8 @@ TEST_F(CGSCCPassManagerTest, TestIndirectAnalysisInvalidation) {
FunctionCount += IndirectResult.SCCDep.FunctionCount;
return PreservedAnalyses::all();
}));
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(
+ RequireAnalysisPass<TestIndirectFunctionAnalysis, Function>()));
// Create a second CGSCC pass manager. This will cause the module-level
// invalidation to occur, which will force yet another invalidation of the
@@ -1058,13 +1069,15 @@ TEST_F(CGSCCPassManagerTest, TestIndirectAnalysisInvalidation) {
FunctionCount += IndirectResult.SCCDep.FunctionCount;
return PreservedAnalyses::all();
}));
+ CGPM2.addPass(createCGSCCToFunctionPassAdaptor(
+ RequireAnalysisPass<TestIndirectFunctionAnalysis, Function>()));
- // Add a requires pass to populate the module analysis and then our function
+ // Add a requires pass to populate the module analysis and then our CGSCC
// pass pipeline.
MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
// Now require the module analysis again (it will have been invalidated once)
- // and then use it again from a function pass manager.
+  // and then use it again from our second CGSCC pipeline.
MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
MPM.run(*M, MAM);
@@ -1080,7 +1093,180 @@ TEST_F(CGSCCPassManagerTest, TestIndirectAnalysisInvalidation) {
EXPECT_EQ(3 * 4, IndirectSCCAnalysisRuns);
EXPECT_EQ(3 * 4, DoublyIndirectSCCAnalysisRuns);
+ // We run the indirect function analysis once per function the first time.
+ // Then we re-run it for every SCC but "(g)". Then we re-run it for every
+ // function again.
+ EXPECT_EQ(6 + 5 + 6, IndirectFunctionAnalysisRuns);
+
// Four passes count each of six functions once (via SCCs).
EXPECT_EQ(4 * 6, FunctionCount);
}
+
+TEST_F(CGSCCPassManagerTest, TestAnalysisInvalidationCGSCCUpdate) {
+ int ModuleAnalysisRuns = 0;
+ MAM.registerPass([&] { return TestModuleAnalysis(ModuleAnalysisRuns); });
+
+ int SCCAnalysisRuns = 0, IndirectSCCAnalysisRuns = 0,
+ DoublyIndirectSCCAnalysisRuns = 0;
+ CGAM.registerPass([&] { return TestSCCAnalysis(SCCAnalysisRuns); });
+ CGAM.registerPass(
+ [&] { return TestIndirectSCCAnalysis(IndirectSCCAnalysisRuns); });
+ CGAM.registerPass([&] {
+ return TestDoublyIndirectSCCAnalysis(DoublyIndirectSCCAnalysisRuns);
+ });
+
+ int FunctionAnalysisRuns = 0, IndirectFunctionAnalysisRuns = 0;
+ FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+ FAM.registerPass([&] {
+ return TestIndirectFunctionAnalysis(IndirectFunctionAnalysisRuns);
+ });
+
+ ModulePassManager MPM(/*DebugLogging*/ true);
+
+ CGSCCPassManager CGPM(/*DebugLogging*/ true);
+ // First just use the analysis to get the function count and preserve
+ // everything.
+ using RequireTestIndirectFunctionAnalysisPass =
+ RequireAnalysisPass<TestIndirectFunctionAnalysis, Function>;
+ using RequireTestDoublyIndirectSCCAnalysisPass =
+ RequireAnalysisPass<TestDoublyIndirectSCCAnalysis, LazyCallGraph::SCC,
+ CGSCCAnalysisManager, LazyCallGraph &,
+ CGSCCUpdateResult &>;
+ CGPM.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(
+ RequireTestIndirectFunctionAnalysisPass()));
+
+ // Next, we inject an SCC pass that invalidates everything for the `(h3, h1,
+ // h2)` SCC but also deletes the call edge from `h2` to `h3` and updates the
+ // CG. This should successfully invalidate (and force to be re-run) all the
+ // analyses for that SCC and for the functions.
+ CGPM.addPass(
+ LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+ LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+ (void)AM.getResult<TestDoublyIndirectSCCAnalysis>(C, CG);
+ if (C.getName() != "(h3, h1, h2)")
+ return PreservedAnalyses::all();
+
+ // Build the preserved set.
+ auto PA = PreservedAnalyses::none();
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ PA.preserve<TestIndirectSCCAnalysis>();
+ PA.preserve<TestDoublyIndirectSCCAnalysis>();
+
+ // Delete the call from `h2` to `h3`.
+ auto &H2N = *llvm::find_if(
+ C, [](LazyCallGraph::Node &N) { return N.getName() == "h2"; });
+ auto &H2F = H2N.getFunction();
+ auto &H3F = *cast<CallInst>(H2F.begin()->begin())->getCalledFunction();
+ assert(H3F.getName() == "h3" && "Wrong called function!");
+ H2F.begin()->begin()->eraseFromParent();
+ // Insert a bitcast of `h3` so that we retain a ref edge to it.
+ (void)CastInst::CreatePointerCast(&H3F,
+ Type::getInt8PtrTy(H2F.getContext()),
+ "dummy", &*H2F.begin()->begin());
+
+ // Now update the call graph.
+ auto &NewC = updateCGAndAnalysisManagerForFunctionPass(
+ CG, C, H2N, AM, UR, /*DebugLogging*/ true);
+ assert(&NewC != &C && "Should get a new SCC due to update!");
+ (void)&NewC;
+
+ return PA;
+ }));
+ // Now use the analysis again on each SCC and function, forcing
+ // re-computation for all of them.
+ CGPM.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(
+ RequireTestIndirectFunctionAnalysisPass()));
+
+ // Create another CGSCC pipeline that requires all the analyses again.
+ CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+ CGPM2.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+ CGPM2.addPass(createCGSCCToFunctionPassAdaptor(
+ RequireTestIndirectFunctionAnalysisPass()));
+
+ // Next we inject an SCC pass that finds the `(h2)` SCC, adds a call to `h3`
+ // back to `h2`, and then invalidates everything for what will then be the
+ // `(h3, h1, h2)` SCC again.
+ CGSCCPassManager CGPM3(/*DebugLogging*/ true);
+ CGPM3.addPass(
+ LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+ LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+ (void)AM.getResult<TestDoublyIndirectSCCAnalysis>(C, CG);
+ if (C.getName() != "(h2)")
+ return PreservedAnalyses::all();
+
+ // Build the preserved set.
+ auto PA = PreservedAnalyses::none();
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ PA.preserve<TestIndirectSCCAnalysis>();
+ PA.preserve<TestDoublyIndirectSCCAnalysis>();
+
+ // Delete the bitcast of `h3` that we added earlier.
+ auto &H2N = *C.begin();
+ auto &H2F = H2N.getFunction();
+ auto &H3F = *cast<Function>(cast<BitCastInst>(H2F.begin()->begin())->getOperand(0));
+ assert(H3F.getName() == "h3" && "Wrong called function!");
+ H2F.begin()->begin()->eraseFromParent();
+ // And insert a call to `h3`.
+ (void)CallInst::Create(&H3F, {}, "", &*H2F.begin()->begin());
+
+ // Now update the call graph.
+ auto &NewC = updateCGAndAnalysisManagerForFunctionPass(
+ CG, C, H2N, AM, UR, /*DebugLogging*/ true);
+ assert(&NewC != &C && "Should get a new SCC due to update!");
+ (void)&NewC;
+
+ return PA;
+ }));
+ // Now use the analysis again on each SCC and function, forcing
+ // re-computation for all of them.
+ CGPM3.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+ CGPM3.addPass(createCGSCCToFunctionPassAdaptor(
+ RequireTestIndirectFunctionAnalysisPass()));
+
+ // Create a second CGSCC pass manager. This will cause the module-level
+ // invalidation to occur, which will force yet another invalidation of the
+ // indirect SCC-level analysis as the module analysis it depends on gets
+ // invalidated.
+ CGSCCPassManager CGPM4(/*DebugLogging*/ true);
+ CGPM4.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+ CGPM4.addPass(createCGSCCToFunctionPassAdaptor(
+ RequireTestIndirectFunctionAnalysisPass()));
+
+ // Add a requires pass to populate the module analysis and then one of our
+ // CGSCC pipelines. Repeat for all four CGSCC pipelines.
+ MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+ MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+ MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM3)));
+ MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM4)));
+ MPM.run(*M, MAM);
+
+ // We run over four SCCs the first time. But then we split an SCC into three.
+ // And then we merge those three back into one.
+ EXPECT_EQ(4 + 3 + 1, SCCAnalysisRuns);
+ // The module analysis pass should be run three times.
+ EXPECT_EQ(3, ModuleAnalysisRuns);
+ // We run over four SCCs the first time. Then over the two new ones. Then the
+ // entire module is invalidated causing a full run over all seven. Then we
+ // fold three SCCs back to one, and then run over the whole module again.
+ EXPECT_EQ(4 + 2 + 7 + 1 + 4, IndirectSCCAnalysisRuns);
+ EXPECT_EQ(4 + 2 + 7 + 1 + 4, DoublyIndirectSCCAnalysisRuns);
+
+ // First we run over all six functions. Then we re-run it over three when we
+ // split their SCCs. Then we re-run over the whole module. Then we re-run
+ // over three functions merged back into a single SCC, and then over the
+ // whole module again.
+ EXPECT_EQ(6 + 3 + 6 + 3 + 6, FunctionAnalysisRuns);
+
+  // Re-run the function analysis over the entire module, and then re-run it
+ // over the `(h3, h1, h2)` SCC due to invalidation. Then we re-run it over
+ // the entire module, then the three functions merged back into a single SCC,
+ // and then over the whole module.
+ EXPECT_EQ(6 + 3 + 6 + 3 + 6, IndirectFunctionAnalysisRuns);
+}
}
diff --git a/unittests/Analysis/LazyCallGraphTest.cpp b/unittests/Analysis/LazyCallGraphTest.cpp
index 8c251cf043b8..65730486cd75 100644
--- a/unittests/Analysis/LazyCallGraphTest.cpp
+++ b/unittests/Analysis/LazyCallGraphTest.cpp
@@ -1277,9 +1277,10 @@ TEST(LazyCallGraphTest, InternalEdgeMutation) {
// be invalidated.
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
- auto InvalidatedSCCs = RC.switchInternalEdgeToCall(A, C);
- ASSERT_EQ(1u, InvalidatedSCCs.size());
- EXPECT_EQ(&AC, InvalidatedSCCs[0]);
+ EXPECT_TRUE(RC.switchInternalEdgeToCall(A, C, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+ ASSERT_EQ(1u, MergedCs.size());
+ EXPECT_EQ(&AC, MergedCs[0]);
+ }));
EXPECT_EQ(2, CC.size());
EXPECT_EQ(&CC, CG.lookupSCC(A));
EXPECT_EQ(&CC, CG.lookupSCC(C));
@@ -1586,8 +1587,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
// Switch the ref edge from A -> D to a call edge. This should have no
// effect as it is already in postorder and no new cycles are formed.
- auto MergedCs = RC.switchInternalEdgeToCall(A, D);
- EXPECT_EQ(0u, MergedCs.size());
+ EXPECT_FALSE(RC.switchInternalEdgeToCall(A, D));
ASSERT_EQ(4, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&BC, &RC[1]);
@@ -1596,8 +1596,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
// Switch B -> C to a call edge. This doesn't form any new cycles but does
// require reordering the SCCs.
- MergedCs = RC.switchInternalEdgeToCall(B, C);
- EXPECT_EQ(0u, MergedCs.size());
+ EXPECT_FALSE(RC.switchInternalEdgeToCall(B, C));
ASSERT_EQ(4, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&CC, &RC[1]);
@@ -1605,9 +1604,10 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
EXPECT_EQ(&AC, &RC[3]);
// Switch C -> B to a call edge. This forms a cycle and forces merging SCCs.
- MergedCs = RC.switchInternalEdgeToCall(C, B);
- ASSERT_EQ(1u, MergedCs.size());
- EXPECT_EQ(&CC, MergedCs[0]);
+ EXPECT_TRUE(RC.switchInternalEdgeToCall(C, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+ ASSERT_EQ(1u, MergedCs.size());
+ EXPECT_EQ(&CC, MergedCs[0]);
+ }));
ASSERT_EQ(3, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&BC, &RC[1]);
@@ -1720,8 +1720,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallNoCycleInterleaved) {
// Switch C3 -> B1 to a call edge. This doesn't form any new cycles but does
// require reordering the SCCs in the face of tricky internal node
// structures.
- auto MergedCs = RC.switchInternalEdgeToCall(C3, B1);
- EXPECT_EQ(0u, MergedCs.size());
+ EXPECT_FALSE(RC.switchInternalEdgeToCall(C3, B1));
ASSERT_EQ(8, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&B3C, &RC[1]);
@@ -1852,10 +1851,12 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallBothPartitionAndMerge) {
// C F C | |
// \ / \ / |
// G G |
- auto MergedCs = RC.switchInternalEdgeToCall(F, B);
- ASSERT_EQ(2u, MergedCs.size());
- EXPECT_EQ(&FC, MergedCs[0]);
- EXPECT_EQ(&DC, MergedCs[1]);
+ EXPECT_TRUE(RC.switchInternalEdgeToCall(
+ F, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+ ASSERT_EQ(2u, MergedCs.size());
+ EXPECT_EQ(&FC, MergedCs[0]);
+ EXPECT_EQ(&DC, MergedCs[1]);
+ }));
EXPECT_EQ(3, BC.size());
// And make sure the postorder was updated.
diff --git a/unittests/ExecutionEngine/Orc/CompileOnDemandLayerTest.cpp b/unittests/ExecutionEngine/Orc/CompileOnDemandLayerTest.cpp
index 13693381c006..844746f28c06 100644
--- a/unittests/ExecutionEngine/Orc/CompileOnDemandLayerTest.cpp
+++ b/unittests/ExecutionEngine/Orc/CompileOnDemandLayerTest.cpp
@@ -50,13 +50,14 @@ public:
TEST(CompileOnDemandLayerTest, FindSymbol) {
auto MockBaseLayer = createMockBaseLayer<int>(
- DoNothingAndReturn<int>(0), DoNothingAndReturn<void>(),
+ DoNothingAndReturn<int>(0),
+ [](int Handle) { return Error::success(); },
[](const std::string &Name, bool) {
if (Name == "foo")
return JITSymbol(1, JITSymbolFlags::Exported);
return JITSymbol(nullptr);
},
- DoNothingAndReturn<JITSymbol>(nullptr));
+ ReturnNullJITSymbol());
typedef decltype(MockBaseLayer) MockBaseLayerT;
DummyCallbackManager CallbackMgr;
diff --git a/unittests/ExecutionEngine/Orc/GlobalMappingLayerTest.cpp b/unittests/ExecutionEngine/Orc/GlobalMappingLayerTest.cpp
index 25b6c891c622..2756999c2160 100644
--- a/unittests/ExecutionEngine/Orc/GlobalMappingLayerTest.cpp
+++ b/unittests/ExecutionEngine/Orc/GlobalMappingLayerTest.cpp
@@ -17,7 +17,7 @@ namespace {
struct MockBaseLayer {
- typedef int ModuleSetHandleT;
+ typedef int ModuleHandleT;
JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
if (Name == "bar")
@@ -37,13 +37,15 @@ TEST(GlobalMappingLayerTest, Empty) {
// Test fall-through for symbol in base layer.
auto BarSym = L.findSymbol("bar", true);
- EXPECT_EQ(BarSym.getAddress(), static_cast<JITTargetAddress>(0x4567))
+ EXPECT_EQ(cantFail(BarSym.getAddress()),
+ static_cast<JITTargetAddress>(0x4567))
<< "Symbol lookup fall-through failed.";
// Test setup of a global mapping.
L.setGlobalMapping("foo", 0x0123);
auto FooSym2 = L.findSymbol("foo", true);
- EXPECT_EQ(FooSym2.getAddress(), static_cast<JITTargetAddress>(0x0123))
+ EXPECT_EQ(cantFail(FooSym2.getAddress()),
+ static_cast<JITTargetAddress>(0x0123))
<< "Symbol mapping setup failed.";
// Test removal of a global mapping.
diff --git a/unittests/ExecutionEngine/Orc/LazyEmittingLayerTest.cpp b/unittests/ExecutionEngine/Orc/LazyEmittingLayerTest.cpp
index f65dc0cd609d..0dba66d47535 100644
--- a/unittests/ExecutionEngine/Orc/LazyEmittingLayerTest.cpp
+++ b/unittests/ExecutionEngine/Orc/LazyEmittingLayerTest.cpp
@@ -27,7 +27,7 @@ struct MockBaseLayer {
TEST(LazyEmittingLayerTest, Empty) {
MockBaseLayer M;
llvm::orc::LazyEmittingLayer<MockBaseLayer> L(M);
- L.addModule(std::unique_ptr<llvm::Module>(), nullptr, nullptr);
+ cantFail(L.addModule(std::unique_ptr<llvm::Module>(), nullptr));
}
}
diff --git a/unittests/ExecutionEngine/Orc/ObjectTransformLayerTest.cpp b/unittests/ExecutionEngine/Orc/ObjectTransformLayerTest.cpp
index 2fdf9e8b7379..25103f79ac6c 100644
--- a/unittests/ExecutionEngine/Orc/ObjectTransformLayerTest.cpp
+++ b/unittests/ExecutionEngine/Orc/ObjectTransformLayerTest.cpp
@@ -14,6 +14,7 @@
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/NullResolver.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Object/ObjectFile.h"
#include "gtest/gtest.h"
@@ -21,12 +22,6 @@ using namespace llvm::orc;
namespace {
-// Stand-in for RuntimeDyld::MemoryManager
-typedef int MockMemoryManager;
-
-// Stand-in for RuntimeDyld::SymbolResolver
-typedef int MockSymbolResolver;
-
// stand-in for object::ObjectFile
typedef int MockObjectFile;
@@ -54,34 +49,37 @@ public:
MockBaseLayer() : MockSymbol(nullptr) { resetExpectations(); }
- template <typename ObjPtrT, typename MemoryManagerPtrT,
- typename SymbolResolverPtrT>
- ObjHandleT addObject(ObjPtrT Obj, MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
- EXPECT_EQ(MockManager, *MemMgr) << "MM should pass through";
- EXPECT_EQ(MockResolver, *Resolver) << "Resolver should pass through";
+ template <typename ObjPtrT>
+ llvm::Expected<ObjHandleT>
+ addObject(ObjPtrT Obj,
+ std::shared_ptr<llvm::JITSymbolResolver> Resolver) {
+ EXPECT_EQ(MockResolver, Resolver) << "Resolver should pass through";
EXPECT_EQ(MockObject + 1, *Obj) << "Transform should be applied";
LastCalled = "addObject";
MockObjHandle = 111;
return MockObjHandle;
}
+
template <typename ObjPtrT>
- void expectAddObject(ObjPtrT Obj, MockMemoryManager *MemMgr,
- MockSymbolResolver *Resolver) {
- MockManager = *MemMgr;
- MockResolver = *Resolver;
+ void expectAddObject(ObjPtrT Obj,
+ std::shared_ptr<llvm::JITSymbolResolver> Resolver) {
+ MockResolver = Resolver;
MockObject = *Obj;
}
+
+
void verifyAddObject(ObjHandleT Returned) {
EXPECT_EQ("addObject", LastCalled);
EXPECT_EQ(MockObjHandle, Returned) << "Return should pass through";
resetExpectations();
}
- void removeObject(ObjHandleT H) {
+ llvm::Error removeObject(ObjHandleT H) {
EXPECT_EQ(MockObjHandle, H);
LastCalled = "removeObject";
+ return llvm::Error::success();
}
+
void expectRemoveObject(ObjHandleT H) { MockObjHandle = H; }
void verifyRemoveObject() {
EXPECT_EQ("removeObject", LastCalled);
@@ -94,7 +92,7 @@ public:
EXPECT_EQ(MockBool, ExportedSymbolsOnly) << "Flag should pass through";
LastCalled = "findSymbol";
MockSymbol = llvm::JITSymbol(122, llvm::JITSymbolFlags::None);
- return MockSymbol;
+ return llvm::JITSymbol(122, llvm::JITSymbolFlags::None);
}
void expectFindSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
MockName = Name;
@@ -102,7 +100,8 @@ public:
}
void verifyFindSymbol(llvm::JITSymbol Returned) {
EXPECT_EQ("findSymbol", LastCalled);
- EXPECT_EQ(MockSymbol.getAddress(), Returned.getAddress())
+ EXPECT_EQ(cantFail(MockSymbol.getAddress()),
+ cantFail(Returned.getAddress()))
<< "Return should pass through";
resetExpectations();
}
@@ -114,7 +113,7 @@ public:
EXPECT_EQ(MockBool, ExportedSymbolsOnly) << "Flag should pass through";
LastCalled = "findSymbolIn";
MockSymbol = llvm::JITSymbol(122, llvm::JITSymbolFlags::None);
- return MockSymbol;
+ return llvm::JITSymbol(122, llvm::JITSymbolFlags::None);
}
void expectFindSymbolIn(ObjHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
@@ -124,16 +123,20 @@ public:
}
void verifyFindSymbolIn(llvm::JITSymbol Returned) {
EXPECT_EQ("findSymbolIn", LastCalled);
- EXPECT_EQ(MockSymbol.getAddress(), Returned.getAddress())
+ EXPECT_EQ(cantFail(MockSymbol.getAddress()),
+ cantFail(Returned.getAddress()))
<< "Return should pass through";
resetExpectations();
}
- void emitAndFinalize(ObjHandleT H) {
+ llvm::Error emitAndFinalize(ObjHandleT H) {
EXPECT_EQ(MockObjHandle, H) << "Handle should pass through";
LastCalled = "emitAndFinalize";
+ return llvm::Error::success();
}
+
void expectEmitAndFinalize(ObjHandleT H) { MockObjHandle = H; }
+
void verifyEmitAndFinalize() {
EXPECT_EQ("emitAndFinalize", LastCalled);
resetExpectations();
@@ -160,8 +163,7 @@ public:
private:
// Backing fields for remembering parameter/return values
std::string LastCalled;
- MockMemoryManager MockManager;
- MockSymbolResolver MockResolver;
+ std::shared_ptr<llvm::JITSymbolResolver> MockResolver;
MockObjectFile MockObject;
ObjHandleT MockObjHandle;
std::string MockName;
@@ -174,8 +176,7 @@ private:
// Clear remembered parameters between calls
void resetExpectations() {
LastCalled = "nothing";
- MockManager = 0;
- MockResolver = 0;
+ MockResolver = nullptr;
MockObject = 0;
MockObjHandle = 0;
MockName = "bogus";
@@ -204,47 +205,42 @@ TEST(ObjectTransformLayerTest, Main) {
return Obj;
});
- // Instantiate some mock objects to use below
- MockMemoryManager MockManager = 233;
- MockSymbolResolver MockResolver = 244;
-
// Test addObject with T1 (allocating)
auto Obj1 = std::make_shared<MockObjectFile>(211);
- auto MM = llvm::make_unique<MockMemoryManager>(MockManager);
- auto SR = llvm::make_unique<MockSymbolResolver>(MockResolver);
- M.expectAddObject(Obj1, MM.get(), SR.get());
- auto H = T1.addObject(std::move(Obj1), std::move(MM), std::move(SR));
+ auto SR = std::make_shared<NullResolver>();
+ M.expectAddObject(Obj1, SR);
+ auto H = cantFail(T1.addObject(std::move(Obj1), SR));
M.verifyAddObject(H);
// Test addObjectSet with T2 (mutating)
auto Obj2 = std::make_shared<MockObjectFile>(222);
- M.expectAddObject(Obj2, &MockManager, &MockResolver);
- H = T2.addObject(Obj2, &MockManager, &MockResolver);
+ M.expectAddObject(Obj2, SR);
+ H = cantFail(T2.addObject(Obj2, SR));
M.verifyAddObject(H);
EXPECT_EQ(223, *Obj2) << "Expected mutation";
// Test removeObjectSet
M.expectRemoveObject(H);
- T1.removeObject(H);
+ cantFail(T1.removeObject(H));
M.verifyRemoveObject();
// Test findSymbol
std::string Name = "foo";
bool ExportedOnly = true;
M.expectFindSymbol(Name, ExportedOnly);
- llvm::JITSymbol Symbol = T2.findSymbol(Name, ExportedOnly);
- M.verifyFindSymbol(Symbol);
+ llvm::JITSymbol Sym1 = T2.findSymbol(Name, ExportedOnly);
+ M.verifyFindSymbol(std::move(Sym1));
// Test findSymbolIn
Name = "bar";
ExportedOnly = false;
M.expectFindSymbolIn(H, Name, ExportedOnly);
- Symbol = T1.findSymbolIn(H, Name, ExportedOnly);
- M.verifyFindSymbolIn(Symbol);
+ llvm::JITSymbol Sym2 = T1.findSymbolIn(H, Name, ExportedOnly);
+ M.verifyFindSymbolIn(std::move(Sym2));
// Test emitAndFinalize
M.expectEmitAndFinalize(H);
- T2.emitAndFinalize(H);
+ cantFail(T2.emitAndFinalize(H));
M.verifyEmitAndFinalize();
// Test mapSectionAddress
@@ -295,7 +291,11 @@ TEST(ObjectTransformLayerTest, Main) {
};
// Construct the jit layers.
- RTDyldObjectLinkingLayer BaseLayer;
+ RTDyldObjectLinkingLayer BaseLayer(
+ []() {
+ return std::make_shared<llvm::SectionMemoryManager>();
+ });
+
auto IdentityTransform =
[](std::shared_ptr<llvm::object::OwningBinary<llvm::object::ObjectFile>>
Obj) {
@@ -312,17 +312,16 @@ TEST(ObjectTransformLayerTest, Main) {
// Make sure that the calls from IRCompileLayer to ObjectTransformLayer
// compile.
- NullResolver Resolver;
- NullManager Manager;
- CompileLayer.addModule(std::shared_ptr<llvm::Module>(), &Manager, &Resolver);
+ auto Resolver = std::make_shared<NullResolver>();
+ cantFail(CompileLayer.addModule(std::shared_ptr<llvm::Module>(), Resolver));
// Make sure that the calls from ObjectTransformLayer to ObjectLinkingLayer
// compile.
decltype(TransformLayer)::ObjHandleT H2;
- TransformLayer.emitAndFinalize(H2);
+ cantFail(TransformLayer.emitAndFinalize(H2));
TransformLayer.findSymbolIn(H2, Name, false);
TransformLayer.findSymbol(Name, true);
TransformLayer.mapSectionAddress(H2, nullptr, 0);
- TransformLayer.removeObject(H2);
+ cantFail(TransformLayer.removeObject(H2));
}
}
diff --git a/unittests/ExecutionEngine/Orc/OrcCAPITest.cpp b/unittests/ExecutionEngine/Orc/OrcCAPITest.cpp
index 2900a9c92766..5a4d6b4a2252 100644
--- a/unittests/ExecutionEngine/Orc/OrcCAPITest.cpp
+++ b/unittests/ExecutionEngine/Orc/OrcCAPITest.cpp
@@ -66,10 +66,11 @@ protected:
auto *ET = CCtx->APIExecTest;
CCtx->M = ET->createTestModule(ET->TM->getTargetTriple());
LLVMSharedModuleRef SM = LLVMOrcMakeSharedModule(wrap(CCtx->M.release()));
- CCtx->H = LLVMOrcAddEagerlyCompiledIR(JITStack, SM, myResolver, nullptr);
+ LLVMOrcAddEagerlyCompiledIR(JITStack, &CCtx->H, SM, myResolver, nullptr);
LLVMOrcDisposeSharedModuleRef(SM);
CCtx->Compiled = true;
- LLVMOrcTargetAddress MainAddr = LLVMOrcGetSymbolAddress(JITStack, "main");
+ LLVMOrcTargetAddress MainAddr;
+ LLVMOrcGetSymbolAddress(JITStack, &MainAddr, "main");
LLVMOrcSetIndirectStubPointer(JITStack, "foo", MainAddr);
return MainAddr;
}
@@ -89,10 +90,12 @@ TEST_F(OrcCAPIExecutionTest, TestEagerIRCompilation) {
LLVMOrcGetMangledSymbol(JIT, &testFuncName, "testFunc");
LLVMSharedModuleRef SM = LLVMOrcMakeSharedModule(wrap(M.release()));
- LLVMOrcModuleHandle H =
- LLVMOrcAddEagerlyCompiledIR(JIT, SM, myResolver, nullptr);
+ LLVMOrcModuleHandle H;
+ LLVMOrcAddEagerlyCompiledIR(JIT, &H, SM, myResolver, nullptr);
LLVMOrcDisposeSharedModuleRef(SM);
- MainFnTy MainFn = (MainFnTy)LLVMOrcGetSymbolAddress(JIT, "main");
+ LLVMOrcTargetAddress MainAddr;
+ LLVMOrcGetSymbolAddress(JIT, &MainAddr, "main");
+ MainFnTy MainFn = (MainFnTy)MainAddr;
int Result = MainFn();
EXPECT_EQ(Result, 42)
<< "Eagerly JIT'd code did not return expected result";
@@ -115,10 +118,12 @@ TEST_F(OrcCAPIExecutionTest, TestLazyIRCompilation) {
LLVMOrcGetMangledSymbol(JIT, &testFuncName, "testFunc");
LLVMSharedModuleRef SM = LLVMOrcMakeSharedModule(wrap(M.release()));
- LLVMOrcModuleHandle H =
- LLVMOrcAddLazilyCompiledIR(JIT, SM, myResolver, nullptr);
+ LLVMOrcModuleHandle H;
+ LLVMOrcAddLazilyCompiledIR(JIT, &H, SM, myResolver, nullptr);
LLVMOrcDisposeSharedModuleRef(SM);
- MainFnTy MainFn = (MainFnTy)LLVMOrcGetSymbolAddress(JIT, "main");
+ LLVMOrcTargetAddress MainAddr;
+ LLVMOrcGetSymbolAddress(JIT, &MainAddr, "main");
+ MainFnTy MainFn = (MainFnTy)MainAddr;
int Result = MainFn();
EXPECT_EQ(Result, 42)
<< "Lazily JIT'd code did not return expected result";
@@ -140,11 +145,12 @@ TEST_F(OrcCAPIExecutionTest, TestDirectCallbacksAPI) {
CompileContext C;
C.APIExecTest = this;
- LLVMOrcCreateIndirectStub(JIT, "foo",
- LLVMOrcCreateLazyCompileCallback(JIT,
- myCompileCallback,
- &C));
- MainFnTy FooFn = (MainFnTy)LLVMOrcGetSymbolAddress(JIT, "foo");
+ LLVMOrcTargetAddress CCAddr;
+ LLVMOrcCreateLazyCompileCallback(JIT, &CCAddr, myCompileCallback, &C);
+ LLVMOrcCreateIndirectStub(JIT, "foo", CCAddr);
+ LLVMOrcTargetAddress MainAddr;
+ LLVMOrcGetSymbolAddress(JIT, &MainAddr, "foo");
+ MainFnTy FooFn = (MainFnTy)MainAddr;
int Result = FooFn();
EXPECT_TRUE(C.Compiled)
<< "Function wasn't lazily compiled";
diff --git a/unittests/ExecutionEngine/Orc/OrcTestCommon.h b/unittests/ExecutionEngine/Orc/OrcTestCommon.h
index d7049ef00e6d..6c6b4918c205 100644
--- a/unittests/ExecutionEngine/Orc/OrcTestCommon.h
+++ b/unittests/ExecutionEngine/Orc/OrcTestCommon.h
@@ -119,19 +119,21 @@ public:
RemoveModuleFtor &&RemoveModule,
FindSymbolFtor &&FindSymbol,
FindSymbolInFtor &&FindSymbolIn)
- : AddModule(AddModule), RemoveModule(RemoveModule),
- FindSymbol(FindSymbol), FindSymbolIn(FindSymbolIn)
+ : AddModule(std::move(AddModule)),
+ RemoveModule(std::move(RemoveModule)),
+ FindSymbol(std::move(FindSymbol)),
+ FindSymbolIn(std::move(FindSymbolIn))
{}
template <typename ModuleT, typename MemoryManagerPtrT,
typename SymbolResolverPtrT>
- ModuleHandleT addModule(ModuleT Ms, MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
+ Expected<ModuleHandleT> addModule(ModuleT Ms, MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
return AddModule(std::move(Ms), std::move(MemMgr), std::move(Resolver));
}
- void removeModule(ModuleHandleT H) {
- RemoveModule(H);
+ Error removeModule(ModuleHandleT H) {
+ return RemoveModule(H);
}
JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
@@ -169,15 +171,24 @@ createMockBaseLayer(AddModuleFtor &&AddModule,
std::forward<FindSymbolInFtor>(FindSymbolIn));
}
+
+class ReturnNullJITSymbol {
+public:
+ template <typename... Args>
+ JITSymbol operator()(Args...) const {
+ return nullptr;
+ }
+};
+
template <typename ReturnT>
class DoNothingAndReturn {
public:
- DoNothingAndReturn(ReturnT Val) : Val(Val) {}
+ DoNothingAndReturn(ReturnT Ret) : Ret(std::move(Ret)) {}
template <typename... Args>
- ReturnT operator()(Args...) const { return Val; }
+ void operator()(Args...) const { return Ret; }
private:
- ReturnT Val;
+ ReturnT Ret;
};
template <>
diff --git a/unittests/ExecutionEngine/Orc/RTDyldObjectLinkingLayerTest.cpp b/unittests/ExecutionEngine/Orc/RTDyldObjectLinkingLayerTest.cpp
index 7c821bc2c34b..e4b61d855c5f 100644
--- a/unittests/ExecutionEngine/Orc/RTDyldObjectLinkingLayerTest.cpp
+++ b/unittests/ExecutionEngine/Orc/RTDyldObjectLinkingLayerTest.cpp
@@ -45,9 +45,9 @@ public:
};
TEST(RTDyldObjectLinkingLayerTest, TestSetProcessAllSections) {
- class SectionMemoryManagerWrapper : public SectionMemoryManager {
+ class MemoryManagerWrapper : public SectionMemoryManager {
public:
- SectionMemoryManagerWrapper(bool &DebugSeen) : DebugSeen(DebugSeen) {}
+ MemoryManagerWrapper(bool &DebugSeen) : DebugSeen(DebugSeen) {}
uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID,
StringRef SectionName,
@@ -63,7 +63,10 @@ TEST(RTDyldObjectLinkingLayerTest, TestSetProcessAllSections) {
bool &DebugSeen;
};
- RTDyldObjectLinkingLayer ObjLayer;
+ bool DebugSectionSeen = false;
+ auto MM = std::make_shared<MemoryManagerWrapper>(DebugSectionSeen);
+
+ RTDyldObjectLinkingLayer ObjLayer([&MM]() { return MM; });
LLVMContext Context;
auto M = llvm::make_unique<Module>("", Context);
@@ -89,9 +92,6 @@ TEST(RTDyldObjectLinkingLayerTest, TestSetProcessAllSections) {
std::make_shared<object::OwningBinary<object::ObjectFile>>(
SimpleCompiler(*TM)(*M));
- bool DebugSectionSeen = false;
- auto SMMW =
- std::make_shared<SectionMemoryManagerWrapper>(DebugSectionSeen);
auto Resolver =
createLambdaResolver(
[](const std::string &Name) {
@@ -103,21 +103,21 @@ TEST(RTDyldObjectLinkingLayerTest, TestSetProcessAllSections) {
{
// Test with ProcessAllSections = false (the default).
- auto H = ObjLayer.addObject(Obj, SMMW, &*Resolver);
- ObjLayer.emitAndFinalize(H);
+ auto H = cantFail(ObjLayer.addObject(Obj, Resolver));
+ cantFail(ObjLayer.emitAndFinalize(H));
EXPECT_EQ(DebugSectionSeen, false)
<< "Unexpected debug info section";
- ObjLayer.removeObject(H);
+ cantFail(ObjLayer.removeObject(H));
}
{
// Test with ProcessAllSections = true.
ObjLayer.setProcessAllSections(true);
- auto H = ObjLayer.addObject(Obj, SMMW, &*Resolver);
- ObjLayer.emitAndFinalize(H);
+ auto H = cantFail(ObjLayer.addObject(Obj, Resolver));
+ cantFail(ObjLayer.emitAndFinalize(H));
EXPECT_EQ(DebugSectionSeen, true)
<< "Expected debug info section not seen";
- ObjLayer.removeObject(H);
+ cantFail(ObjLayer.removeObject(H));
}
}
@@ -125,7 +125,9 @@ TEST_F(RTDyldObjectLinkingLayerExecutionTest, NoDuplicateFinalization) {
if (!TM)
return;
- RTDyldObjectLinkingLayer ObjLayer;
+ auto MM = std::make_shared<SectionMemoryManagerWrapper>();
+
+ RTDyldObjectLinkingLayer ObjLayer([&MM]() { return MM; });
SimpleCompiler Compile(*TM);
// Create a pair of modules that will trigger recursive finalization:
@@ -179,15 +181,14 @@ TEST_F(RTDyldObjectLinkingLayerExecutionTest, NoDuplicateFinalization) {
return JITSymbol(nullptr);
});
- auto SMMW = std::make_shared<SectionMemoryManagerWrapper>();
- ObjLayer.addObject(std::move(Obj1), SMMW, &*Resolver);
- auto H = ObjLayer.addObject(std::move(Obj2), SMMW, &*Resolver);
- ObjLayer.emitAndFinalize(H);
- ObjLayer.removeObject(H);
-
+ cantFail(ObjLayer.addObject(std::move(Obj1), Resolver));
+ auto H = cantFail(ObjLayer.addObject(std::move(Obj2), Resolver));
+ cantFail(ObjLayer.emitAndFinalize(H));
+ cantFail(ObjLayer.removeObject(H));
+
// Finalization of module 2 should trigger finalization of module 1.
// Verify that finalize on SMMW is only called once.
- EXPECT_EQ(SMMW->FinalizationCount, 1)
+ EXPECT_EQ(MM->FinalizationCount, 1)
<< "Extra call to finalize";
}
@@ -195,7 +196,9 @@ TEST_F(RTDyldObjectLinkingLayerExecutionTest, NoPrematureAllocation) {
if (!TM)
return;
- RTDyldObjectLinkingLayer ObjLayer;
+ auto MM = std::make_shared<SectionMemoryManagerWrapper>();
+
+ RTDyldObjectLinkingLayer ObjLayer([&MM]() { return MM; });
SimpleCompiler Compile(*TM);
// Create a pair of unrelated modules:
@@ -240,15 +243,14 @@ TEST_F(RTDyldObjectLinkingLayerExecutionTest, NoPrematureAllocation) {
std::make_shared<object::OwningBinary<object::ObjectFile>>(
Compile(*MB2.getModule()));
- auto SMMW = std::make_shared<SectionMemoryManagerWrapper>();
- NullResolver NR;
- auto H = ObjLayer.addObject(std::move(Obj1), SMMW, &NR);
- ObjLayer.addObject(std::move(Obj2), SMMW, &NR);
- ObjLayer.emitAndFinalize(H);
- ObjLayer.removeObject(H);
-
+ auto NR = std::make_shared<NullResolver>();
+ auto H = cantFail(ObjLayer.addObject(std::move(Obj1), NR));
+ cantFail(ObjLayer.addObject(std::move(Obj2), NR));
+ cantFail(ObjLayer.emitAndFinalize(H));
+ cantFail(ObjLayer.removeObject(H));
+
// Only one call to needsToReserveAllocationSpace should have been made.
- EXPECT_EQ(SMMW->NeedsToReserveAllocationSpaceCount, 1)
+ EXPECT_EQ(MM->NeedsToReserveAllocationSpaceCount, 1)
<< "More than one call to needsToReserveAllocationSpace "
"(multiple unrelated objects loaded prior to finalization)";
}
diff --git a/unittests/IR/CMakeLists.txt b/unittests/IR/CMakeLists.txt
index 6734de8e2d95..d76ebfa64d88 100644
--- a/unittests/IR/CMakeLists.txt
+++ b/unittests/IR/CMakeLists.txt
@@ -3,6 +3,7 @@ set(LLVM_LINK_COMPONENTS
AsmParser
Core
Support
+ Passes
)
set(IRSources
@@ -15,6 +16,7 @@ set(IRSources
DebugTypeODRUniquingTest.cpp
DominatorTreeTest.cpp
FunctionTest.cpp
+ PassBuilderCallbacksTest.cpp
IRBuilderTest.cpp
InstructionsTest.cpp
IntrinsicsTest.cpp
diff --git a/unittests/IR/ModuleTest.cpp b/unittests/IR/ModuleTest.cpp
index d93d036bb115..af55a098adda 100644
--- a/unittests/IR/ModuleTest.cpp
+++ b/unittests/IR/ModuleTest.cpp
@@ -63,7 +63,7 @@ TEST(ModuleTest, randomNumberGenerator) {
std::array<int, NBCheck> RandomStreams[2];
for (auto &RandomStream : RandomStreams) {
- std::unique_ptr<RandomNumberGenerator> RNG{M.createRNG(&DP)};
+ std::unique_ptr<RandomNumberGenerator> RNG = M.createRNG(&DP);
std::generate(RandomStream.begin(), RandomStream.end(),
[&]() { return dist(*RNG); });
}
diff --git a/unittests/IR/PassBuilderCallbacksTest.cpp b/unittests/IR/PassBuilderCallbacksTest.cpp
new file mode 100644
index 000000000000..df0b11f6cc71
--- /dev/null
+++ b/unittests/IR/PassBuilderCallbacksTest.cpp
@@ -0,0 +1,520 @@
+//===- unittests/IR/PassBuilderCallbacksTest.cpp - PB Callback Tests --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <llvm/Analysis/CGSCCPassManager.h>
+#include <llvm/Analysis/LoopAnalysisManager.h>
+#include <llvm/AsmParser/Parser.h>
+#include <llvm/IR/LLVMContext.h>
+#include <llvm/IR/PassManager.h>
+#include <llvm/Passes/PassBuilder.h>
+#include <llvm/Support/SourceMgr.h>
+#include <llvm/Transforms/Scalar/LoopPassManager.h>
+
+using namespace llvm;
+
+namespace llvm {
+/// Provide an ostream operator for StringRef.
+///
+/// For convenience we provide a custom matcher below for IRUnits' and analysis
+/// results' getName functions, which most of the time return a StringRef. The
+/// matcher makes use of this operator.
+static std::ostream &operator<<(std::ostream &O, StringRef S) {
+ return O << S.str();
+}
+}
+
+namespace {
+using testing::DoDefault;
+using testing::Return;
+using testing::Expectation;
+using testing::Invoke;
+using testing::WithArgs;
+using testing::_;
+
+/// \brief A CRTP base for analysis mock handles
+///
+/// This class reconciles mocking with the value semantics implementation of the
+/// AnalysisManager. Analysis mock handles should derive from this class and
+/// call \c setDefaults() in their constructor to wire up the defaults defined
+/// by this base with their mock run() and invalidate() implementations.
+template <typename DerivedT, typename IRUnitT,
+ typename AnalysisManagerT = AnalysisManager<IRUnitT>,
+ typename... ExtraArgTs>
+class MockAnalysisHandleBase {
+public:
+ class Analysis : public AnalysisInfoMixin<Analysis> {
+ friend AnalysisInfoMixin<Analysis>;
+ friend MockAnalysisHandleBase;
+ static AnalysisKey Key;
+
+ DerivedT *Handle;
+
+ Analysis(DerivedT &Handle) : Handle(&Handle) {
+ static_assert(std::is_base_of<MockAnalysisHandleBase, DerivedT>::value,
+ "Must pass the derived type to this template!");
+ }
+
+ public:
+ class Result {
+ friend MockAnalysisHandleBase;
+
+ DerivedT *Handle;
+
+ Result(DerivedT &Handle) : Handle(&Handle) {}
+
+ public:
+ // Forward invalidation events to the mock handle.
+ bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA,
+ typename AnalysisManagerT::Invalidator &Inv) {
+ return Handle->invalidate(IR, PA, Inv);
+ }
+ };
+
+ Result run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs) {
+ return Handle->run(IR, AM, ExtraArgs...);
+ }
+ };
+
+ Analysis getAnalysis() { return Analysis(static_cast<DerivedT &>(*this)); }
+ typename Analysis::Result getResult() {
+ return typename Analysis::Result(static_cast<DerivedT &>(*this));
+ }
+
+protected:
+ // FIXME: MSVC seems unable to handle a lambda argument to Invoke from within
+ // the template, so we use a boring static function.
+ static bool invalidateCallback(IRUnitT &IR, const PreservedAnalyses &PA,
+ typename AnalysisManagerT::Invalidator &Inv) {
+ auto PAC = PA.template getChecker<Analysis>();
+ return !PAC.preserved() &&
+ !PAC.template preservedSet<AllAnalysesOn<IRUnitT>>();
+ }
+
+ /// Derived classes should call this in their constructor to set up default
+ /// mock actions. (We can't do this in our constructor because this has to
+ /// run after the DerivedT is constructed.)
+ void setDefaults() {
+ ON_CALL(static_cast<DerivedT &>(*this),
+ run(_, _, testing::Matcher<ExtraArgTs>(_)...))
+ .WillByDefault(Return(this->getResult()));
+ ON_CALL(static_cast<DerivedT &>(*this), invalidate(_, _, _))
+ .WillByDefault(Invoke(&invalidateCallback));
+ }
+};
+
+/// \brief A CRTP base for pass mock handles
+///
+/// This class reconciles mocking with the value semantics implementation of the
+/// PassManager. Pass mock handles should derive from this class and
+/// call \c setDefaults() in their constructor to wire up the defaults defined
+/// by this base with their mock run() and invalidate() implementations.
+template <typename DerivedT, typename IRUnitT, typename AnalysisManagerT,
+ typename... ExtraArgTs>
+AnalysisKey MockAnalysisHandleBase<DerivedT, IRUnitT, AnalysisManagerT,
+ ExtraArgTs...>::Analysis::Key;
+
+template <typename DerivedT, typename IRUnitT,
+ typename AnalysisManagerT = AnalysisManager<IRUnitT>,
+ typename... ExtraArgTs>
+class MockPassHandleBase {
+public:
+ class Pass : public PassInfoMixin<Pass> {
+ friend MockPassHandleBase;
+
+ DerivedT *Handle;
+
+ Pass(DerivedT &Handle) : Handle(&Handle) {
+ static_assert(std::is_base_of<MockPassHandleBase, DerivedT>::value,
+ "Must pass the derived type to this template!");
+ }
+
+ public:
+ PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM,
+ ExtraArgTs... ExtraArgs) {
+ return Handle->run(IR, AM, ExtraArgs...);
+ }
+ };
+
+ Pass getPass() { return Pass(static_cast<DerivedT &>(*this)); }
+
+protected:
+ /// Derived classes should call this in their constructor to set up default
+ /// mock actions. (We can't do this in our constructor because this has to
+ /// run after the DerivedT is constructed.)
+ void setDefaults() {
+ ON_CALL(static_cast<DerivedT &>(*this),
+ run(_, _, testing::Matcher<ExtraArgTs>(_)...))
+ .WillByDefault(Return(PreservedAnalyses::all()));
+ }
+};
+
+/// Mock handles for passes for the IRUnits Module, CGSCC, Function, Loop.
+/// These handles define the appropriate run() mock interface for the respective
+/// IRUnit type.
+template <typename IRUnitT> struct MockPassHandle;
+template <>
+struct MockPassHandle<Loop>
+ : MockPassHandleBase<MockPassHandle<Loop>, Loop, LoopAnalysisManager,
+ LoopStandardAnalysisResults &, LPMUpdater &> {
+ MOCK_METHOD4(run,
+ PreservedAnalyses(Loop &, LoopAnalysisManager &,
+ LoopStandardAnalysisResults &, LPMUpdater &));
+ MockPassHandle() { setDefaults(); }
+};
+
+template <>
+struct MockPassHandle<Function>
+ : MockPassHandleBase<MockPassHandle<Function>, Function> {
+ MOCK_METHOD2(run, PreservedAnalyses(Function &, FunctionAnalysisManager &));
+
+ MockPassHandle() { setDefaults(); }
+};
+
+template <>
+struct MockPassHandle<LazyCallGraph::SCC>
+ : MockPassHandleBase<MockPassHandle<LazyCallGraph::SCC>, LazyCallGraph::SCC,
+ CGSCCAnalysisManager, LazyCallGraph &,
+ CGSCCUpdateResult &> {
+ MOCK_METHOD4(run,
+ PreservedAnalyses(LazyCallGraph::SCC &, CGSCCAnalysisManager &,
+ LazyCallGraph &G, CGSCCUpdateResult &UR));
+
+ MockPassHandle() { setDefaults(); }
+};
+
+template <>
+struct MockPassHandle<Module>
+ : MockPassHandleBase<MockPassHandle<Module>, Module> {
+ MOCK_METHOD2(run, PreservedAnalyses(Module &, ModuleAnalysisManager &));
+
+ MockPassHandle() { setDefaults(); }
+};
+
+/// Mock handles for analyses for the IRUnits Module, CGSCC, Function, Loop.
+/// These handles define the appropriate run() and invalidate() mock interfaces
+/// for the respective IRUnit type.
+template <typename IRUnitT> struct MockAnalysisHandle;
+template <>
+struct MockAnalysisHandle<Loop>
+ : MockAnalysisHandleBase<MockAnalysisHandle<Loop>, Loop,
+ LoopAnalysisManager,
+ LoopStandardAnalysisResults &> {
+
+ MOCK_METHOD3_T(run, typename Analysis::Result(Loop &, LoopAnalysisManager &,
+ LoopStandardAnalysisResults &));
+
+ MOCK_METHOD3_T(invalidate, bool(Loop &, const PreservedAnalyses &,
+ LoopAnalysisManager::Invalidator &));
+
+ MockAnalysisHandle<Loop>() { this->setDefaults(); }
+};
+
+template <>
+struct MockAnalysisHandle<Function>
+ : MockAnalysisHandleBase<MockAnalysisHandle<Function>, Function> {
+ MOCK_METHOD2(run, Analysis::Result(Function &, FunctionAnalysisManager &));
+
+ MOCK_METHOD3(invalidate, bool(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &));
+
+ MockAnalysisHandle<Function>() { setDefaults(); }
+};
+
+template <>
+struct MockAnalysisHandle<LazyCallGraph::SCC>
+ : MockAnalysisHandleBase<MockAnalysisHandle<LazyCallGraph::SCC>,
+ LazyCallGraph::SCC, CGSCCAnalysisManager,
+ LazyCallGraph &> {
+ MOCK_METHOD3(run, Analysis::Result(LazyCallGraph::SCC &,
+ CGSCCAnalysisManager &, LazyCallGraph &));
+
+ MOCK_METHOD3(invalidate, bool(LazyCallGraph::SCC &, const PreservedAnalyses &,
+ CGSCCAnalysisManager::Invalidator &));
+
+ MockAnalysisHandle<LazyCallGraph::SCC>() { setDefaults(); }
+};
+
+template <>
+struct MockAnalysisHandle<Module>
+ : MockAnalysisHandleBase<MockAnalysisHandle<Module>, Module> {
+ MOCK_METHOD2(run, Analysis::Result(Module &, ModuleAnalysisManager &));
+
+ MOCK_METHOD3(invalidate, bool(Module &, const PreservedAnalyses &,
+ ModuleAnalysisManager::Invalidator &));
+
+ MockAnalysisHandle<Module>() { setDefaults(); }
+};
+
+static std::unique_ptr<Module> parseIR(LLVMContext &C, const char *IR) {
+ SMDiagnostic Err;
+ return parseAssemblyString(IR, Err, C);
+}
+
+template <typename PassManagerT> class PassBuilderCallbacksTest;
+
+/// This test fixture is shared between all the actual tests below and
+/// takes care of setting up appropriate defaults.
+///
+/// The template specialization serves to extract the IRUnit and AM types from
+/// the given PassManagerT.
+template <typename TestIRUnitT, typename... ExtraPassArgTs,
+ typename... ExtraAnalysisArgTs>
+class PassBuilderCallbacksTest<PassManager<
+ TestIRUnitT, AnalysisManager<TestIRUnitT, ExtraAnalysisArgTs...>,
+ ExtraPassArgTs...>> : public testing::Test {
+protected:
+ using IRUnitT = TestIRUnitT;
+ using AnalysisManagerT = AnalysisManager<TestIRUnitT, ExtraAnalysisArgTs...>;
+ using PassManagerT =
+ PassManager<TestIRUnitT, AnalysisManagerT, ExtraPassArgTs...>;
+ using AnalysisT = typename MockAnalysisHandle<IRUnitT>::Analysis;
+
+ LLVMContext Context;
+ std::unique_ptr<Module> M;
+
+ PassBuilder PB;
+ ModulePassManager PM;
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ CGSCCAnalysisManager CGAM;
+ ModuleAnalysisManager AM;
+
+ MockPassHandle<IRUnitT> PassHandle;
+ MockAnalysisHandle<IRUnitT> AnalysisHandle;
+
+ static PreservedAnalyses getAnalysisResult(IRUnitT &U, AnalysisManagerT &AM,
+ ExtraAnalysisArgTs &&... Args) {
+ (void)AM.template getResult<AnalysisT>(
+ U, std::forward<ExtraAnalysisArgTs>(Args)...);
+ return PreservedAnalyses::all();
+ }
+
+ PassBuilderCallbacksTest()
+ : M(parseIR(Context,
+ "declare void @bar()\n"
+ "define void @foo(i32 %n) {\n"
+ "entry:\n"
+ " br label %loop\n"
+ "loop:\n"
+ " %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]\n"
+ " %iv.next = add i32 %iv, 1\n"
+ " tail call void @bar()\n"
+ " %cmp = icmp eq i32 %iv, %n\n"
+ " br i1 %cmp, label %exit, label %loop\n"
+ "exit:\n"
+ " ret void\n"
+ "}\n")),
+ PM(true), LAM(true), FAM(true), CGAM(true), AM(true) {
+
+ /// Register a callback for analysis registration.
+ ///
+ /// The callback is a function taking a reference to an AnalysisManager
+ /// object. When called, the callee gets to register its own analyses with
+ /// this PassBuilder instance.
+ PB.registerAnalysisRegistrationCallback([this](AnalysisManagerT &AM) {
+ // Register our mock analysis
+ AM.registerPass([this] { return AnalysisHandle.getAnalysis(); });
+ });
+
+ /// Register a callback for pipeline parsing.
+ ///
+ /// During parsing of a textual pipeline, the PassBuilder will call these
+ /// callbacks for each encountered pass name that it does not know. This
+ /// includes both simple pass names as well as names of sub-pipelines. In
+ /// the latter case, the InnerPipeline is not empty.
+ PB.registerPipelineParsingCallback(
+ [this](StringRef Name, PassManagerT &PM,
+ ArrayRef<PassBuilder::PipelineElement> InnerPipeline) {
+ /// Handle parsing of the names of analysis utilities such as
+ /// require<test-analysis> and invalidate<test-analysis> for our
+ /// analysis mock handle
+ if (parseAnalysisUtilityPasses<AnalysisT>("test-analysis", Name, PM))
+ return true;
+
+ /// Parse the name of our pass mock handle
+ if (Name == "test-transform") {
+ PM.addPass(PassHandle.getPass());
+ return true;
+ }
+ return false;
+ });
+
+ /// Register builtin analyses and cross-register the analysis proxies
+ PB.registerModuleAnalyses(AM);
+ PB.registerCGSCCAnalyses(CGAM);
+ PB.registerFunctionAnalyses(FAM);
+ PB.registerLoopAnalyses(LAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, AM);
+ }
+};
+
+/// Define a custom matcher for objects which support a 'getName' method.
+///
+/// LLVM often has IR objects or analysis objects which expose a name
+/// and in tests it is convenient to match these by name for readability.
+/// Usually, this name is either a StringRef or a plain std::string. This
+/// matcher supports any type exposing a getName() method of this form whose
+/// return value is compatible with an std::ostream. For StringRef, this uses
+/// the shift operator defined above.
+///
+/// It should be used as:
+///
+/// HasName("my_function")
+///
+/// No namespace or other qualification is required.
+MATCHER_P(HasName, Name, "") {
+ *result_listener << "has name '" << arg.getName() << "'";
+ return Name == arg.getName();
+}
+
+using ModuleCallbacksTest = PassBuilderCallbacksTest<ModulePassManager>;
+using CGSCCCallbacksTest = PassBuilderCallbacksTest<CGSCCPassManager>;
+using FunctionCallbacksTest = PassBuilderCallbacksTest<FunctionPassManager>;
+using LoopCallbacksTest = PassBuilderCallbacksTest<LoopPassManager>;
+
+/// Test parsing of the name of our mock pass for all IRUnits.
+///
+/// The pass should by default run our mock analysis and then preserve it.
+TEST_F(ModuleCallbacksTest, Passes) {
+ EXPECT_CALL(AnalysisHandle, run(HasName("<string>"), _));
+ EXPECT_CALL(PassHandle, run(HasName("<string>"), _))
+ .WillOnce(Invoke(getAnalysisResult));
+
+ StringRef PipelineText = "test-transform";
+ ASSERT_TRUE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+ PM.run(*M, AM);
+}
+
+TEST_F(FunctionCallbacksTest, Passes) {
+ EXPECT_CALL(AnalysisHandle, run(HasName("foo"), _));
+ EXPECT_CALL(PassHandle, run(HasName("foo"), _))
+ .WillOnce(Invoke(getAnalysisResult));
+
+ StringRef PipelineText = "test-transform";
+ ASSERT_TRUE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+ PM.run(*M, AM);
+}
+
+TEST_F(LoopCallbacksTest, Passes) {
+ EXPECT_CALL(AnalysisHandle, run(HasName("loop"), _, _));
+ EXPECT_CALL(PassHandle, run(HasName("loop"), _, _, _))
+ .WillOnce(WithArgs<0, 1, 2>(Invoke(getAnalysisResult)));
+
+ StringRef PipelineText = "test-transform";
+ ASSERT_TRUE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+ PM.run(*M, AM);
+}
+
+TEST_F(CGSCCCallbacksTest, Passes) {
+ EXPECT_CALL(AnalysisHandle, run(HasName("(foo)"), _, _));
+ EXPECT_CALL(PassHandle, run(HasName("(foo)"), _, _, _))
+ .WillOnce(WithArgs<0, 1, 2>(Invoke(getAnalysisResult)));
+
+ StringRef PipelineText = "test-transform";
+ ASSERT_TRUE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+ PM.run(*M, AM);
+}
+
+/// Test parsing of the names of analysis utilities for our mock analysis
+/// for all IRUnits.
+///
+/// We first require<>, then invalidate<> it, expecting the analysis to be run
+/// once and subsequently invalidated.
+TEST_F(ModuleCallbacksTest, AnalysisUtilities) {
+ EXPECT_CALL(AnalysisHandle, run(HasName("<string>"), _));
+ EXPECT_CALL(AnalysisHandle, invalidate(HasName("<string>"), _, _));
+
+ StringRef PipelineText = "require<test-analysis>,invalidate<test-analysis>";
+ ASSERT_TRUE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+ PM.run(*M, AM);
+}
+
+TEST_F(CGSCCCallbacksTest, PassUtilities) {
+ EXPECT_CALL(AnalysisHandle, run(HasName("(foo)"), _, _));
+ EXPECT_CALL(AnalysisHandle, invalidate(HasName("(foo)"), _, _));
+
+ StringRef PipelineText = "require<test-analysis>,invalidate<test-analysis>";
+ ASSERT_TRUE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+ PM.run(*M, AM);
+}
+
+TEST_F(FunctionCallbacksTest, AnalysisUtilities) {
+ EXPECT_CALL(AnalysisHandle, run(HasName("foo"), _));
+ EXPECT_CALL(AnalysisHandle, invalidate(HasName("foo"), _, _));
+
+ StringRef PipelineText = "require<test-analysis>,invalidate<test-analysis>";
+ ASSERT_TRUE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+ PM.run(*M, AM);
+}
+
+TEST_F(LoopCallbacksTest, PassUtilities) {
+ EXPECT_CALL(AnalysisHandle, run(HasName("loop"), _, _));
+ EXPECT_CALL(AnalysisHandle, invalidate(HasName("loop"), _, _));
+
+ StringRef PipelineText = "require<test-analysis>,invalidate<test-analysis>";
+
+ ASSERT_TRUE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+ PM.run(*M, AM);
+}
+
+/// Test parsing of the top-level pipeline.
+///
+/// The ParseTopLevelPipeline callback takes over parsing of the entire pipeline
+/// from PassBuilder if it encounters an unknown pipeline entry at the top level
+/// (i.e., the first entry on the pipeline).
+/// This test parses a pipeline named 'another-pipeline', whose only elements
+/// may be the test-transform pass or the analysis utilities.
+TEST_F(ModuleCallbacksTest, ParseTopLevelPipeline) {
+ PB.registerParseTopLevelPipelineCallback([this](
+ ModulePassManager &MPM, ArrayRef<PassBuilder::PipelineElement> Pipeline,
+ bool VerifyEachPass, bool DebugLogging) {
+ auto &FirstName = Pipeline.front().Name;
+ auto &InnerPipeline = Pipeline.front().InnerPipeline;
+ if (FirstName == "another-pipeline") {
+ for (auto &E : InnerPipeline) {
+ if (parseAnalysisUtilityPasses<AnalysisT>("test-analysis", E.Name, PM))
+ continue;
+
+ if (E.Name == "test-transform") {
+ PM.addPass(PassHandle.getPass());
+ continue;
+ }
+ return false;
+ }
+ }
+ return true;
+ });
+
+ EXPECT_CALL(AnalysisHandle, run(HasName("<string>"), _));
+ EXPECT_CALL(PassHandle, run(HasName("<string>"), _))
+ .WillOnce(Invoke(getAnalysisResult));
+ EXPECT_CALL(AnalysisHandle, invalidate(HasName("<string>"), _, _));
+
+ StringRef PipelineText =
+ "another-pipeline(test-transform,invalidate<test-analysis>)";
+ ASSERT_TRUE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+ PM.run(*M, AM);
+
+ /// Test the negative case
+ PipelineText = "another-pipeline(instcombine)";
+ ASSERT_FALSE(PB.parsePassPipeline(PM, PipelineText, true))
+ << "Pipeline was: " << PipelineText;
+}
+} // end anonymous namespace
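The new PassBuilderCallbacksTest above drives the analysis-registration, pipeline-parsing, and top-level-pipeline callbacks through gmock handles. For orientation, here is a stripped-down sketch of the same callback flow without the mocking machinery; the pass and pipeline names are illustrative only, and the code assumes the PassBuilder API as of this revision (bool-returning parsePassPipeline, DebugLogging flags on the analysis managers).

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"

using namespace llvm;

// Hypothetical no-op pass, used only to give the pipeline something to run.
struct NoopFunctionPass : PassInfoMixin<NoopFunctionPass> {
  PreservedAnalyses run(Function &, FunctionAnalysisManager &) {
    return PreservedAnalyses::all();
  }
};

static bool runTextualPipeline(Module &M) {
  PassBuilder PB;
  LoopAnalysisManager LAM(/*DebugLogging*/ false);
  FunctionAnalysisManager FAM(false);
  CGSCCAnalysisManager CGAM(false);
  ModuleAnalysisManager MAM(false);

  // Register the builtin analyses and cross-register the proxies, exactly as
  // the test fixture constructor does.
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  // Teach the parser a custom pass name, mirroring what the test's
  // registerPipelineParsingCallback does for "test-transform".
  PB.registerPipelineParsingCallback(
      [](StringRef Name, FunctionPassManager &FPM,
         ArrayRef<PassBuilder::PipelineElement>) {
        if (Name == "noop-function") {
          FPM.addPass(NoopFunctionPass());
          return true;
        }
        return false;
      });

  ModulePassManager MPM;
  if (!PB.parsePassPipeline(MPM, "function(noop-function)",
                            /*VerifyEachPass*/ true))
    return false;
  MPM.run(M, MAM);
  return true;
}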
diff --git a/unittests/IR/PassManagerTest.cpp b/unittests/IR/PassManagerTest.cpp
index ad06cc4778fe..0131bce3d2b2 100644
--- a/unittests/IR/PassManagerTest.cpp
+++ b/unittests/IR/PassManagerTest.cpp
@@ -210,6 +210,13 @@ TEST(PreservedAnalysesTest, Basic) {
EXPECT_FALSE(PAC.preserved());
EXPECT_FALSE(PAC.preservedSet<AllAnalysesOn<Function>>());
}
+ auto PA5 = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
+ {
+ auto PAC = PA5.getChecker<TestFunctionAnalysis>();
+ EXPECT_FALSE(PAC.preserved());
+ EXPECT_TRUE(PAC.preservedSet<AllAnalysesOn<Function>>());
+ EXPECT_FALSE(PAC.preservedSet<AllAnalysesOn<Module>>());
+ }
}
TEST(PreservedAnalysesTest, Preserve) {
diff --git a/unittests/ProfileData/CMakeLists.txt b/unittests/ProfileData/CMakeLists.txt
index dd39ca7da3ad..80f9ada7b83a 100644
--- a/unittests/ProfileData/CMakeLists.txt
+++ b/unittests/ProfileData/CMakeLists.txt
@@ -10,3 +10,5 @@ add_llvm_unittest(ProfileDataTests
InstrProfTest.cpp
SampleProfTest.cpp
)
+
+target_link_libraries(ProfileDataTests LLVMTestingSupport)
diff --git a/unittests/ProfileData/CoverageMappingTest.cpp b/unittests/ProfileData/CoverageMappingTest.cpp
index 1d621f4060ca..6588e753eab0 100644
--- a/unittests/ProfileData/CoverageMappingTest.cpp
+++ b/unittests/ProfileData/CoverageMappingTest.cpp
@@ -13,6 +13,8 @@
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Testing/Support/Error.h"
+#include "llvm/Testing/Support/SupportHelpers.h"
#include "gtest/gtest.h"
#include <ostream>
@@ -21,15 +23,8 @@
using namespace llvm;
using namespace coverage;
-static ::testing::AssertionResult NoError(Error E) {
- if (!E)
- return ::testing::AssertionSuccess();
- return ::testing::AssertionFailure() << "error: " << toString(std::move(E))
- << "\n";
-}
-
-static ::testing::AssertionResult ErrorEquals(coveragemap_error Expected,
- Error E) {
+LLVM_NODISCARD static ::testing::AssertionResult
+ErrorEquals(coveragemap_error Expected, Error E) {
coveragemap_error Found;
std::string FoundMsg;
handleAllErrors(std::move(E), [&](const CoverageMapError &CME) {
@@ -209,7 +204,7 @@ struct CoverageMappingTest : ::testing::TestWithParam<std::pair<bool, bool>> {
std::vector<CounterExpression> Expressions;
RawCoverageMappingReader Reader(Coverage, Filenames, Data.Filenames,
Expressions, Data.Regions);
- ASSERT_TRUE(NoError(Reader.read()));
+ EXPECT_THAT_ERROR(Reader.read(), Succeeded());
}
void writeAndReadCoverageRegions(bool EmitFilenames = true) {
@@ -227,7 +222,7 @@ struct CoverageMappingTest : ::testing::TestWithParam<std::pair<bool, bool>> {
void readProfCounts() {
auto Profile = ProfileWriter.writeBuffer();
auto ReaderOrErr = IndexedInstrProfReader::create(std::move(Profile));
- ASSERT_TRUE(NoError(ReaderOrErr.takeError()));
+ EXPECT_THAT_ERROR(ReaderOrErr.takeError(), Succeeded());
ProfileReader = std::move(ReaderOrErr.get());
}
@@ -308,9 +303,10 @@ TEST_P(CoverageMappingTest, correct_deserialize_for_more_than_two_files) {
}
}
+static const auto Err = [](Error E) { FAIL(); };
+
TEST_P(CoverageMappingTest, load_coverage_for_more_than_two_files) {
- InstrProfRecord Record("func", 0x1234, {0});
- NoError(ProfileWriter.addRecord(std::move(Record)));
+ ProfileWriter.addRecord({"func", 0x1234, {0}}, Err);
const char *FileNames[] = {"bar", "baz", "foo"};
static const unsigned N = array_lengthof(FileNames);
@@ -321,7 +317,7 @@ TEST_P(CoverageMappingTest, load_coverage_for_more_than_two_files) {
// in order to preserve that information during possible sorting of CMRs.
addCMR(Counter::getCounter(0), FileNames[I], I, 1, I, 1);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
for (unsigned I = 0; I < N; ++I) {
CoverageData Data = LoadedCoverage->getCoverageForFile(FileNames[I]);
@@ -331,18 +327,15 @@ TEST_P(CoverageMappingTest, load_coverage_for_more_than_two_files) {
}
TEST_P(CoverageMappingTest, load_coverage_with_bogus_function_name) {
- InstrProfRecord RecordFunc1("", 0x1234, {10});
- NoError(ProfileWriter.addRecord(std::move(RecordFunc1)));
+ ProfileWriter.addRecord({"", 0x1234, {10}}, Err);
startFunction("", 0x1234);
addCMR(Counter::getCounter(0), "foo", 1, 1, 5, 5);
- ErrorEquals(coveragemap_error::malformed, loadCoverageMapping());
+ EXPECT_TRUE(ErrorEquals(coveragemap_error::malformed, loadCoverageMapping()));
}
TEST_P(CoverageMappingTest, load_coverage_for_several_functions) {
- InstrProfRecord RecordFunc1("func1", 0x1234, {10});
- NoError(ProfileWriter.addRecord(std::move(RecordFunc1)));
- InstrProfRecord RecordFunc2("func2", 0x2345, {20});
- NoError(ProfileWriter.addRecord(std::move(RecordFunc2)));
+ ProfileWriter.addRecord({"func1", 0x1234, {10}}, Err);
+ ProfileWriter.addRecord({"func2", 0x2345, {20}}, Err);
startFunction("func1", 0x1234);
addCMR(Counter::getCounter(0), "foo", 1, 1, 5, 5);
@@ -350,7 +343,7 @@ TEST_P(CoverageMappingTest, load_coverage_for_several_functions) {
startFunction("func2", 0x2345);
addCMR(Counter::getCounter(0), "bar", 2, 2, 6, 6);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
const auto FunctionRecords = LoadedCoverage->getCoveredFunctions();
EXPECT_EQ(2, std::distance(FunctionRecords.begin(), FunctionRecords.end()));
@@ -386,15 +379,14 @@ TEST_P(CoverageMappingTest, expansion_gets_first_counter) {
}
TEST_P(CoverageMappingTest, basic_coverage_iteration) {
- InstrProfRecord Record("func", 0x1234, {30, 20, 10, 0});
- NoError(ProfileWriter.addRecord(std::move(Record)));
+ ProfileWriter.addRecord({"func", 0x1234, {30, 20, 10, 0}}, Err);
startFunction("func", 0x1234);
addCMR(Counter::getCounter(0), "file1", 1, 1, 9, 9);
addCMR(Counter::getCounter(1), "file1", 1, 1, 4, 7);
addCMR(Counter::getCounter(2), "file1", 5, 8, 9, 1);
addCMR(Counter::getCounter(3), "file1", 10, 10, 11, 11);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
CoverageData Data = LoadedCoverage->getCoverageForFile("file1");
std::vector<CoverageSegment> Segments(Data.begin(), Data.end());
@@ -411,7 +403,7 @@ TEST_P(CoverageMappingTest, basic_coverage_iteration) {
TEST_P(CoverageMappingTest, uncovered_function) {
startFunction("func", 0x1234);
addCMR(Counter::getZero(), "file1", 1, 2, 3, 4);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
CoverageData Data = LoadedCoverage->getCoverageForFile("file1");
std::vector<CoverageSegment> Segments(Data.begin(), Data.end());
@@ -424,7 +416,7 @@ TEST_P(CoverageMappingTest, uncovered_function_with_mapping) {
startFunction("func", 0x1234);
addCMR(Counter::getCounter(0), "file1", 1, 1, 9, 9);
addCMR(Counter::getCounter(1), "file1", 1, 1, 4, 7);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
CoverageData Data = LoadedCoverage->getCoverageForFile("file1");
std::vector<CoverageSegment> Segments(Data.begin(), Data.end());
@@ -435,14 +427,13 @@ TEST_P(CoverageMappingTest, uncovered_function_with_mapping) {
}
TEST_P(CoverageMappingTest, combine_regions) {
- InstrProfRecord Record("func", 0x1234, {10, 20, 30});
- NoError(ProfileWriter.addRecord(std::move(Record)));
+ ProfileWriter.addRecord({"func", 0x1234, {10, 20, 30}}, Err);
startFunction("func", 0x1234);
addCMR(Counter::getCounter(0), "file1", 1, 1, 9, 9);
addCMR(Counter::getCounter(1), "file1", 3, 3, 4, 4);
addCMR(Counter::getCounter(2), "file1", 3, 3, 4, 4);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
CoverageData Data = LoadedCoverage->getCoverageForFile("file1");
std::vector<CoverageSegment> Segments(Data.begin(), Data.end());
@@ -454,14 +445,13 @@ TEST_P(CoverageMappingTest, combine_regions) {
}
TEST_P(CoverageMappingTest, restore_combined_counter_after_nested_region) {
- InstrProfRecord Record("func", 0x1234, {10, 20, 40});
- NoError(ProfileWriter.addRecord(std::move(Record)));
+ ProfileWriter.addRecord({"func", 0x1234, {10, 20, 40}}, Err);
startFunction("func", 0x1234);
addCMR(Counter::getCounter(0), "file1", 1, 1, 9, 9);
addCMR(Counter::getCounter(1), "file1", 1, 1, 9, 9);
addCMR(Counter::getCounter(2), "file1", 3, 3, 5, 5);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
CoverageData Data = LoadedCoverage->getCoverageForFile("file1");
std::vector<CoverageSegment> Segments(Data.begin(), Data.end());
@@ -475,17 +465,15 @@ TEST_P(CoverageMappingTest, restore_combined_counter_after_nested_region) {
// If CodeRegions and ExpansionRegions cover the same area,
// only counts of CodeRegions should be used.
TEST_P(CoverageMappingTest, dont_combine_expansions) {
- InstrProfRecord Record1("func", 0x1234, {10, 20});
- InstrProfRecord Record2("func", 0x1234, {0, 0});
- NoError(ProfileWriter.addRecord(std::move(Record1)));
- NoError(ProfileWriter.addRecord(std::move(Record2)));
+ ProfileWriter.addRecord({"func", 0x1234, {10, 20}}, Err);
+ ProfileWriter.addRecord({"func", 0x1234, {0, 0}}, Err);
startFunction("func", 0x1234);
addCMR(Counter::getCounter(0), "file1", 1, 1, 9, 9);
addCMR(Counter::getCounter(1), "file1", 3, 3, 4, 4);
addCMR(Counter::getCounter(1), "include1", 6, 6, 7, 7);
addExpansionCMR("file1", "include1", 3, 3, 4, 4);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
CoverageData Data = LoadedCoverage->getCoverageForFile("file1");
std::vector<CoverageSegment> Segments(Data.begin(), Data.end());
@@ -498,8 +486,7 @@ TEST_P(CoverageMappingTest, dont_combine_expansions) {
// If an area is covered only by ExpansionRegions, they should be combined.
TEST_P(CoverageMappingTest, combine_expansions) {
- InstrProfRecord Record("func", 0x1234, {2, 3, 7});
- NoError(ProfileWriter.addRecord(std::move(Record)));
+ ProfileWriter.addRecord({"func", 0x1234, {2, 3, 7}}, Err);
startFunction("func", 0x1234);
addCMR(Counter::getCounter(1), "include1", 1, 1, 1, 10);
@@ -508,7 +495,7 @@ TEST_P(CoverageMappingTest, combine_expansions) {
addExpansionCMR("file", "include1", 3, 1, 3, 5);
addExpansionCMR("file", "include2", 3, 1, 3, 5);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
CoverageData Data = LoadedCoverage->getCoverageForFile("file");
std::vector<CoverageSegment> Segments(Data.begin(), Data.end());
@@ -520,12 +507,11 @@ TEST_P(CoverageMappingTest, combine_expansions) {
}
TEST_P(CoverageMappingTest, strip_filename_prefix) {
- InstrProfRecord Record("file1:func", 0x1234, {0});
- NoError(ProfileWriter.addRecord(std::move(Record)));
+ ProfileWriter.addRecord({"file1:func", 0x1234, {0}}, Err);
startFunction("file1:func", 0x1234);
addCMR(Counter::getCounter(0), "file1", 1, 1, 9, 9);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
std::vector<std::string> Names;
for (const auto &Func : LoadedCoverage->getCoveredFunctions())
@@ -535,12 +521,11 @@ TEST_P(CoverageMappingTest, strip_filename_prefix) {
}
TEST_P(CoverageMappingTest, strip_unknown_filename_prefix) {
- InstrProfRecord Record("<unknown>:func", 0x1234, {0});
- NoError(ProfileWriter.addRecord(std::move(Record)));
+ ProfileWriter.addRecord({"<unknown>:func", 0x1234, {0}}, Err);
startFunction("<unknown>:func", 0x1234);
addCMR(Counter::getCounter(0), "", 1, 1, 9, 9);
- NoError(loadCoverageMapping(/*EmitFilenames=*/false));
+ EXPECT_THAT_ERROR(loadCoverageMapping(/*EmitFilenames=*/false), Succeeded());
std::vector<std::string> Names;
for (const auto &Func : LoadedCoverage->getCoveredFunctions())
@@ -550,10 +535,8 @@ TEST_P(CoverageMappingTest, strip_unknown_filename_prefix) {
}
TEST_P(CoverageMappingTest, dont_detect_false_instantiations) {
- InstrProfRecord Record1("foo", 0x1234, {10});
- InstrProfRecord Record2("bar", 0x2345, {20});
- NoError(ProfileWriter.addRecord(std::move(Record1)));
- NoError(ProfileWriter.addRecord(std::move(Record2)));
+ ProfileWriter.addRecord({"foo", 0x1234, {10}}, Err);
+ ProfileWriter.addRecord({"bar", 0x2345, {20}}, Err);
startFunction("foo", 0x1234);
addCMR(Counter::getCounter(0), "expanded", 1, 1, 1, 10);
@@ -563,7 +546,7 @@ TEST_P(CoverageMappingTest, dont_detect_false_instantiations) {
addCMR(Counter::getCounter(0), "expanded", 1, 1, 1, 10);
addExpansionCMR("main", "expanded", 9, 1, 9, 5);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
std::vector<const FunctionRecord *> Instantiations =
LoadedCoverage->getInstantiations("expanded");
@@ -571,14 +554,13 @@ TEST_P(CoverageMappingTest, dont_detect_false_instantiations) {
}
TEST_P(CoverageMappingTest, load_coverage_for_expanded_file) {
- InstrProfRecord Record("func", 0x1234, {10});
- NoError(ProfileWriter.addRecord(std::move(Record)));
+ ProfileWriter.addRecord({"func", 0x1234, {10}}, Err);
startFunction("func", 0x1234);
addCMR(Counter::getCounter(0), "expanded", 1, 1, 1, 10);
addExpansionCMR("main", "expanded", 4, 1, 4, 5);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
CoverageData Data = LoadedCoverage->getCoverageForFile("expanded");
std::vector<CoverageSegment> Segments(Data.begin(), Data.end());
@@ -588,8 +570,7 @@ TEST_P(CoverageMappingTest, load_coverage_for_expanded_file) {
}
TEST_P(CoverageMappingTest, skip_duplicate_function_record) {
- InstrProfRecord Record("func", 0x1234, {1});
- NoError(ProfileWriter.addRecord(std::move(Record)));
+ ProfileWriter.addRecord({"func", 0x1234, {1}}, Err);
startFunction("func", 0x1234);
addCMR(Counter::getCounter(0), "file1", 1, 1, 9, 9);
@@ -597,7 +578,7 @@ TEST_P(CoverageMappingTest, skip_duplicate_function_record) {
startFunction("func", 0x1234);
addCMR(Counter::getCounter(0), "file1", 1, 1, 9, 9);
- NoError(loadCoverageMapping());
+ EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
auto Funcs = LoadedCoverage->getCoveredFunctions();
unsigned NumFuncs = std::distance(Funcs.begin(), Funcs.end());
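The NoError() helpers deleted above are replaced by the gmock-style matchers from llvm/Testing/Support/Error.h (hence the new LLVMTestingSupport link dependency added to the ProfileData CMakeLists earlier in this diff). A small, hedged sketch of how EXPECT_THAT_ERROR reads in isolation; maybeFail is a made-up function under test.

#include "llvm/Support/Error.h"
#include "llvm/Testing/Support/Error.h"
#include "gtest/gtest.h"

using namespace llvm;

// Made-up function under test: fails only when asked to.
static Error maybeFail(bool Fail) {
  if (Fail)
    return make_error<StringError>("boom", inconvertibleErrorCode());
  return Error::success();
}

TEST(ErrorMatchersExample, SucceededAndFailed) {
  // Succeeded()/Failed() consume the Error and produce readable failure
  // messages, replacing hand-rolled helpers like the deleted NoError().
  EXPECT_THAT_ERROR(maybeFail(false), Succeeded());
  EXPECT_THAT_ERROR(maybeFail(true), Failed());
}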
diff --git a/unittests/ProfileData/InstrProfTest.cpp b/unittests/ProfileData/InstrProfTest.cpp
index 13436cc0d5b2..79f880e475c6 100644
--- a/unittests/ProfileData/InstrProfTest.cpp
+++ b/unittests/ProfileData/InstrProfTest.cpp
@@ -14,20 +14,15 @@
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/Support/Compression.h"
+#include "llvm/Testing/Support/Error.h"
+#include "llvm/Testing/Support/SupportHelpers.h"
#include "gtest/gtest.h"
#include <cstdarg>
using namespace llvm;
-static ::testing::AssertionResult NoError(Error E) {
- if (!E)
- return ::testing::AssertionSuccess();
- return ::testing::AssertionFailure() << "error: " << toString(std::move(E))
- << "\n";
-}
-
-static ::testing::AssertionResult ErrorEquals(instrprof_error Expected,
- Error E) {
+LLVM_NODISCARD static ::testing::AssertionResult
+ErrorEquals(instrprof_error Expected, Error E) {
instrprof_error Found;
std::string FoundMsg;
handleAllErrors(std::move(E), [&](const InstrProfError &IPE) {
@@ -49,7 +44,7 @@ struct InstrProfTest : ::testing::Test {
void readProfile(std::unique_ptr<MemoryBuffer> Profile) {
auto ReaderOrErr = IndexedInstrProfReader::create(std::move(Profile));
- ASSERT_TRUE(NoError(ReaderOrErr.takeError()));
+ EXPECT_THAT_ERROR(ReaderOrErr.takeError(), Succeeded());
Reader = std::move(ReaderOrErr.get());
}
};
@@ -69,9 +64,13 @@ TEST_P(MaybeSparseInstrProfTest, write_and_read_empty_profile) {
ASSERT_TRUE(Reader->begin() == Reader->end());
}
+static const auto Err = [](Error E) {
+ consumeError(std::move(E));
+ FAIL();
+};
+
TEST_P(MaybeSparseInstrProfTest, write_and_read_one_function) {
- InstrProfRecord Record("foo", 0x1234, {1, 2, 3, 4});
- NoError(Writer.addRecord(std::move(Record)));
+ Writer.addRecord({"foo", 0x1234, {1, 2, 3, 4}}, Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
@@ -88,21 +87,19 @@ TEST_P(MaybeSparseInstrProfTest, write_and_read_one_function) {
}
TEST_P(MaybeSparseInstrProfTest, get_instr_prof_record) {
- InstrProfRecord Record1("foo", 0x1234, {1, 2});
- InstrProfRecord Record2("foo", 0x1235, {3, 4});
- NoError(Writer.addRecord(std::move(Record1)));
- NoError(Writer.addRecord(std::move(Record2)));
+ Writer.addRecord({"foo", 0x1234, {1, 2}}, Err);
+ Writer.addRecord({"foo", 0x1235, {3, 4}}, Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
Expected<InstrProfRecord> R = Reader->getInstrProfRecord("foo", 0x1234);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
ASSERT_EQ(2U, R->Counts.size());
ASSERT_EQ(1U, R->Counts[0]);
ASSERT_EQ(2U, R->Counts[1]);
R = Reader->getInstrProfRecord("foo", 0x1235);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
ASSERT_EQ(2U, R->Counts.size());
ASSERT_EQ(3U, R->Counts[0]);
ASSERT_EQ(4U, R->Counts[1]);
@@ -115,20 +112,20 @@ TEST_P(MaybeSparseInstrProfTest, get_instr_prof_record) {
}
TEST_P(MaybeSparseInstrProfTest, get_function_counts) {
- InstrProfRecord Record1("foo", 0x1234, {1, 2});
- InstrProfRecord Record2("foo", 0x1235, {3, 4});
- NoError(Writer.addRecord(std::move(Record1)));
- NoError(Writer.addRecord(std::move(Record2)));
+ Writer.addRecord({"foo", 0x1234, {1, 2}}, Err);
+ Writer.addRecord({"foo", 0x1235, {3, 4}}, Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
std::vector<uint64_t> Counts;
- ASSERT_TRUE(NoError(Reader->getFunctionCounts("foo", 0x1234, Counts)));
+ EXPECT_THAT_ERROR(Reader->getFunctionCounts("foo", 0x1234, Counts),
+ Succeeded());
ASSERT_EQ(2U, Counts.size());
ASSERT_EQ(1U, Counts[0]);
ASSERT_EQ(2U, Counts[1]);
- ASSERT_TRUE(NoError(Reader->getFunctionCounts("foo", 0x1235, Counts)));
+ EXPECT_THAT_ERROR(Reader->getFunctionCounts("foo", 0x1235, Counts),
+ Succeeded());
ASSERT_EQ(2U, Counts.size());
ASSERT_EQ(3U, Counts[0]);
ASSERT_EQ(4U, Counts[1]);
@@ -142,17 +139,15 @@ TEST_P(MaybeSparseInstrProfTest, get_function_counts) {
// Profile data is copied from general.proftext
TEST_F(InstrProfTest, get_profile_summary) {
- InstrProfRecord Record1("func1", 0x1234, {97531});
- InstrProfRecord Record2("func2", 0x1234, {0, 0});
- InstrProfRecord Record3("func3", 0x1234,
- {2305843009213693952, 1152921504606846976,
- 576460752303423488, 288230376151711744,
- 144115188075855872, 72057594037927936});
- InstrProfRecord Record4("func4", 0x1234, {0});
- NoError(Writer.addRecord(std::move(Record1)));
- NoError(Writer.addRecord(std::move(Record2)));
- NoError(Writer.addRecord(std::move(Record3)));
- NoError(Writer.addRecord(std::move(Record4)));
+ Writer.addRecord({"func1", 0x1234, {97531}}, Err);
+ Writer.addRecord({"func2", 0x1234, {0, 0}}, Err);
+ Writer.addRecord(
+ {"func3",
+ 0x1234,
+ {2305843009213693952, 1152921504606846976, 576460752303423488,
+ 288230376151711744, 144115188075855872, 72057594037927936}},
+ Err);
+ Writer.addRecord({"func4", 0x1234, {0}}, Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
@@ -203,25 +198,23 @@ TEST_F(InstrProfTest, get_profile_summary) {
}
TEST_F(InstrProfTest, test_writer_merge) {
- InstrProfRecord Record1("func1", 0x1234, {42});
- NoError(Writer.addRecord(std::move(Record1)));
+ Writer.addRecord({"func1", 0x1234, {42}}, Err);
InstrProfWriter Writer2;
- InstrProfRecord Record2("func2", 0x1234, {0, 0});
- NoError(Writer2.addRecord(std::move(Record2)));
+ Writer2.addRecord({"func2", 0x1234, {0, 0}}, Err);
- NoError(Writer.mergeRecordsFromWriter(std::move(Writer2)));
+ Writer.mergeRecordsFromWriter(std::move(Writer2), Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
Expected<InstrProfRecord> R = Reader->getInstrProfRecord("func1", 0x1234);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
ASSERT_EQ(1U, R->Counts.size());
ASSERT_EQ(42U, R->Counts[0]);
R = Reader->getInstrProfRecord("func2", 0x1234);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
ASSERT_EQ(2U, R->Counts.size());
ASSERT_EQ(0U, R->Counts[0]);
ASSERT_EQ(0U, R->Counts[1]);
@@ -235,10 +228,7 @@ static const char callee5[] = "callee5";
static const char callee6[] = "callee6";
TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write) {
- InstrProfRecord Record1("caller", 0x1234, {1, 2});
- InstrProfRecord Record2("callee1", 0x1235, {3, 4});
- InstrProfRecord Record3("callee2", 0x1235, {3, 4});
- InstrProfRecord Record4("callee3", 0x1235, {3, 4});
+ NamedInstrProfRecord Record1("caller", 0x1234, {1, 2});
// 4 value sites.
Record1.reserveSites(IPVK_IndirectCallTarget, 4);
@@ -252,15 +242,15 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write) {
InstrProfValueData VD3[] = {{(uint64_t)callee1, 1}};
Record1.addValueData(IPVK_IndirectCallTarget, 3, VD3, 1, nullptr);
- NoError(Writer.addRecord(std::move(Record1)));
- NoError(Writer.addRecord(std::move(Record2)));
- NoError(Writer.addRecord(std::move(Record3)));
- NoError(Writer.addRecord(std::move(Record4)));
+ Writer.addRecord(std::move(Record1), Err);
+ Writer.addRecord({"callee1", 0x1235, {3, 4}}, Err);
+ Writer.addRecord({"callee2", 0x1235, {3, 4}}, Err);
+ Writer.addRecord({"callee3", 0x1235, {3, 4}}, Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
Expected<InstrProfRecord> R = Reader->getInstrProfRecord("caller", 0x1234);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
ASSERT_EQ(4U, R->getNumValueSites(IPVK_IndirectCallTarget));
ASSERT_EQ(3U, R->getNumValueDataForSite(IPVK_IndirectCallTarget, 0));
ASSERT_EQ(0U, R->getNumValueDataForSite(IPVK_IndirectCallTarget, 1));
@@ -282,16 +272,16 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write) {
}
TEST_P(MaybeSparseInstrProfTest, annotate_vp_data) {
- InstrProfRecord Record("caller", 0x1234, {1, 2});
+ NamedInstrProfRecord Record("caller", 0x1234, {1, 2});
Record.reserveSites(IPVK_IndirectCallTarget, 1);
InstrProfValueData VD0[] = {{1000, 1}, {2000, 2}, {3000, 3}, {5000, 5},
{4000, 4}, {6000, 6}};
Record.addValueData(IPVK_IndirectCallTarget, 0, VD0, 6, nullptr);
- NoError(Writer.addRecord(std::move(Record)));
+ Writer.addRecord(std::move(Record), Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
Expected<InstrProfRecord> R = Reader->getInstrProfRecord("caller", 0x1234);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
LLVMContext Ctx;
std::unique_ptr<Module> M(new Module("MyModule", Ctx));
@@ -378,10 +368,7 @@ TEST_P(MaybeSparseInstrProfTest, annotate_vp_data) {
}
TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write_with_weight) {
- InstrProfRecord Record1("caller", 0x1234, {1, 2});
- InstrProfRecord Record2("callee1", 0x1235, {3, 4});
- InstrProfRecord Record3("callee2", 0x1235, {3, 4});
- InstrProfRecord Record4("callee3", 0x1235, {3, 4});
+ NamedInstrProfRecord Record1("caller", 0x1234, {1, 2});
// 4 value sites.
Record1.reserveSites(IPVK_IndirectCallTarget, 4);
@@ -395,15 +382,15 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write_with_weight) {
InstrProfValueData VD3[] = {{(uint64_t)callee1, 1}};
Record1.addValueData(IPVK_IndirectCallTarget, 3, VD3, 1, nullptr);
- NoError(Writer.addRecord(std::move(Record1), 10));
- NoError(Writer.addRecord(std::move(Record2)));
- NoError(Writer.addRecord(std::move(Record3)));
- NoError(Writer.addRecord(std::move(Record4)));
+ Writer.addRecord(std::move(Record1), 10, Err);
+ Writer.addRecord({"callee1", 0x1235, {3, 4}}, Err);
+ Writer.addRecord({"callee2", 0x1235, {3, 4}}, Err);
+ Writer.addRecord({"callee3", 0x1235, {3, 4}}, Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
Expected<InstrProfRecord> R = Reader->getInstrProfRecord("caller", 0x1234);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
ASSERT_EQ(4U, R->getNumValueSites(IPVK_IndirectCallTarget));
ASSERT_EQ(3U, R->getNumValueDataForSite(IPVK_IndirectCallTarget, 0));
ASSERT_EQ(0U, R->getNumValueDataForSite(IPVK_IndirectCallTarget, 1));
@@ -424,10 +411,7 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write_with_weight) {
}
TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write_big_endian) {
- InstrProfRecord Record1("caller", 0x1234, {1, 2});
- InstrProfRecord Record2("callee1", 0x1235, {3, 4});
- InstrProfRecord Record3("callee2", 0x1235, {3, 4});
- InstrProfRecord Record4("callee3", 0x1235, {3, 4});
+ NamedInstrProfRecord Record1("caller", 0x1234, {1, 2});
// 4 value sites.
Record1.reserveSites(IPVK_IndirectCallTarget, 4);
@@ -441,10 +425,10 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write_big_endian) {
InstrProfValueData VD3[] = {{(uint64_t)callee1, 1}};
Record1.addValueData(IPVK_IndirectCallTarget, 3, VD3, 1, nullptr);
- NoError(Writer.addRecord(std::move(Record1)));
- NoError(Writer.addRecord(std::move(Record2)));
- NoError(Writer.addRecord(std::move(Record3)));
- NoError(Writer.addRecord(std::move(Record4)));
+ Writer.addRecord(std::move(Record1), Err);
+ Writer.addRecord({"callee1", 0x1235, {3, 4}}, Err);
+ Writer.addRecord({"callee2", 0x1235, {3, 4}}, Err);
+ Writer.addRecord({"callee3", 0x1235, {3, 4}}, Err);
// Set big endian output.
Writer.setValueProfDataEndianness(support::big);
@@ -456,7 +440,7 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write_big_endian) {
Reader->setValueProfDataEndianness(support::big);
Expected<InstrProfRecord> R = Reader->getInstrProfRecord("caller", 0x1234);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
ASSERT_EQ(4U, R->getNumValueSites(IPVK_IndirectCallTarget));
ASSERT_EQ(3U, R->getNumValueDataForSite(IPVK_IndirectCallTarget, 0));
ASSERT_EQ(0U, R->getNumValueDataForSite(IPVK_IndirectCallTarget, 1));
@@ -475,13 +459,8 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_read_write_big_endian) {
TEST_P(MaybeSparseInstrProfTest, get_icall_data_merge1) {
static const char caller[] = "caller";
- InstrProfRecord Record11(caller, 0x1234, {1, 2});
- InstrProfRecord Record12(caller, 0x1234, {1, 2});
- InstrProfRecord Record2(callee1, 0x1235, {3, 4});
- InstrProfRecord Record3(callee2, 0x1235, {3, 4});
- InstrProfRecord Record4(callee3, 0x1235, {3, 4});
- InstrProfRecord Record5(callee3, 0x1235, {3, 4});
- InstrProfRecord Record6(callee4, 0x1235, {3, 5});
+ NamedInstrProfRecord Record11(caller, 0x1234, {1, 2});
+ NamedInstrProfRecord Record12(caller, 0x1234, {1, 2});
// 5 value sites.
Record11.reserveSites(IPVK_IndirectCallTarget, 5);
@@ -525,20 +504,20 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_merge1) {
{uint64_t(callee3), 3}};
Record12.addValueData(IPVK_IndirectCallTarget, 4, VD42, 3, nullptr);
- NoError(Writer.addRecord(std::move(Record11)));
+ Writer.addRecord(std::move(Record11), Err);
// Merge profile data.
- NoError(Writer.addRecord(std::move(Record12)));
+ Writer.addRecord(std::move(Record12), Err);
- NoError(Writer.addRecord(std::move(Record2)));
- NoError(Writer.addRecord(std::move(Record3)));
- NoError(Writer.addRecord(std::move(Record4)));
- NoError(Writer.addRecord(std::move(Record5)));
- NoError(Writer.addRecord(std::move(Record6)));
+ Writer.addRecord({callee1, 0x1235, {3, 4}}, Err);
+ Writer.addRecord({callee2, 0x1235, {3, 4}}, Err);
+ Writer.addRecord({callee3, 0x1235, {3, 4}}, Err);
+ Writer.addRecord({callee3, 0x1235, {3, 4}}, Err);
+ Writer.addRecord({callee4, 0x1235, {3, 5}}, Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
Expected<InstrProfRecord> R = Reader->getInstrProfRecord("caller", 0x1234);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
ASSERT_EQ(5U, R->getNumValueSites(IPVK_IndirectCallTarget));
ASSERT_EQ(4U, R->getNumValueDataForSite(IPVK_IndirectCallTarget, 0));
ASSERT_EQ(0U, R->getNumValueDataForSite(IPVK_IndirectCallTarget, 1));
@@ -588,38 +567,37 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_merge1_saturation) {
const uint64_t Max = std::numeric_limits<uint64_t>::max();
- InstrProfRecord Record1("foo", 0x1234, {1});
- auto Result1 = Writer.addRecord(std::move(Record1));
- ASSERT_EQ(InstrProfError::take(std::move(Result1)),
- instrprof_error::success);
+ instrprof_error Result;
+ auto Err = [&](Error E) { Result = InstrProfError::take(std::move(E)); };
+ Result = instrprof_error::success;
+ Writer.addRecord({"foo", 0x1234, {1}}, Err);
+ ASSERT_EQ(Result, instrprof_error::success);
// Verify counter overflow.
- InstrProfRecord Record2("foo", 0x1234, {Max});
- auto Result2 = Writer.addRecord(std::move(Record2));
- ASSERT_EQ(InstrProfError::take(std::move(Result2)),
- instrprof_error::counter_overflow);
+ Result = instrprof_error::success;
+ Writer.addRecord({"foo", 0x1234, {Max}}, Err);
+ ASSERT_EQ(Result, instrprof_error::counter_overflow);
- InstrProfRecord Record3(bar, 0x9012, {8});
- auto Result3 = Writer.addRecord(std::move(Record3));
- ASSERT_EQ(InstrProfError::take(std::move(Result3)),
- instrprof_error::success);
+ Result = instrprof_error::success;
+ Writer.addRecord({bar, 0x9012, {8}}, Err);
+ ASSERT_EQ(Result, instrprof_error::success);
- InstrProfRecord Record4("baz", 0x5678, {3, 4});
+ NamedInstrProfRecord Record4("baz", 0x5678, {3, 4});
Record4.reserveSites(IPVK_IndirectCallTarget, 1);
InstrProfValueData VD4[] = {{uint64_t(bar), 1}};
Record4.addValueData(IPVK_IndirectCallTarget, 0, VD4, 1, nullptr);
- auto Result4 = Writer.addRecord(std::move(Record4));
- ASSERT_EQ(InstrProfError::take(std::move(Result4)),
- instrprof_error::success);
+ Result = instrprof_error::success;
+ Writer.addRecord(std::move(Record4), Err);
+ ASSERT_EQ(Result, instrprof_error::success);
// Verify value data counter overflow.
- InstrProfRecord Record5("baz", 0x5678, {5, 6});
+ NamedInstrProfRecord Record5("baz", 0x5678, {5, 6});
Record5.reserveSites(IPVK_IndirectCallTarget, 1);
InstrProfValueData VD5[] = {{uint64_t(bar), Max}};
Record5.addValueData(IPVK_IndirectCallTarget, 0, VD5, 1, nullptr);
- auto Result5 = Writer.addRecord(std::move(Record5));
- ASSERT_EQ(InstrProfError::take(std::move(Result5)),
- instrprof_error::counter_overflow);
+ Result = instrprof_error::success;
+ Writer.addRecord(std::move(Record5), Err);
+ ASSERT_EQ(Result, instrprof_error::counter_overflow);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
@@ -627,7 +605,7 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_merge1_saturation) {
// Verify saturation of counts.
Expected<InstrProfRecord> ReadRecord1 =
Reader->getInstrProfRecord("foo", 0x1234);
- ASSERT_TRUE(NoError(ReadRecord1.takeError()));
+ EXPECT_THAT_ERROR(ReadRecord1.takeError(), Succeeded());
ASSERT_EQ(Max, ReadRecord1->Counts[0]);
Expected<InstrProfRecord> ReadRecord2 =
@@ -646,8 +624,8 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_merge1_saturation) {
TEST_P(MaybeSparseInstrProfTest, get_icall_data_merge_site_trunc) {
static const char caller[] = "caller";
- InstrProfRecord Record11(caller, 0x1234, {1, 2});
- InstrProfRecord Record12(caller, 0x1234, {1, 2});
+ NamedInstrProfRecord Record11(caller, 0x1234, {1, 2});
+ NamedInstrProfRecord Record12(caller, 0x1234, {1, 2});
// 2 value sites.
Record11.reserveSites(IPVK_IndirectCallTarget, 2);
@@ -670,15 +648,15 @@ TEST_P(MaybeSparseInstrProfTest, get_icall_data_merge_site_trunc) {
Record12.addValueData(IPVK_IndirectCallTarget, 0, VD1, 255, nullptr);
Record12.addValueData(IPVK_IndirectCallTarget, 1, nullptr, 0, nullptr);
- NoError(Writer.addRecord(std::move(Record11)));
+ Writer.addRecord(std::move(Record11), Err);
// Merge profile data.
- NoError(Writer.addRecord(std::move(Record12)));
+ Writer.addRecord(std::move(Record12), Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
Expected<InstrProfRecord> R = Reader->getInstrProfRecord("caller", 0x1234);
- ASSERT_TRUE(NoError(R.takeError()));
+ EXPECT_THAT_ERROR(R.takeError(), Succeeded());
std::unique_ptr<InstrProfValueData[]> VD(
R->getValueForSite(IPVK_IndirectCallTarget, 0));
ASSERT_EQ(2U, R->getNumValueSites(IPVK_IndirectCallTarget));
@@ -713,12 +691,12 @@ static void addValueProfData(InstrProfRecord &Record) {
}
TEST_P(MaybeSparseInstrProfTest, value_prof_data_read_write) {
- InstrProfRecord SrcRecord("caller", 0x1234, {1ULL << 31, 2});
+ InstrProfRecord SrcRecord({1ULL << 31, 2});
addValueProfData(SrcRecord);
std::unique_ptr<ValueProfData> VPData =
ValueProfData::serializeFrom(SrcRecord);
- InstrProfRecord Record("caller", 0x1234, {1ULL << 31, 2});
+ InstrProfRecord Record({1ULL << 31, 2});
VPData->deserializeTo(Record, nullptr);
// Now read data from Record and sanity check the data
@@ -779,12 +757,12 @@ TEST_P(MaybeSparseInstrProfTest, value_prof_data_read_write) {
TEST_P(MaybeSparseInstrProfTest, value_prof_data_read_write_mapping) {
- InstrProfRecord SrcRecord("caller", 0x1234, {1ULL << 31, 2});
+ NamedInstrProfRecord SrcRecord("caller", 0x1234, {1ULL << 31, 2});
addValueProfData(SrcRecord);
std::unique_ptr<ValueProfData> VPData =
ValueProfData::serializeFrom(SrcRecord);
- InstrProfRecord Record("caller", 0x1234, {1ULL << 31, 2});
+ NamedInstrProfRecord Record("caller", 0x1234, {1ULL << 31, 2});
InstrProfSymtab Symtab;
Symtab.mapAddress(uint64_t(callee1), 0x1000ULL);
Symtab.mapAddress(uint64_t(callee2), 0x2000ULL);
@@ -817,12 +795,9 @@ TEST_P(MaybeSparseInstrProfTest, value_prof_data_read_write_mapping) {
}
TEST_P(MaybeSparseInstrProfTest, get_max_function_count) {
- InstrProfRecord Record1("foo", 0x1234, {1ULL << 31, 2});
- InstrProfRecord Record2("bar", 0, {1ULL << 63});
- InstrProfRecord Record3("baz", 0x5678, {0, 0, 0, 0});
- NoError(Writer.addRecord(std::move(Record1)));
- NoError(Writer.addRecord(std::move(Record2)));
- NoError(Writer.addRecord(std::move(Record3)));
+ Writer.addRecord({"foo", 0x1234, {1ULL << 31, 2}}, Err);
+ Writer.addRecord({"bar", 0, {1ULL << 63}}, Err);
+ Writer.addRecord({"baz", 0x5678, {0, 0, 0, 0}}, Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
@@ -830,20 +805,20 @@ TEST_P(MaybeSparseInstrProfTest, get_max_function_count) {
}
TEST_P(MaybeSparseInstrProfTest, get_weighted_function_counts) {
- InstrProfRecord Record1("foo", 0x1234, {1, 2});
- InstrProfRecord Record2("foo", 0x1235, {3, 4});
- NoError(Writer.addRecord(std::move(Record1), 3));
- NoError(Writer.addRecord(std::move(Record2), 5));
+ Writer.addRecord({"foo", 0x1234, {1, 2}}, 3, Err);
+ Writer.addRecord({"foo", 0x1235, {3, 4}}, 5, Err);
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
std::vector<uint64_t> Counts;
- ASSERT_TRUE(NoError(Reader->getFunctionCounts("foo", 0x1234, Counts)));
+ EXPECT_THAT_ERROR(Reader->getFunctionCounts("foo", 0x1234, Counts),
+ Succeeded());
ASSERT_EQ(2U, Counts.size());
ASSERT_EQ(3U, Counts[0]);
ASSERT_EQ(6U, Counts[1]);
- ASSERT_TRUE(NoError(Reader->getFunctionCounts("foo", 0x1235, Counts)));
+ EXPECT_THAT_ERROR(Reader->getFunctionCounts("foo", 0x1235, Counts),
+ Succeeded());
ASSERT_EQ(2U, Counts.size());
ASSERT_EQ(15U, Counts[0]);
ASSERT_EQ(20U, Counts[1]);
@@ -859,7 +834,7 @@ TEST_P(MaybeSparseInstrProfTest, instr_prof_symtab_test) {
FuncNames.push_back("bar2");
FuncNames.push_back("bar3");
InstrProfSymtab Symtab;
- NoError(Symtab.create(FuncNames));
+ EXPECT_THAT_ERROR(Symtab.create(FuncNames), Succeeded());
StringRef R = Symtab.getFuncName(IndexedInstrProf::ComputeHash("func1"));
ASSERT_EQ(StringRef("func1"), R);
R = Symtab.getFuncName(IndexedInstrProf::ComputeHash("func2"));
@@ -880,9 +855,9 @@ TEST_P(MaybeSparseInstrProfTest, instr_prof_symtab_test) {
ASSERT_EQ(StringRef(), R);
// Now incrementally update the symtab
- NoError(Symtab.addFuncName("blah_1"));
- NoError(Symtab.addFuncName("blah_2"));
- NoError(Symtab.addFuncName("blah_3"));
+ EXPECT_THAT_ERROR(Symtab.addFuncName("blah_1"), Succeeded());
+ EXPECT_THAT_ERROR(Symtab.addFuncName("blah_2"), Succeeded());
+ EXPECT_THAT_ERROR(Symtab.addFuncName("blah_3"), Succeeded());
// Finalize it
Symtab.finalizeSymtab();
@@ -910,7 +885,7 @@ TEST_P(MaybeSparseInstrProfTest, instr_prof_symtab_test) {
// Test that we get an error when creating a bogus symtab.
TEST_P(MaybeSparseInstrProfTest, instr_prof_bogus_symtab_empty_func_name) {
InstrProfSymtab Symtab;
- ErrorEquals(instrprof_error::malformed, Symtab.addFuncName(""));
+ EXPECT_TRUE(ErrorEquals(instrprof_error::malformed, Symtab.addFuncName("")));
}
// Testing symtab creator interface used by value profile transformer.
@@ -933,7 +908,7 @@ TEST_P(MaybeSparseInstrProfTest, instr_prof_symtab_module_test) {
Function::Create(FTy, Function::WeakODRLinkage, "Wbar", M.get());
InstrProfSymtab ProfSymtab;
- NoError(ProfSymtab.create(*M));
+ EXPECT_THAT_ERROR(ProfSymtab.create(*M), Succeeded());
StringRef Funcs[] = {"Gfoo", "Gblah", "Gbar", "Ifoo", "Iblah", "Ibar",
"Pfoo", "Pblah", "Pbar", "Wfoo", "Wblah", "Wbar"};
@@ -973,13 +948,17 @@ TEST_P(MaybeSparseInstrProfTest, instr_prof_symtab_compression_test) {
for (bool DoCompression : {false, true}) {
// Compressing:
std::string FuncNameStrings1;
- NoError(collectPGOFuncNameStrings(
- FuncNames1, (DoCompression && zlib::isAvailable()), FuncNameStrings1));
+ EXPECT_THAT_ERROR(collectPGOFuncNameStrings(
+ FuncNames1, (DoCompression && zlib::isAvailable()),
+ FuncNameStrings1),
+ Succeeded());
// Compressing:
std::string FuncNameStrings2;
- NoError(collectPGOFuncNameStrings(
- FuncNames2, (DoCompression && zlib::isAvailable()), FuncNameStrings2));
+ EXPECT_THAT_ERROR(collectPGOFuncNameStrings(
+ FuncNames2, (DoCompression && zlib::isAvailable()),
+ FuncNameStrings2),
+ Succeeded());
for (int Padding = 0; Padding < 2; Padding++) {
// Join with paddings :
@@ -991,7 +970,7 @@ TEST_P(MaybeSparseInstrProfTest, instr_prof_symtab_compression_test) {
// Now decompress:
InstrProfSymtab Symtab;
- NoError(Symtab.create(StringRef(FuncNameStrings)));
+ EXPECT_THAT_ERROR(Symtab.create(StringRef(FuncNameStrings)), Succeeded());
// Now do the checks:
// First sampling some data points:
@@ -1015,13 +994,10 @@ TEST_P(MaybeSparseInstrProfTest, instr_prof_symtab_compression_test) {
}
TEST_F(SparseInstrProfTest, preserve_no_records) {
- InstrProfRecord Record1("foo", 0x1234, {0});
- InstrProfRecord Record2("bar", 0x4321, {0, 0});
- InstrProfRecord Record3("bar", 0x4321, {0, 0, 0});
+ Writer.addRecord({"foo", 0x1234, {0}}, Err);
+ Writer.addRecord({"bar", 0x4321, {0, 0}}, Err);
+ Writer.addRecord({"baz", 0x4321, {0, 0, 0}}, Err);
- NoError(Writer.addRecord(std::move(Record1)));
- NoError(Writer.addRecord(std::move(Record2)));
- NoError(Writer.addRecord(std::move(Record3)));
auto Profile = Writer.writeBuffer();
readProfile(std::move(Profile));
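For reference, a minimal sketch of the error-callback form of InstrProfWriter::addRecord that the updated tests above exercise; the record literals, the weighted overload, and the Err lambda follow the test code in this patch, everything else is an assumption:

    // Sketch only, mirroring the updated unit tests above.
    instrprof_error Result = instrprof_error::success;
    auto Err = [&](Error E) { Result = InstrProfError::take(std::move(E)); };

    InstrProfWriter Writer;
    Writer.addRecord({"foo", 0x1234, {1, 2}}, Err);               // default weight
    Writer.addRecord({"foo", 0x1235, {3, 4}}, /*Weight=*/5, Err); // weighted merge
    if (Result != instrprof_error::success) {
      // Handle counter_overflow or other merge errors reported via the callback.
    }
    auto Profile = Writer.writeBuffer();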
diff --git a/unittests/Support/DynamicLibrary/DynamicLibraryTest.cpp b/unittests/Support/DynamicLibrary/DynamicLibraryTest.cpp
index c54e1b7eed24..370e1c5ed5e8 100644
--- a/unittests/Support/DynamicLibrary/DynamicLibraryTest.cpp
+++ b/unittests/Support/DynamicLibrary/DynamicLibraryTest.cpp
@@ -77,6 +77,7 @@ TEST(DynamicLibrary, Overload) {
EXPECT_TRUE(DL.isValid());
EXPECT_TRUE(Err.empty());
+ // Test that overloading local symbols does not occur by default
GS = FuncPtr<GetString>(DynamicLibrary::SearchForAddressOfSymbol("TestA"));
EXPECT_TRUE(GS != nullptr && GS == &TestA);
EXPECT_EQ(StdString(GS()), "ProcessCall");
@@ -85,6 +86,12 @@ TEST(DynamicLibrary, Overload) {
EXPECT_TRUE(GS != nullptr && GS == &TestA);
EXPECT_EQ(StdString(GS()), "ProcessCall");
+ // Test overloading by forcing library priority when searching for a symbol
+ DynamicLibrary::SearchOrder = DynamicLibrary::SO_LoadedFirst;
+ GS = FuncPtr<GetString>(DynamicLibrary::SearchForAddressOfSymbol("TestA"));
+ EXPECT_TRUE(GS != nullptr && GS != &TestA);
+ EXPECT_EQ(StdString(GS()), "LibCall");
+
DynamicLibrary::AddSymbol("TestA", PtrFunc(&OverloadTestA));
GS = FuncPtr<GetString>(DL.getAddressOfSymbol("TestA"));
EXPECT_TRUE(GS != nullptr && GS != &OverloadTestA);
@@ -95,6 +102,9 @@ TEST(DynamicLibrary, Overload) {
}
EXPECT_TRUE(FuncPtr<GetString>(DynamicLibrary::SearchForAddressOfSymbol(
"TestA")) == nullptr);
+
+ // Check search ordering is reset to default after a call to llvm_shutdown
+ EXPECT_TRUE(DynamicLibrary::SearchOrder == DynamicLibrary::SO_Linker);
}
TEST(DynamicLibrary, Shutdown) {
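The new assertions above use the DynamicLibrary::SearchOrder knob. A minimal sketch of the intended usage, based only on the symbols referenced in this test (SO_LoadedFirst, SO_Linker, SearchForAddressOfSymbol); the "TestA" symbol name is taken from the test:

    using llvm::sys::DynamicLibrary;
    // Prefer symbols from explicitly loaded libraries over the process image.
    DynamicLibrary::SearchOrder = DynamicLibrary::SO_LoadedFirst;
    if (void *Sym = DynamicLibrary::SearchForAddressOfSymbol("TestA")) {
      // Sym now resolves to the library's copy rather than the local definition.
    }
    // llvm_shutdown() resets SearchOrder to the default SO_Linker, which the
    // new assertion above checks.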
diff --git a/unittests/Support/ErrorTest.cpp b/unittests/Support/ErrorTest.cpp
index 299fc50b4697..a762cf023f9c 100644
--- a/unittests/Support/ErrorTest.cpp
+++ b/unittests/Support/ErrorTest.cpp
@@ -360,7 +360,7 @@ TEST(Error, CheckJoinErrors) {
[&](const CustomError &CE) {
Sum += CE.getInfo();
});
- EXPECT_EQ(Sum, 28) << "Failed to correctly concatenate erorr lists.";
+ EXPECT_EQ(Sum, 28) << "Failed to correctly concatenate error lists.";
}
}
diff --git a/unittests/Support/Host.cpp b/unittests/Support/Host.cpp
index fd53697793c7..4f895e7163c5 100644
--- a/unittests/Support/Host.cpp
+++ b/unittests/Support/Host.cpp
@@ -10,9 +10,23 @@
#include "llvm/Support/Host.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
#include "gtest/gtest.h"
+#define ASSERT_NO_ERROR(x) \
+ if (std::error_code ASSERT_NO_ERROR_ec = x) { \
+ SmallString<128> MessageStorage; \
+ raw_svector_ostream Message(MessageStorage); \
+ Message << #x ": did not return errc::success.\n" \
+ << "error number: " << ASSERT_NO_ERROR_ec.value() << "\n" \
+ << "error message: " << ASSERT_NO_ERROR_ec.message() << "\n"; \
+ GTEST_FATAL_FAILURE_(MessageStorage.c_str()); \
+ } else { \
+ }
+
using namespace llvm;
class HostTest : public testing::Test {
@@ -114,3 +128,50 @@ Hardware : Qualcomm Technologies, Inc MSM8992
EXPECT_EQ(sys::detail::getHostCPUNameForARM(MSM8992ProcCpuInfo),
"cortex-a53");
}
+
+#if defined(__APPLE__)
+TEST_F(HostTest, getMacOSHostVersion) {
+ using namespace llvm::sys;
+ llvm::Triple HostTriple(getProcessTriple());
+ if (!HostTriple.isMacOSX())
+ return;
+
+ SmallString<128> TestDirectory;
+ ASSERT_NO_ERROR(fs::createUniqueDirectory("host_test", TestDirectory));
+ SmallString<128> OutputFile(TestDirectory);
+ path::append(OutputFile, "out");
+
+ const char *SwVersPath = "/usr/bin/sw_vers";
+ const char *argv[] = {SwVersPath, "-productVersion", nullptr};
+ StringRef OutputPath = OutputFile.str();
+ const StringRef *Redirects[] = {/*STDIN=*/nullptr, /*STDOUT=*/&OutputPath,
+ /*STDERR=*/nullptr};
+ int RetCode = ExecuteAndWait(SwVersPath, argv, /*env=*/nullptr, Redirects);
+ ASSERT_EQ(0, RetCode);
+
+ int FD = 0;
+ ASSERT_NO_ERROR(fs::openFileForRead(OutputPath, FD));
+ off_t Size = ::lseek(FD, 0, SEEK_END);
+ ASSERT_NE(-1, Size);
+ ::lseek(FD, 0, SEEK_SET);
+ std::unique_ptr<char[]> Buffer = llvm::make_unique<char[]>(Size);
+ ASSERT_EQ(::read(FD, Buffer.get(), Size), Size);
+ ::close(FD);
+
+ // Ensure that the two versions match.
+ StringRef SystemVersion(Buffer.get(), Size);
+ unsigned SystemMajor, SystemMinor, SystemMicro;
+ ASSERT_EQ(llvm::Triple((Twine("x86_64-apple-macos") + SystemVersion))
+ .getMacOSXVersion(SystemMajor, SystemMinor, SystemMicro),
+ true);
+ unsigned HostMajor, HostMinor, HostMicro;
+ ASSERT_EQ(HostTriple.getMacOSXVersion(HostMajor, HostMinor, HostMicro), true);
+
+ // Don't compare the 'Micro' version, as it's always '0' for the 'Darwin'
+ // triples.
+ ASSERT_EQ(std::tie(SystemMajor, SystemMinor), std::tie(HostMajor, HostMinor));
+
+ ASSERT_NO_ERROR(fs::remove(OutputPath));
+ ASSERT_NO_ERROR(fs::remove(TestDirectory.str()));
+}
+#endif
diff --git a/unittests/Support/MathExtrasTest.cpp b/unittests/Support/MathExtrasTest.cpp
index e26653b8a656..694a1f24d032 100644
--- a/unittests/Support/MathExtrasTest.cpp
+++ b/unittests/Support/MathExtrasTest.cpp
@@ -177,6 +177,7 @@ TEST(MathExtras, reverseBits) {
}
TEST(MathExtras, isPowerOf2_32) {
+ EXPECT_FALSE(isPowerOf2_32(0));
EXPECT_TRUE(isPowerOf2_32(1 << 6));
EXPECT_TRUE(isPowerOf2_32(1 << 12));
EXPECT_FALSE(isPowerOf2_32((1 << 19) + 3));
@@ -184,6 +185,7 @@ TEST(MathExtras, isPowerOf2_32) {
}
TEST(MathExtras, isPowerOf2_64) {
+ EXPECT_FALSE(isPowerOf2_64(0));
EXPECT_TRUE(isPowerOf2_64(1LL << 46));
EXPECT_TRUE(isPowerOf2_64(1LL << 12));
EXPECT_FALSE(isPowerOf2_64((1LL << 53) + 3));
diff --git a/unittests/Transforms/Utils/Cloning.cpp b/unittests/Transforms/Utils/Cloning.cpp
index db3d10847cd8..72a91d144174 100644
--- a/unittests/Transforms/Utils/Cloning.cpp
+++ b/unittests/Transforms/Utils/Cloning.cpp
@@ -312,11 +312,16 @@ protected:
DBuilder.insertDbgValueIntrinsic(AllocaContent, 0, Variable, E, DL,
Entry);
// Also create an inlined variable.
+ // Create a distinct struct type that we should not duplicate during
+ // cloning.
+ auto *StructType = DICompositeType::getDistinct(
+ C, dwarf::DW_TAG_structure_type, "some_struct", nullptr, 0, nullptr,
+ nullptr, 32, 32, 0, DINode::FlagZero, nullptr, 0, nullptr, nullptr);
auto *InlinedSP =
DBuilder.createFunction(CU, "inlined", "inlined", File, 8, FuncType,
true, true, 9, DINode::FlagZero, false);
auto *InlinedVar =
- DBuilder.createAutoVariable(InlinedSP, "inlined", File, 5, IntType, true);
+ DBuilder.createAutoVariable(InlinedSP, "inlined", File, 5, StructType, true);
auto *Scope = DBuilder.createLexicalBlock(
DBuilder.createLexicalBlockFile(InlinedSP, File), File, 1, 1);
auto InlinedDL =
@@ -426,7 +431,11 @@ TEST_F(CloneFunc, DebugIntrinsics) {
EXPECT_EQ(NewFunc, cast<AllocaInst>(NewIntrin->getAddress())->
getParent()->getParent());
- if (!OldIntrin->getDebugLoc()->getInlinedAt()) {
+ if (OldIntrin->getDebugLoc()->getInlinedAt()) {
+ // Inlined variable should refer to the same DILocalVariable as in the
+ // Old Function
+ EXPECT_EQ(OldIntrin->getVariable(), NewIntrin->getVariable());
+ } else {
// Old variable must belong to the old function.
EXPECT_EQ(OldFunc->getSubprogram(),
cast<DISubprogram>(OldIntrin->getVariable()->getScope()));
diff --git a/utils/TableGen/AsmMatcherEmitter.cpp b/utils/TableGen/AsmMatcherEmitter.cpp
index 0980e08f67f7..1f8e1b125889 100644
--- a/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/utils/TableGen/AsmMatcherEmitter.cpp
@@ -2222,7 +2222,7 @@ static void emitValidateOperandClass(AsmMatcherInfo &Info,
OS << " switch (Operand.getReg()) {\n";
OS << " default: OpKind = InvalidMatchClass; break;\n";
for (const auto &RC : Info.RegisterClasses)
- OS << " case " << Info.Target.getName() << "::"
+ OS << " case " << RC.first->getValueAsString("Namespace") << "::"
<< RC.first->getName() << ": OpKind = " << RC.second->Name
<< "; break;\n";
OS << " }\n";
@@ -2711,6 +2711,47 @@ static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
OS << "}\n\n";
}
+static void emitMnemonicSpellChecker(raw_ostream &OS, CodeGenTarget &Target,
+ unsigned VariantCount) {
+ OS << "std::string " << Target.getName() << "MnemonicSpellCheck(StringRef S, uint64_t FBS) {\n";
+ if (!VariantCount)
+ OS << " return \"\";";
+ else {
+ OS << " const unsigned MaxEditDist = 2;\n";
+ OS << " std::vector<StringRef> Candidates;\n";
+ OS << " StringRef Prev = \"\";\n";
+ OS << " auto End = std::end(MatchTable0);\n";
+ OS << "\n";
+ OS << " for (auto I = std::begin(MatchTable0); I < End; I++) {\n";
+ OS << " // Ignore unsupported instructions.\n";
+ OS << " if ((FBS & I->RequiredFeatures) != I->RequiredFeatures)\n";
+ OS << " continue;\n";
+ OS << "\n";
+ OS << " StringRef T = I->getMnemonic();\n";
+ OS << " // Avoid recomputing the edit distance for the same string.\n";
+ OS << " if (T.equals(Prev))\n";
+ OS << " continue;\n";
+ OS << "\n";
+ OS << " Prev = T;\n";
+ OS << " unsigned Dist = S.edit_distance(T, false, MaxEditDist);\n";
+ OS << " if (Dist <= MaxEditDist)\n";
+ OS << " Candidates.push_back(T);\n";
+ OS << " }\n";
+ OS << "\n";
+ OS << " if (Candidates.empty())\n";
+ OS << " return \"\";\n";
+ OS << "\n";
+ OS << " std::string Res = \", did you mean: \";\n";
+ OS << " unsigned i = 0;\n";
+ OS << " for( ; i < Candidates.size() - 1; i++)\n";
+ OS << " Res += Candidates[i].str() + \", \";\n";
+ OS << " return Res + Candidates[i].str() + \"?\";\n";
+ }
+ OS << "}\n";
+ OS << "\n";
+}
+
+
void AsmMatcherEmitter::run(raw_ostream &OS) {
CodeGenTarget Target(Records);
Record *AsmParser = Target.getAsmParser();
@@ -2948,7 +2989,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
std::string LenMnemonic = char(MI->Mnemonic.size()) + MI->Mnemonic.str();
OS << " { " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
<< " /* " << MI->Mnemonic << " */, "
- << Target.getName() << "::"
+ << Target.getInstNamespace() << "::"
<< MI->getResultInst()->TheDef->getName() << ", "
<< MI->ConversionFnKind << ", ";
@@ -2974,6 +3015,8 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << "};\n\n";
}
+ emitMnemonicSpellChecker(OS, Target, VariantCount);
+
// Finally, build the match function.
OS << "unsigned " << Target.getName() << ClassName << "::\n"
<< "MatchInstructionImpl(const OperandVector &Operands,\n";
diff --git a/utils/TableGen/AsmWriterEmitter.cpp b/utils/TableGen/AsmWriterEmitter.cpp
index 30d21984c4d3..75b9bc6cca40 100644
--- a/utils/TableGen/AsmWriterEmitter.cpp
+++ b/utils/TableGen/AsmWriterEmitter.cpp
@@ -137,12 +137,12 @@ static void EmitInstructions(std::vector<AsmWriterInst> &Insts,
O << " switch (MI->getOpcode()) {\n";
O << " default: llvm_unreachable(\"Unexpected opcode.\");\n";
std::vector<std::pair<std::string, AsmWriterOperand>> OpsToPrint;
- OpsToPrint.push_back(std::make_pair(FirstInst.CGI->Namespace + "::" +
+ OpsToPrint.push_back(std::make_pair(FirstInst.CGI->Namespace.str() + "::" +
FirstInst.CGI->TheDef->getName().str(),
FirstInst.Operands[i]));
for (const AsmWriterInst &AWI : SimilarInsts) {
- OpsToPrint.push_back(std::make_pair(AWI.CGI->Namespace+"::" +
+ OpsToPrint.push_back(std::make_pair(AWI.CGI->Namespace.str()+"::" +
AWI.CGI->TheDef->getName().str(),
AWI.Operands[i]));
}
diff --git a/utils/TableGen/CodeEmitterGen.cpp b/utils/TableGen/CodeEmitterGen.cpp
index b80dd5daefe0..23751a2cbfba 100644
--- a/utils/TableGen/CodeEmitterGen.cpp
+++ b/utils/TableGen/CodeEmitterGen.cpp
@@ -187,20 +187,18 @@ AddCodeToMergeInOperand(Record *R, BitsInit *BI, const std::string &VarName,
std::string CodeEmitterGen::getInstructionCase(Record *R,
CodeGenTarget &Target) {
std::string Case;
-
BitsInit *BI = R->getValueAsBitsInit("Inst");
- const std::vector<RecordVal> &Vals = R->getValues();
unsigned NumberedOp = 0;
-
std::set<unsigned> NamedOpIndices;
+
// Collect the set of operand indices that might correspond to named
// operand, and skip these when assigning operands based on position.
if (Target.getInstructionSet()->
getValueAsBit("noNamedPositionallyEncodedOperands")) {
CodeGenInstruction &CGI = Target.getInstruction(R);
- for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+ for (const RecordVal &RV : R->getValues()) {
unsigned OpIdx;
- if (!CGI.Operands.hasOperandNamed(Vals[i].getName(), OpIdx))
+ if (!CGI.Operands.hasOperandNamed(RV.getName(), OpIdx))
continue;
NamedOpIndices.insert(OpIdx);
@@ -209,13 +207,13 @@ std::string CodeEmitterGen::getInstructionCase(Record *R,
// Loop over all of the fields in the instruction, determining which are the
// operands to the instruction.
- for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+ for (const RecordVal &RV : R->getValues()) {
// Ignore fixed fields in the record, we're looking for values like:
// bits<5> RST = { ?, ?, ?, ?, ? };
- if (Vals[i].getPrefix() || Vals[i].getValue()->isComplete())
+ if (RV.getPrefix() || RV.getValue()->isComplete())
continue;
- AddCodeToMergeInOperand(R, BI, Vals[i].getName(), NumberedOp,
+ AddCodeToMergeInOperand(R, BI, RV.getName(), NumberedOp,
NamedOpIndices, Case, Target);
}
diff --git a/utils/TableGen/CodeGenInstruction.h b/utils/TableGen/CodeGenInstruction.h
index 75db17b59ac3..e173e153879c 100644
--- a/utils/TableGen/CodeGenInstruction.h
+++ b/utils/TableGen/CodeGenInstruction.h
@@ -206,7 +206,7 @@ template <typename T> class ArrayRef;
class CodeGenInstruction {
public:
Record *TheDef; // The actual record defining this instruction.
- std::string Namespace; // The namespace the instruction is in.
+ StringRef Namespace; // The namespace the instruction is in.
/// AsmString - The format string used to emit a .s file for the
/// instruction.
diff --git a/utils/TableGen/CodeGenMapTable.cpp b/utils/TableGen/CodeGenMapTable.cpp
index 60db6c267ad7..43348b622a74 100644
--- a/utils/TableGen/CodeGenMapTable.cpp
+++ b/utils/TableGen/CodeGenMapTable.cpp
@@ -367,7 +367,7 @@ unsigned MapTableEmitter::emitBinSearchTable(raw_ostream &OS) {
ArrayRef<const CodeGenInstruction*> NumberedInstructions =
Target.getInstructionsByEnumValue();
- std::string Namespace = Target.getInstNamespace();
+ StringRef Namespace = Target.getInstNamespace();
const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
unsigned NumCol = ValueCols.size();
unsigned TotalNumInstr = NumberedInstructions.size();
@@ -567,7 +567,7 @@ namespace llvm {
//===----------------------------------------------------------------------===//
void EmitMapTable(RecordKeeper &Records, raw_ostream &OS) {
CodeGenTarget Target(Records);
- std::string NameSpace = Target.getInstNamespace();
+ StringRef NameSpace = Target.getInstNamespace();
std::vector<Record*> InstrMapVec;
InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
diff --git a/utils/TableGen/CodeGenTarget.cpp b/utils/TableGen/CodeGenTarget.cpp
index eb277f3298f9..58df3ceceee7 100644
--- a/utils/TableGen/CodeGenTarget.cpp
+++ b/utils/TableGen/CodeGenTarget.cpp
@@ -207,7 +207,7 @@ const StringRef CodeGenTarget::getName() const {
return TargetRec->getName();
}
-std::string CodeGenTarget::getInstNamespace() const {
+StringRef CodeGenTarget::getInstNamespace() const {
for (const CodeGenInstruction *Inst : getInstructionsByEnumValue()) {
// Make sure not to pick up "TargetOpcode" by accidentally getting
// the namespace off the PHI instruction or something.
diff --git a/utils/TableGen/CodeGenTarget.h b/utils/TableGen/CodeGenTarget.h
index c822e940ffae..ff624ea559e5 100644
--- a/utils/TableGen/CodeGenTarget.h
+++ b/utils/TableGen/CodeGenTarget.h
@@ -86,7 +86,7 @@ public:
/// getInstNamespace - Return the target-specific instruction namespace.
///
- std::string getInstNamespace() const;
+ StringRef getInstNamespace() const;
/// getInstructionSet - Return the InstructionSet object.
///
diff --git a/utils/TableGen/DAGISelMatcherGen.cpp b/utils/TableGen/DAGISelMatcherGen.cpp
index d239f96d2a60..d4a56a64324f 100644
--- a/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/utils/TableGen/DAGISelMatcherGen.cpp
@@ -886,7 +886,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
assert((!ResultVTs.empty() || TreeHasOutGlue || NodeHasChain) &&
"Node has no result");
- AddMatcher(new EmitNodeMatcher(II.Namespace+"::"+II.TheDef->getName().str(),
+ AddMatcher(new EmitNodeMatcher(II.Namespace.str()+"::"+II.TheDef->getName().str(),
ResultVTs, InstOps,
NodeHasChain, TreeHasInGlue, TreeHasOutGlue,
NodeHasMemRefs, NumFixedArityOperands,
diff --git a/utils/TableGen/FastISelEmitter.cpp b/utils/TableGen/FastISelEmitter.cpp
index 0e7b0dc09442..25388b75cc0d 100644
--- a/utils/TableGen/FastISelEmitter.cpp
+++ b/utils/TableGen/FastISelEmitter.cpp
@@ -390,10 +390,10 @@ class FastISelMap {
std::map<OperandsSignature, std::vector<OperandsSignature> >
SignaturesWithConstantForms;
- std::string InstNS;
+ StringRef InstNS;
ImmPredicateSet ImmediatePredicates;
public:
- explicit FastISelMap(std::string InstNS);
+ explicit FastISelMap(StringRef InstNS);
void collectPatterns(CodeGenDAGPatterns &CGP);
void printImmediatePredicates(raw_ostream &OS);
@@ -417,7 +417,7 @@ static std::string getLegalCName(std::string OpName) {
return OpName;
}
-FastISelMap::FastISelMap(std::string instns) : InstNS(std::move(instns)) {}
+FastISelMap::FastISelMap(StringRef instns) : InstNS(instns) {}
static std::string PhyRegForNode(TreePatternNode *Op,
const CodeGenTarget &Target) {
@@ -440,10 +440,6 @@ static std::string PhyRegForNode(TreePatternNode *Op,
void FastISelMap::collectPatterns(CodeGenDAGPatterns &CGP) {
const CodeGenTarget &Target = CGP.getTargetInfo();
- // Determine the target's namespace name.
- InstNS = Target.getInstNamespace() + "::";
- assert(InstNS.size() > 2 && "Can't determine target-specific namespace!");
-
// Scan through all the patterns and record the simple ones.
for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(),
E = CGP.ptm_end(); I != E; ++I) {
@@ -659,8 +655,8 @@ void FastISelMap::emitInstructionCode(raw_ostream &OS,
if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs,
ImmediatePredicates, true);
- OS << "(" << InstNS << Memo.Name << ", ";
- OS << "&" << InstNS << Memo.RC->getName() << "RegClass";
+ OS << "(" << InstNS << "::" << Memo.Name << ", ";
+ OS << "&" << InstNS << "::" << Memo.RC->getName() << "RegClass";
if (!Operands.empty())
OS << ", ";
Operands.PrintArguments(OS, *Memo.PhysRegs);
@@ -873,8 +869,8 @@ void EmitFastISel(RecordKeeper &RK, raw_ostream &OS) {
Target.getName().str() + " target", OS);
// Determine the target's namespace name.
- std::string InstNS = Target.getInstNamespace() + "::";
- assert(InstNS.size() > 2 && "Can't determine target-specific namespace!");
+ StringRef InstNS = Target.getInstNamespace();
+ assert(!InstNS.empty() && "Can't determine target-specific namespace!");
FastISelMap F(InstNS);
F.collectPatterns(CGP);
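Since getInstNamespace() now returns a StringRef without a trailing "::", the emitter prints the separator explicitly. A small illustration, with "Foo" and "ADDri" as made-up names:

    StringRef InstNS = Target.getInstNamespace();   // e.g. "Foo"
    assert(!InstNS.empty() && "Can't determine target-specific namespace!");
    // With Memo.Name == "ADDri" this now emits "(Foo::ADDri, ".
    OS << "(" << InstNS << "::" << Memo.Name << ", ";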
diff --git a/utils/TableGen/FixedLenDecoderEmitter.cpp b/utils/TableGen/FixedLenDecoderEmitter.cpp
index 75fd73082b9a..03930d7132df 100644
--- a/utils/TableGen/FixedLenDecoderEmitter.cpp
+++ b/utils/TableGen/FixedLenDecoderEmitter.cpp
@@ -1691,9 +1691,7 @@ void FilterChooser::emitTableEntries(DecoderTableInfo &TableInfo) const {
dumpStack(errs(), "\t\t");
for (unsigned i = 0; i < Opcodes.size(); ++i) {
- const std::string &Name = nameWithID(Opcodes[i]);
-
- errs() << '\t' << Name << " ";
+ errs() << '\t' << nameWithID(Opcodes[i]) << " ";
dumpBits(errs(),
getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
errs() << '\n';
diff --git a/utils/TableGen/GlobalISelEmitter.cpp b/utils/TableGen/GlobalISelEmitter.cpp
index 924ed8f65c2c..cafcbeb57de5 100644
--- a/utils/TableGen/GlobalISelEmitter.cpp
+++ b/utils/TableGen/GlobalISelEmitter.cpp
@@ -53,6 +53,8 @@ STATISTIC(NumPatternTotal, "Total number of patterns");
STATISTIC(NumPatternImported, "Number of patterns imported from SelectionDAG");
STATISTIC(NumPatternImportsSkipped, "Number of SelectionDAG imports skipped");
STATISTIC(NumPatternEmitted, "Number of patterns emitted");
+/// A unique identifier for a MatchTable.
+static unsigned CurrentMatchTableID = 0;
cl::OptionCategory GlobalISelEmitterCat("Options for -gen-global-isel");
@@ -74,6 +76,18 @@ private:
public:
LLTCodeGen(const LLT &Ty) : Ty(Ty) {}
+ void emitCxxEnumValue(raw_ostream &OS) const {
+ if (Ty.isScalar()) {
+ OS << "GILLT_s" << Ty.getSizeInBits();
+ return;
+ }
+ if (Ty.isVector()) {
+ OS << "GILLT_v" << Ty.getNumElements() << "s" << Ty.getScalarSizeInBits();
+ return;
+ }
+ llvm_unreachable("Unhandled LLT");
+ }
+
void emitCxxConstructorCall(raw_ostream &OS) const {
if (Ty.isScalar()) {
OS << "LLT::scalar(" << Ty.getSizeInBits() << ")";
@@ -88,6 +102,33 @@ public:
}
const LLT &get() const { return Ty; }
+
+ /// This ordering is used for std::unique() and std::sort(). There's no
+ /// particular logic behind the order.
+ bool operator<(const LLTCodeGen &Other) const {
+ if (!Ty.isValid())
+ return Other.Ty.isValid();
+ if (Ty.isScalar()) {
+ if (!Other.Ty.isValid())
+ return false;
+ if (Other.Ty.isScalar())
+ return Ty.getSizeInBits() < Other.Ty.getSizeInBits();
+ return false;
+ }
+ if (Ty.isVector()) {
+ if (!Other.Ty.isValid() || Other.Ty.isScalar())
+ return false;
+ if (Other.Ty.isVector()) {
+ if (Ty.getNumElements() < Other.Ty.getNumElements())
+ return true;
+ if (Ty.getNumElements() > Other.Ty.getNumElements())
+ return false;
+ return Ty.getSizeInBits() < Other.Ty.getSizeInBits();
+ }
+ return false;
+ }
+ llvm_unreachable("Unhandled LLT");
+ }
};
class InstructionMatcher;
@@ -169,6 +210,13 @@ static Record *getInitValueAsRegClass(Init *V) {
return nullptr;
}
+std::string
+getNameForFeatureBitset(const std::vector<Record *> &FeatureBitset) {
+ std::string Name = "GIFBS";
+ for (const auto &Feature : FeatureBitset)
+ Name += ("_" + Feature->getName()).str();
+ return Name;
+}
//===- Matchers -----------------------------------------------------------===//
class OperandMatcher;
@@ -187,8 +235,8 @@ class RuleMatcher {
std::vector<std::unique_ptr<MatchAction>> Actions;
/// A map of instruction matchers to the local variables created by
- /// emitCxxCaptureStmts().
- std::map<const InstructionMatcher *, std::string> InsnVariableNames;
+ /// emitCaptureOpcodes().
+ std::map<const InstructionMatcher *, unsigned> InsnVariableIDs;
/// ID for the next instruction variable defined with defineInsnVar()
unsigned NextInsnVarID;
@@ -197,35 +245,39 @@ class RuleMatcher {
public:
RuleMatcher()
- : Matchers(), Actions(), InsnVariableNames(), NextInsnVarID(0) {}
+ : Matchers(), Actions(), InsnVariableIDs(), NextInsnVarID(0) {}
RuleMatcher(RuleMatcher &&Other) = default;
RuleMatcher &operator=(RuleMatcher &&Other) = default;
InstructionMatcher &addInstructionMatcher();
void addRequiredFeature(Record *Feature);
+ const std::vector<Record *> &getRequiredFeatures() const;
template <class Kind, class... Args> Kind &addAction(Args &&... args);
- std::string defineInsnVar(raw_ostream &OS, const InstructionMatcher &Matcher,
- StringRef Value);
- StringRef getInsnVarName(const InstructionMatcher &InsnMatcher) const;
+ /// Define an instruction without emitting any code to do so.
+ /// This is used for the root of the match.
+ unsigned implicitlyDefineInsnVar(const InstructionMatcher &Matcher);
+ /// Define an instruction and emit corresponding state-machine opcodes.
+ unsigned defineInsnVar(raw_ostream &OS, const InstructionMatcher &Matcher,
+ unsigned InsnVarID, unsigned OpIdx);
+ unsigned getInsnVarID(const InstructionMatcher &InsnMatcher) const;
- void emitCxxCapturedInsnList(raw_ostream &OS);
- void emitCxxCaptureStmts(raw_ostream &OS, StringRef Expr);
+ void emitCaptureOpcodes(raw_ostream &OS);
-void emit(raw_ostream &OS, SubtargetFeatureInfoMap SubtargetFeatures);
+ void emit(raw_ostream &OS);
-/// Compare the priority of this object and B.
-///
-/// Returns true if this object is more important than B.
-bool isHigherPriorityThan(const RuleMatcher &B) const;
+ /// Compare the priority of this object and B.
+ ///
+ /// Returns true if this object is more important than B.
+ bool isHigherPriorityThan(const RuleMatcher &B) const;
-/// Report the maximum number of temporary operands needed by the rule
-/// matcher.
-unsigned countRendererFns() const;
+ /// Report the maximum number of temporary operands needed by the rule
+ /// matcher.
+ unsigned countRendererFns() const;
-// FIXME: Remove this as soon as possible
-InstructionMatcher &insnmatcher_front() const { return *Matchers.front(); }
+ // FIXME: Remove this as soon as possible
+ InstructionMatcher &insnmatcher_front() const { return *Matchers.front(); }
};
template <class PredicateTy> class PredicateListMatcher {
@@ -255,21 +307,16 @@ public:
return Predicates.size();
}
- /// Emit a C++ expression that tests whether all the predicates are met.
+ /// Emit MatchTable opcodes that test whether all the predicates are met.
template <class... Args>
- void emitCxxPredicateListExpr(raw_ostream &OS, Args &&... args) const {
+ void emitPredicateListOpcodes(raw_ostream &OS, Args &&... args) const {
if (Predicates.empty()) {
- OS << "true";
+ OS << "// No predicates\n";
return;
}
- StringRef Separator = "";
- for (const auto &Predicate : predicates()) {
- OS << Separator << "(";
- Predicate->emitCxxPredicateExpr(OS, std::forward<Args>(args)...);
- OS << ")";
- Separator = " &&\n";
- }
+ for (const auto &Predicate : predicates())
+ Predicate->emitPredicateOpcodes(OS, std::forward<Args>(args)...);
}
};
@@ -291,6 +338,7 @@ public:
enum PredicateKind {
OPM_ComplexPattern,
OPM_Instruction,
+ OPM_IntrinsicID,
OPM_Int,
OPM_LiteralInt,
OPM_LLT,
@@ -318,15 +366,17 @@ public:
return None;
}
- /// Emit C++ statements to capture instructions into local variables.
+ /// Emit MatchTable opcodes to capture instructions into the MIs table.
///
- /// Only InstructionOperandMatcher needs to do anything for this method.
- virtual void emitCxxCaptureStmts(raw_ostream &OS, RuleMatcher &Rule,
- StringRef Expr) const {}
+ /// Only InstructionOperandMatcher needs to do anything for this method; the
+ /// rest just walk the tree.
+ virtual void emitCaptureOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const {}
- /// Emit a C++ expression that checks the predicate for the given operand.
- virtual void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const = 0;
+ /// Emit MatchTable opcodes that check the predicate for the given operand.
+ virtual void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID,
+ unsigned OpIdx) const = 0;
/// Compare the priority of this object and B.
///
@@ -353,11 +403,12 @@ public:
return P->getKind() == OPM_LLT;
}
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const override {
- OS << "MRI.getType(" << OperandExpr << ".getReg()) == (";
- Ty.emitCxxConstructorCall(OS);
- OS << ")";
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckType, /*MI*/" << InsnVarID << ", /*Op*/" << OpIdx
+ << ", /*Type*/";
+ Ty.emitCxxEnumValue(OS);
+ OS << ", \n";
}
};
@@ -379,11 +430,12 @@ public:
return P->getKind() == OPM_ComplexPattern;
}
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const override {
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
unsigned ID = getAllocatedTemporariesBaseID();
- OS << "(Renderer" << ID << " = " << TheDef.getValueAsString("MatcherFn")
- << "(" << OperandExpr << "))";
+ OS << " GIM_CheckComplexPattern, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", /*Renderer*/" << ID << ", GICP_"
+ << TheDef.getName() << ",\n";
}
unsigned countRendererFns() const override {
@@ -404,11 +456,10 @@ public:
return P->getKind() == OPM_RegBank;
}
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const override {
- OS << "(&RBI.getRegBankFromRegClass(" << RC.getQualifiedName()
- << "RegClass) == RBI.getRegBank(" << OperandExpr
- << ".getReg(), MRI, TRI))";
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckRegBankForClass, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", /*RC*/" << RC.getQualifiedName() << "RegClassID,\n";
}
};
@@ -421,9 +472,9 @@ public:
return P->getKind() == OPM_MBB;
}
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const override {
- OS << OperandExpr << ".isMBB()";
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckIsMBB, /*MI*/" << InsnVarID << ", /*Op*/" << OpIdx << ",\n";
}
};
@@ -441,9 +492,10 @@ public:
return P->getKind() == OPM_Int;
}
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const override {
- OS << "isOperandImmEqual(" << OperandExpr << ", " << Value << ", MRI)";
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckConstantInt, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", " << Value << ",\n";
}
};
@@ -461,10 +513,30 @@ public:
return P->getKind() == OPM_LiteralInt;
}
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const override {
- OS << OperandExpr << ".isCImm() && " << OperandExpr
- << ".getCImm()->equalsInt(" << Value << ")";
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckLiteralInt, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", " << Value << ",\n";
+ }
+};
+
+/// Generates code to check that an operand is an intrinsic ID.
+class IntrinsicIDOperandMatcher : public OperandPredicateMatcher {
+protected:
+ const CodeGenIntrinsic *II;
+
+public:
+ IntrinsicIDOperandMatcher(const CodeGenIntrinsic *II)
+ : OperandPredicateMatcher(OPM_IntrinsicID), II(II) {}
+
+ static bool classof(const OperandPredicateMatcher *P) {
+ return P->getKind() == OPM_IntrinsicID;
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckIntrinsicID, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", Intrinsic::" << II->EnumName << ",\n";
}
};
@@ -496,8 +568,9 @@ public:
}
unsigned getOperandIndex() const { return OpIdx; }
- std::string getOperandExpr(StringRef InsnVarName) const {
- return (InsnVarName + ".getOperand(" + llvm::to_string(OpIdx) + ")").str();
+ std::string getOperandExpr(unsigned InsnVarID) const {
+ return "State.MIs[" + llvm::to_string(InsnVarID) + "]->getOperand(" +
+ llvm::to_string(OpIdx) + ")";
}
Optional<const OperandMatcher *>
@@ -515,25 +588,24 @@ public:
InstructionMatcher &getInstructionMatcher() const { return Insn; }
- /// Emit C++ statements to capture instructions into local variables.
- void emitCxxCaptureStmts(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const {
+ /// Emit MatchTable opcodes to capture instructions into the MIs table.
+ void emitCaptureOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const {
for (const auto &Predicate : predicates())
- Predicate->emitCxxCaptureStmts(OS, Rule, OperandExpr);
+ Predicate->emitCaptureOpcodes(OS, Rule, InsnVarID, OpIdx);
}
- /// Emit a C++ expression that tests whether the instruction named in
- /// InsnVarName matches all the predicate and all the operands.
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef InsnVarName) const {
- OS << "(/* ";
+ /// Emit MatchTable opcodes that test whether the instruction named in
+ /// InsnVarID matches all the predicates and all the operands.
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const {
+ OS << " // MIs[" << InsnVarID << "] ";
if (SymbolicName.empty())
OS << "Operand " << OpIdx;
else
OS << SymbolicName;
- OS << " */ ";
- emitCxxPredicateListExpr(OS, Rule, getOperandExpr(InsnVarName));
- OS << ")";
+ OS << "\n";
+ emitPredicateListOpcodes(OS, Rule, InsnVarID, OpIdx);
}
/// Compare the priority of this object and B.
@@ -599,10 +671,10 @@ public:
PredicateKind getKind() const { return Kind; }
- /// Emit a C++ expression that tests whether the instruction named in
- /// InsnVarName matches the predicate.
- virtual void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef InsnVarName) const = 0;
+ /// Emit MatchTable opcodes that test whether the instruction named in
+ /// InsnVarID matches the predicate.
+ virtual void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const = 0;
/// Compare the priority of this object and B.
///
@@ -630,10 +702,10 @@ public:
return P->getKind() == IPM_Opcode;
}
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef InsnVarName) const override {
- OS << InsnVarName << ".getOpcode() == " << I->Namespace
- << "::" << I->TheDef->getName();
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const override {
+ OS << " GIM_CheckOpcode, /*MI*/" << InsnVarID << ", " << I->Namespace
+ << "::" << I->TheDef->getName() << ",\n";
}
/// Compare the priority of this object and B.
@@ -721,26 +793,23 @@ public:
return make_range(operands_begin(), operands_end());
}
- /// Emit C++ statements to check the shape of the match and capture
- /// instructions into local variables.
- void emitCxxCaptureStmts(raw_ostream &OS, RuleMatcher &Rule, StringRef Expr) {
- OS << "if (" << Expr << ".getNumOperands() < " << getNumOperands() << ")\n"
- << " return false;\n";
- for (const auto &Operand : Operands) {
- Operand->emitCxxCaptureStmts(OS, Rule, Operand->getOperandExpr(Expr));
- }
+ /// Emit MatchTable opcodes to check the shape of the match and capture
+ /// instructions into the MIs table.
+ void emitCaptureOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnID) {
+ OS << " GIM_CheckNumOperands, /*MI*/" << InsnID << ", /*Expected*/"
+ << getNumOperands() << ",\n";
+ for (const auto &Operand : Operands)
+ Operand->emitCaptureOpcodes(OS, Rule, InsnID);
}
- /// Emit a C++ expression that tests whether the instruction named in
+ /// Emit MatchTable opcodes that test whether the instruction named in
/// InsnVarName matches all the predicates and all the operands.
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef InsnVarName) const {
- emitCxxPredicateListExpr(OS, Rule, InsnVarName);
- for (const auto &Operand : Operands) {
- OS << " &&\n(";
- Operand->emitCxxPredicateExpr(OS, Rule, InsnVarName);
- OS << ")";
- }
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const {
+ emitPredicateListOpcodes(OS, Rule, InsnVarID);
+ for (const auto &Operand : Operands)
+ Operand->emitPredicateOpcodes(OS, Rule, InsnVarID);
}
/// Compare the priority of this object and B.
@@ -817,24 +886,17 @@ public:
return InsnMatcher->getOptionalOperand(SymbolicName);
}
- void emitCxxCaptureStmts(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const override {
- OS << "if (!" << OperandExpr + ".isReg())\n"
- << " return false;\n"
- << "if (TRI.isPhysicalRegister(" << OperandExpr + ".getReg()))\n"
- << " return false;\n";
- std::string InsnVarName = Rule.defineInsnVar(
- OS, *InsnMatcher,
- ("*MRI.getVRegDef(" + OperandExpr + ".getReg())").str());
- InsnMatcher->emitCxxCaptureStmts(OS, Rule, InsnVarName);
+ void emitCaptureOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnID, unsigned OpIdx) const override {
+ unsigned InsnVarID = Rule.defineInsnVar(OS, *InsnMatcher, InsnID, OpIdx);
+ InsnMatcher->emitCaptureOpcodes(OS, Rule, InsnVarID);
}
- void emitCxxPredicateExpr(raw_ostream &OS, RuleMatcher &Rule,
- StringRef OperandExpr) const override {
- OperandExpr = Rule.getInsnVarName(*InsnMatcher);
- OS << "(";
- InsnMatcher->emitCxxPredicateExpr(OS, Rule, OperandExpr);
- OS << ")\n";
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID_,
+ unsigned OpIdx_) const override {
+ unsigned InsnVarID = Rule.getInsnVarID(*InsnMatcher);
+ InsnMatcher->emitPredicateOpcodes(OS, Rule, InsnVarID);
}
};
@@ -858,13 +920,14 @@ public:
RendererKind getKind() const { return Kind; }
- virtual void emitCxxRenderStmts(raw_ostream &OS, RuleMatcher &Rule) const = 0;
+ virtual void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const = 0;
};
/// A CopyRenderer emits code to copy a single operand from an existing
/// instruction to the one being built.
class CopyRenderer : public OperandRenderer {
protected:
+ unsigned NewInsnID;
/// The matcher for the instruction that this operand is copied from.
/// This provides the facility for looking up an operand by its name so
/// that it can be used as a source for the instruction being built.
@@ -873,9 +936,10 @@ protected:
const StringRef SymbolicName;
public:
- CopyRenderer(const InstructionMatcher &Matched, StringRef SymbolicName)
- : OperandRenderer(OR_Copy), Matched(Matched), SymbolicName(SymbolicName) {
- }
+ CopyRenderer(unsigned NewInsnID, const InstructionMatcher &Matched,
+ StringRef SymbolicName)
+ : OperandRenderer(OR_Copy), NewInsnID(NewInsnID), Matched(Matched),
+ SymbolicName(SymbolicName) {}
static bool classof(const OperandRenderer *R) {
return R->getKind() == OR_Copy;
@@ -883,12 +947,12 @@ public:
const StringRef getSymbolicName() const { return SymbolicName; }
- void emitCxxRenderStmts(raw_ostream &OS, RuleMatcher &Rule) const override {
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
const OperandMatcher &Operand = Matched.getOperand(SymbolicName);
- StringRef InsnVarName =
- Rule.getInsnVarName(Operand.getInstructionMatcher());
- std::string OperandExpr = Operand.getOperandExpr(InsnVarName);
- OS << " MIB.add(" << OperandExpr << "/*" << SymbolicName << "*/);\n";
+ unsigned OldInsnVarID = Rule.getInsnVarID(Operand.getInstructionMatcher());
+ OS << " GIR_Copy, /*NewInsnID*/" << NewInsnID << ", /*OldInsnID*/"
+ << OldInsnVarID << ", /*OpIdx*/" << Operand.getOperandIndex() << ", // "
+ << SymbolicName << "\n";
}
};
@@ -897,6 +961,7 @@ public:
/// subregister should be copied.
class CopySubRegRenderer : public OperandRenderer {
protected:
+ unsigned NewInsnID;
/// The matcher for the instruction that this operand is copied from.
/// This provides the facility for looking up an operand by its name so
/// that it can be used as a source for the instruction being built.
@@ -907,9 +972,9 @@ protected:
const CodeGenSubRegIndex *SubReg;
public:
- CopySubRegRenderer(const InstructionMatcher &Matched, StringRef SymbolicName,
- const CodeGenSubRegIndex *SubReg)
- : OperandRenderer(OR_CopySubReg), Matched(Matched),
+ CopySubRegRenderer(unsigned NewInsnID, const InstructionMatcher &Matched,
+ StringRef SymbolicName, const CodeGenSubRegIndex *SubReg)
+ : OperandRenderer(OR_CopySubReg), NewInsnID(NewInsnID), Matched(Matched),
SymbolicName(SymbolicName), SubReg(SubReg) {}
static bool classof(const OperandRenderer *R) {
@@ -918,13 +983,13 @@ public:
const StringRef getSymbolicName() const { return SymbolicName; }
- void emitCxxRenderStmts(raw_ostream &OS, RuleMatcher &Rule) const override {
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
const OperandMatcher &Operand = Matched.getOperand(SymbolicName);
- StringRef InsnVarName =
- Rule.getInsnVarName(Operand.getInstructionMatcher());
- std::string OperandExpr = Operand.getOperandExpr(InsnVarName);
- OS << " MIB.addReg(" << OperandExpr << ".getReg() /*" << SymbolicName
- << "*/, 0, " << SubReg->EnumValue << ");\n";
+ unsigned OldInsnVarID = Rule.getInsnVarID(Operand.getInstructionMatcher());
+ OS << " GIR_CopySubReg, /*NewInsnID*/" << NewInsnID
+ << ", /*OldInsnID*/" << OldInsnVarID << ", /*OpIdx*/"
+ << Operand.getOperandIndex() << ", /*SubRegIdx*/" << SubReg->EnumValue
+ << ", // " << SymbolicName << "\n";
}
};
@@ -932,39 +997,44 @@ public:
/// This is typically useful for WZR/XZR on AArch64.
class AddRegisterRenderer : public OperandRenderer {
protected:
+ unsigned InsnID;
const Record *RegisterDef;
public:
- AddRegisterRenderer(const Record *RegisterDef)
- : OperandRenderer(OR_Register), RegisterDef(RegisterDef) {}
+ AddRegisterRenderer(unsigned InsnID, const Record *RegisterDef)
+ : OperandRenderer(OR_Register), InsnID(InsnID), RegisterDef(RegisterDef) {
+ }
static bool classof(const OperandRenderer *R) {
return R->getKind() == OR_Register;
}
- void emitCxxRenderStmts(raw_ostream &OS, RuleMatcher &Rule) const override {
- OS << " MIB.addReg(" << (RegisterDef->getValue("Namespace")
- ? RegisterDef->getValueAsString("Namespace")
- : "")
- << "::" << RegisterDef->getName() << ");\n";
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
+ OS << " GIR_AddRegister, /*InsnID*/" << InsnID << ", "
+ << (RegisterDef->getValue("Namespace")
+ ? RegisterDef->getValueAsString("Namespace")
+ : "")
+ << "::" << RegisterDef->getName() << ",\n";
}
};
/// Adds a specific immediate to the instruction being built.
class ImmRenderer : public OperandRenderer {
protected:
+ unsigned InsnID;
int64_t Imm;
public:
- ImmRenderer(int64_t Imm)
- : OperandRenderer(OR_Imm), Imm(Imm) {}
+ ImmRenderer(unsigned InsnID, int64_t Imm)
+ : OperandRenderer(OR_Imm), InsnID(InsnID), Imm(Imm) {}
static bool classof(const OperandRenderer *R) {
return R->getKind() == OR_Imm;
}
- void emitCxxRenderStmts(raw_ostream &OS, RuleMatcher &Rule) const override {
- OS << " MIB.addImm(" << Imm << ");\n";
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
+ OS << " GIR_AddImm, /*InsnID*/" << InsnID << ", /*Imm*/" << Imm
+ << ",\n";
}
};
@@ -972,6 +1042,7 @@ public:
/// matcher function.
class RenderComplexPatternOperand : public OperandRenderer {
private:
+ unsigned InsnID;
const Record &TheDef;
/// The name of the operand.
const StringRef SymbolicName;
@@ -984,17 +1055,18 @@ private:
}
public:
- RenderComplexPatternOperand(const Record &TheDef, StringRef SymbolicName,
- unsigned RendererID)
- : OperandRenderer(OR_ComplexPattern), TheDef(TheDef),
+ RenderComplexPatternOperand(unsigned InsnID, const Record &TheDef,
+ StringRef SymbolicName, unsigned RendererID)
+ : OperandRenderer(OR_ComplexPattern), InsnID(InsnID), TheDef(TheDef),
SymbolicName(SymbolicName), RendererID(RendererID) {}
static bool classof(const OperandRenderer *R) {
return R->getKind() == OR_ComplexPattern;
}
- void emitCxxRenderStmts(raw_ostream &OS, RuleMatcher &Rule) const override {
- OS << "Renderer" << RendererID << "(MIB);\n";
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
+ OS << " GIR_ComplexRenderer, /*InsnID*/" << InsnID << ", /*RendererID*/"
+ << RendererID << ",\n";
}
};
@@ -1009,11 +1081,11 @@ public:
/// Emit the C++ statements to implement the action.
///
- /// \param RecycleVarName If given, it's an instruction to recycle. The
- /// requirements on the instruction vary from action to
- /// action.
+ /// \param RecycleInsnID If given, it's an instruction to recycle. The
+ /// requirements on the instruction vary from action to
+ /// action.
virtual void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
- StringRef RecycleVarName) const = 0;
+ unsigned RecycleInsnID) const = 0;
};
/// Generates a comment describing the matched rule being acted upon.
@@ -1025,8 +1097,9 @@ public:
DebugCommentAction(const PatternToMatch &P) : P(P) {}
void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
- StringRef RecycleVarName) const override {
- OS << "// " << *P.getSrcPattern() << " => " << *P.getDstPattern() << "\n";
+ unsigned RecycleInsnID) const override {
+ OS << " // " << *P.getSrcPattern() << " => " << *P.getDstPattern()
+ << "\n";
}
};
@@ -1034,7 +1107,7 @@ public:
/// into the desired instruction when this is possible.
class BuildMIAction : public MatchAction {
private:
- std::string Name;
+ unsigned InsnID;
const CodeGenInstruction *I;
const InstructionMatcher &Matched;
std::vector<std::unique_ptr<OperandRenderer>> OperandRenderers;
@@ -1058,9 +1131,9 @@ private:
}
public:
- BuildMIAction(const StringRef Name, const CodeGenInstruction *I,
+ BuildMIAction(unsigned InsnID, const CodeGenInstruction *I,
const InstructionMatcher &Matched)
- : Name(Name), I(I), Matched(Matched) {}
+ : InsnID(InsnID), I(I), Matched(Matched) {}
template <class Kind, class... Args>
Kind &addRenderer(Args&&... args) {
@@ -1070,84 +1143,74 @@ public:
}
void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
- StringRef RecycleVarName) const override {
+ unsigned RecycleInsnID) const override {
if (canMutate()) {
- OS << " " << RecycleVarName << ".setDesc(TII.get(" << I->Namespace
- << "::" << I->TheDef->getName() << "));\n";
+ OS << " GIR_MutateOpcode, /*InsnID*/" << InsnID
+ << ", /*RecycleInsnID*/ " << RecycleInsnID << ", /*Opcode*/"
+ << I->Namespace << "::" << I->TheDef->getName() << ",\n";
if (!I->ImplicitDefs.empty() || !I->ImplicitUses.empty()) {
- OS << " auto MIB = MachineInstrBuilder(MF, &" << RecycleVarName
- << ");\n";
-
for (auto Def : I->ImplicitDefs) {
auto Namespace = Def->getValue("Namespace")
? Def->getValueAsString("Namespace")
: "";
- OS << " MIB.addDef(" << Namespace << "::" << Def->getName()
- << ", RegState::Implicit);\n";
+ OS << " GIR_AddImplicitDef, " << InsnID << ", " << Namespace
+ << "::" << Def->getName() << ",\n";
}
for (auto Use : I->ImplicitUses) {
auto Namespace = Use->getValue("Namespace")
? Use->getValueAsString("Namespace")
: "";
- OS << " MIB.addUse(" << Namespace << "::" << Use->getName()
- << ", RegState::Implicit);\n";
+ OS << " GIR_AddImplicitUse, " << InsnID << ", " << Namespace
+ << "::" << Use->getName() << ",\n";
}
}
-
- OS << " MachineInstr &" << Name << " = " << RecycleVarName << ";\n";
return;
}
// TODO: Simple permutation looks like it could be almost as common as
// mutation due to commutative operations.
- OS << "MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, "
- "I.getDebugLoc(), TII.get("
- << I->Namespace << "::" << I->TheDef->getName() << "));\n";
+ OS << " GIR_BuildMI, /*InsnID*/" << InsnID << ", /*Opcode*/"
+ << I->Namespace << "::" << I->TheDef->getName() << ",\n";
for (const auto &Renderer : OperandRenderers)
- Renderer->emitCxxRenderStmts(OS, Rule);
- OS << " for (const auto *FromMI : ";
- Rule.emitCxxCapturedInsnList(OS);
- OS << ")\n";
- OS << " for (const auto &MMO : FromMI->memoperands())\n";
- OS << " MIB.addMemOperand(MMO);\n";
- OS << " " << RecycleVarName << ".eraseFromParent();\n";
- OS << " MachineInstr &" << Name << " = *MIB;\n";
+ Renderer->emitRenderOpcodes(OS, Rule);
+
+ OS << " GIR_MergeMemOperands, /*InsnID*/" << InsnID << ",\n"
+ << " GIR_EraseFromParent, /*InsnID*/" << RecycleInsnID << ",\n";
}
};
/// Generates code to constrain the operands of an output instruction to the
/// register classes specified by the definition of that instruction.
class ConstrainOperandsToDefinitionAction : public MatchAction {
- std::string Name;
+ unsigned InsnID;
public:
- ConstrainOperandsToDefinitionAction(const StringRef Name) : Name(Name) {}
+ ConstrainOperandsToDefinitionAction(unsigned InsnID) : InsnID(InsnID) {}
void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
- StringRef RecycleVarName) const override {
- OS << " constrainSelectedInstRegOperands(" << Name
- << ", TII, TRI, RBI);\n";
+ unsigned RecycleInsnID) const override {
+ OS << " GIR_ConstrainSelectedInstOperands, /*InsnID*/" << InsnID << ",\n";
}
};
/// Generates code to constrain the specified operand of an output instruction
/// to the specified register class.
class ConstrainOperandToRegClassAction : public MatchAction {
- std::string Name;
+ unsigned InsnID;
unsigned OpIdx;
const CodeGenRegisterClass &RC;
public:
- ConstrainOperandToRegClassAction(const StringRef Name, unsigned OpIdx,
+ ConstrainOperandToRegClassAction(unsigned InsnID, unsigned OpIdx,
const CodeGenRegisterClass &RC)
- : Name(Name), OpIdx(OpIdx), RC(RC) {}
+ : InsnID(InsnID), OpIdx(OpIdx), RC(RC) {}
void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
- StringRef RecycleVarName) const override {
- OS << " constrainOperandRegToRegClass(" << Name << ", " << OpIdx
- << ", " << RC.getQualifiedName() << "RegClass, TII, TRI, RBI);\n";
+ unsigned RecycleInsnID) const override {
+ OS << " GIR_ConstrainOperandRC, /*InsnID*/" << InsnID << ", /*Op*/"
+ << OpIdx << ", /*RC " << RC.getName() << "*/ " << RC.EnumValue << ",\n";
}
};
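Taken together, the GIM_* checks and GIR_* actions above are emitted into a flat integer MatchTable that is interpreted at selection time, replacing the C++ expressions the old emitter generated. A hedged sketch of one emitted rule, pieced together from the strings printed above (the opcode, register class, and type names are illustrative, and the entry is abbreviated):

    const static int64_t MatchTable0[] = {
      GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
      GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_ADD,
      GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
      GIM_CheckRegBankForClass, /*MI*/0, /*Op*/1, /*RC*/Foo::GPR32RegClassID,
      // ... further operand checks ...
      GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/ 0, /*Opcode*/Foo::ADDrr,
      GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
      // ... trailing actions not shown ...
    };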
@@ -1160,53 +1223,49 @@ void RuleMatcher::addRequiredFeature(Record *Feature) {
RequiredFeatures.push_back(Feature);
}
+const std::vector<Record *> &RuleMatcher::getRequiredFeatures() const {
+ return RequiredFeatures;
+}
+
template <class Kind, class... Args>
Kind &RuleMatcher::addAction(Args &&... args) {
Actions.emplace_back(llvm::make_unique<Kind>(std::forward<Args>(args)...));
return *static_cast<Kind *>(Actions.back().get());
}
-std::string RuleMatcher::defineInsnVar(raw_ostream &OS,
- const InstructionMatcher &Matcher,
- StringRef Value) {
- std::string InsnVarName = "MI" + llvm::to_string(NextInsnVarID++);
- OS << "MachineInstr &" << InsnVarName << " = " << Value << ";\n";
- InsnVariableNames[&Matcher] = InsnVarName;
- return InsnVarName;
+unsigned
+RuleMatcher::implicitlyDefineInsnVar(const InstructionMatcher &Matcher) {
+ unsigned NewInsnVarID = NextInsnVarID++;
+ InsnVariableIDs[&Matcher] = NewInsnVarID;
+ return NewInsnVarID;
}
-StringRef
-RuleMatcher::getInsnVarName(const InstructionMatcher &InsnMatcher) const {
- const auto &I = InsnVariableNames.find(&InsnMatcher);
- if (I != InsnVariableNames.end())
- return I->second;
- llvm_unreachable("Matched Insn was not captured in a local variable");
+unsigned RuleMatcher::defineInsnVar(raw_ostream &OS,
+ const InstructionMatcher &Matcher,
+ unsigned InsnID, unsigned OpIdx) {
+ unsigned NewInsnVarID = implicitlyDefineInsnVar(Matcher);
+ OS << " GIM_RecordInsn, /*DefineMI*/" << NewInsnVarID << ", /*MI*/"
+ << InsnID << ", /*OpIdx*/" << OpIdx << ", // MIs[" << NewInsnVarID
+ << "]\n";
+ return NewInsnVarID;
}
-/// Emit a C++ initializer_list containing references to every matched
-/// instruction.
-void RuleMatcher::emitCxxCapturedInsnList(raw_ostream &OS) {
- SmallVector<StringRef, 2> Names;
- for (const auto &Pair : InsnVariableNames)
- Names.push_back(Pair.second);
- std::sort(Names.begin(), Names.end());
-
- OS << "{";
- for (const auto &Name : Names)
- OS << "&" << Name << ", ";
- OS << "}";
+unsigned RuleMatcher::getInsnVarID(const InstructionMatcher &InsnMatcher) const {
+ const auto &I = InsnVariableIDs.find(&InsnMatcher);
+ if (I != InsnVariableIDs.end())
+ return I->second;
+ llvm_unreachable("Matched Insn was not captured in a local variable");
}
-/// Emit C++ statements to check the shape of the match and capture
+/// Emit MatchTable opcodes to check the shape of the match and capture
/// instructions into local variables.
-void RuleMatcher::emitCxxCaptureStmts(raw_ostream &OS, StringRef Expr) {
+void RuleMatcher::emitCaptureOpcodes(raw_ostream &OS) {
assert(Matchers.size() == 1 && "Cannot handle multi-root matchers yet");
- std::string InsnVarName = defineInsnVar(OS, *Matchers.front(), Expr);
- Matchers.front()->emitCxxCaptureStmts(OS, *this, InsnVarName);
+ unsigned InsnVarID = implicitlyDefineInsnVar(*Matchers.front());
+ Matchers.front()->emitCaptureOpcodes(OS, *this, InsnVarID);
}
-void RuleMatcher::emit(raw_ostream &OS,
- SubtargetFeatureInfoMap SubtargetFeatures) {
+void RuleMatcher::emit(raw_ostream &OS) {
if (Matchers.empty())
llvm_unreachable("Unexpected empty matcher!");
@@ -1221,47 +1280,34 @@ void RuleMatcher::emit(raw_ostream &OS,
// on some targets but we don't need to make use of that yet.
assert(Matchers.size() == 1 && "Cannot handle multi-root matchers yet");
- OS << "if (";
- OS << "[&]() {\n";
+ OS << " const static int64_t MatchTable" << CurrentMatchTableID << "[] = {\n";
if (!RequiredFeatures.empty()) {
- OS << " PredicateBitset ExpectedFeatures = {";
- StringRef Separator = "";
- for (const auto &Predicate : RequiredFeatures) {
- const auto &I = SubtargetFeatures.find(Predicate);
- assert(I != SubtargetFeatures.end() && "Didn't import predicate?");
- OS << Separator << I->second.getEnumBitName();
- Separator = ", ";
- }
- OS << "};\n";
- OS << "if ((AvailableFeatures & ExpectedFeatures) != ExpectedFeatures)\n"
- << " return false;\n";
+ OS << " GIM_CheckFeatures, " << getNameForFeatureBitset(RequiredFeatures)
+ << ",\n";
}
- emitCxxCaptureStmts(OS, "I");
+ emitCaptureOpcodes(OS);
- OS << " if (";
- Matchers.front()->emitCxxPredicateExpr(OS, *this,
- getInsnVarName(*Matchers.front()));
- OS << ") {\n";
+ Matchers.front()->emitPredicateOpcodes(OS, *this,
+ getInsnVarID(*Matchers.front()));
// We must also check if it's safe to fold the matched instructions.
- if (InsnVariableNames.size() >= 2) {
+ if (InsnVariableIDs.size() >= 2) {
// Invert the map to create stable ordering (by var names)
- SmallVector<StringRef, 2> Names;
- for (const auto &Pair : InsnVariableNames) {
+ SmallVector<unsigned, 2> InsnIDs;
+ for (const auto &Pair : InsnVariableIDs) {
// Skip the root node since it isn't moving anywhere. Everything else is
// sinking to meet it.
if (Pair.first == Matchers.front().get())
continue;
- Names.push_back(Pair.second);
+ InsnIDs.push_back(Pair.second);
}
- std::sort(Names.begin(), Names.end());
+ std::sort(InsnIDs.begin(), InsnIDs.end());
- for (const auto &Name : Names) {
+ for (const auto &InsnID : InsnIDs) {
// Reject the difficult cases until we have a more accurate check.
- OS << " if (!isObviouslySafeToFold(" << Name
- << ")) return false;\n";
+ OS << " GIM_CheckIsSafeToFold, /*InsnID*/" << InsnID << ",\n";
// FIXME: Emit checks to determine it's _actually_ safe to fold and/or
// account for unsafe cases.
@@ -1300,14 +1346,17 @@ void RuleMatcher::emit(raw_ostream &OS,
}
}
- for (const auto &MA : Actions) {
- MA->emitCxxActionStmts(OS, *this, "I");
- }
-
- OS << " return true;\n";
- OS << " }\n";
- OS << " return false;\n";
- OS << " }()) { return true; }\n\n";
+ for (const auto &MA : Actions)
+ MA->emitCxxActionStmts(OS, *this, 0);
+ OS << " GIR_Done,\n"
+ << " };\n"
+ << " State.MIs.resize(1);\n"
+ << " DEBUG(dbgs() << \"Processing MatchTable" << CurrentMatchTableID
+ << "\\n\");\n"
+ << " if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable"
+ << CurrentMatchTableID << ", TII, MRI, TRI, RBI, AvailableFeatures)) {\n"
+ << " return true;\n"
+ << " }\n\n";
}
bool RuleMatcher::isHigherPriorityThan(const RuleMatcher &B) const {
@@ -1366,7 +1415,8 @@ private:
Error importRulePredicates(RuleMatcher &M, ArrayRef<Init *> Predicates);
Expected<InstructionMatcher &>
createAndImportSelDAGMatcher(InstructionMatcher &InsnMatcher,
- const TreePatternNode *Src) const;
+ const TreePatternNode *Src,
+ unsigned &TempOpIdx) const;
Error importChildMatcher(InstructionMatcher &InsnMatcher,
const TreePatternNode *SrcChild, unsigned OpIdx,
unsigned &TempOpIdx) const;
@@ -1425,8 +1475,12 @@ GlobalISelEmitter::importRulePredicates(RuleMatcher &M,
return Error::success();
}
-Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
- InstructionMatcher &InsnMatcher, const TreePatternNode *Src) const {
+Expected<InstructionMatcher &>
+GlobalISelEmitter::createAndImportSelDAGMatcher(InstructionMatcher &InsnMatcher,
+ const TreePatternNode *Src,
+ unsigned &TempOpIdx) const {
+ const CodeGenInstruction *SrcGIOrNull = nullptr;
+
// Start with the defined operands (i.e., the results of the root operator).
if (Src->getExtTypes().size() > 1)
return failedImport("Src pattern has multiple results");
@@ -1440,7 +1494,7 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
return failedImport(
"Unable to deduce gMIR opcode to handle Src (which is a leaf)");
} else {
- auto SrcGIOrNull = findNodeEquiv(Src->getOperator());
+ SrcGIOrNull = findNodeEquiv(Src->getOperator());
if (!SrcGIOrNull)
return failedImport("Pattern operator lacks an equivalent Instruction" +
explainOperator(Src->getOperator()));
@@ -1451,7 +1505,6 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
}
unsigned OpIdx = 0;
- unsigned TempOpIdx = 0;
for (const EEVT::TypeSet &Ty : Src->getExtTypes()) {
auto OpTyOrNone = MVTToLLT(Ty.getConcrete());
@@ -1474,10 +1527,27 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
return failedImport(
"Unable to deduce gMIR opcode to handle Src (which is a leaf)");
} else {
+ assert(SrcGIOrNull &&
+ "Expected to have already found an equivalent Instruction");
// Match the used operands (i.e. the children of the operator).
for (unsigned i = 0, e = Src->getNumChildren(); i != e; ++i) {
- if (auto Error = importChildMatcher(InsnMatcher, Src->getChild(i),
- OpIdx++, TempOpIdx))
+ TreePatternNode *SrcChild = Src->getChild(i);
+
+ // For G_INTRINSIC, the operand immediately following the defs is an
+ // intrinsic ID.
+ if (SrcGIOrNull->TheDef->getName() == "G_INTRINSIC" && i == 0) {
+ if (const CodeGenIntrinsic *II = Src->getIntrinsicInfo(CGP)) {
+ OperandMatcher &OM =
+ InsnMatcher.addOperand(OpIdx++, SrcChild->getName(), TempOpIdx);
+ OM.addPredicate<IntrinsicIDOperandMatcher>(II);
+ continue;
+ }
+
+ return failedImport("Expected IntInit containing instrinsic ID)");
+ }
+
+ if (auto Error =
+ importChildMatcher(InsnMatcher, SrcChild, OpIdx++, TempOpIdx))
return std::move(Error);
}
}
@@ -1513,7 +1583,7 @@ Error GlobalISelEmitter::importChildMatcher(InstructionMatcher &InsnMatcher,
auto OpTyOrNone = MVTToLLT(ChildTypes.front().getConcrete());
if (!OpTyOrNone)
- return failedImport("Src operand has an unsupported type");
+ return failedImport("Src operand has an unsupported type (" + to_string(*SrcChild) + ")");
OM.addPredicate<LLTOperandMatcher>(*OpTyOrNone);
// Check for nested instructions.
@@ -1521,8 +1591,8 @@ Error GlobalISelEmitter::importChildMatcher(InstructionMatcher &InsnMatcher,
// Map the node to a gMIR instruction.
InstructionOperandMatcher &InsnOperand =
OM.addPredicate<InstructionOperandMatcher>();
- auto InsnMatcherOrError =
- createAndImportSelDAGMatcher(InsnOperand.getInsnMatcher(), SrcChild);
+ auto InsnMatcherOrError = createAndImportSelDAGMatcher(
+ InsnOperand.getInsnMatcher(), SrcChild, TempOpIdx);
if (auto Error = InsnMatcherOrError.takeError())
return Error;
@@ -1581,7 +1651,7 @@ Error GlobalISelEmitter::importExplicitUseRenderer(
if (DstChild->getOperator()->isSubClassOf("SDNode")) {
auto &ChildSDNI = CGP.getSDNodeInfo(DstChild->getOperator());
if (ChildSDNI.getSDClassName() == "BasicBlockSDNode") {
- DstMIBuilder.addRenderer<CopyRenderer>(InsnMatcher,
+ DstMIBuilder.addRenderer<CopyRenderer>(0, InsnMatcher,
DstChild->getName());
return Error::success();
}
@@ -1606,13 +1676,14 @@ Error GlobalISelEmitter::importExplicitUseRenderer(
return failedImport("Dst operand has an unsupported type");
if (ChildRec->isSubClassOf("Register")) {
- DstMIBuilder.addRenderer<AddRegisterRenderer>(ChildRec);
+ DstMIBuilder.addRenderer<AddRegisterRenderer>(0, ChildRec);
return Error::success();
}
if (ChildRec->isSubClassOf("RegisterClass") ||
ChildRec->isSubClassOf("RegisterOperand")) {
- DstMIBuilder.addRenderer<CopyRenderer>(InsnMatcher, DstChild->getName());
+ DstMIBuilder.addRenderer<CopyRenderer>(0, InsnMatcher,
+ DstChild->getName());
return Error::success();
}
@@ -1624,7 +1695,7 @@ Error GlobalISelEmitter::importExplicitUseRenderer(
const OperandMatcher &OM = InsnMatcher.getOperand(DstChild->getName());
DstMIBuilder.addRenderer<RenderComplexPatternOperand>(
- *ComplexPattern->second, DstChild->getName(),
+ 0, *ComplexPattern->second, DstChild->getName(),
OM.getAllocatedTemporariesBaseID());
return Error::success();
}
@@ -1667,12 +1738,12 @@ Expected<BuildMIAction &> GlobalISelEmitter::createAndImportInstructionRenderer(
IsExtractSubReg = true;
}
- auto &DstMIBuilder = M.addAction<BuildMIAction>("NewI", DstI, InsnMatcher);
+ auto &DstMIBuilder = M.addAction<BuildMIAction>(0, DstI, InsnMatcher);
// Render the explicit defs.
for (unsigned I = 0; I < DstI->Operands.NumDefs; ++I) {
const CGIOperandList::OperandInfo &DstIOperand = DstI->Operands[I];
- DstMIBuilder.addRenderer<CopyRenderer>(InsnMatcher, DstIOperand.Name);
+ DstMIBuilder.addRenderer<CopyRenderer>(0, InsnMatcher, DstIOperand.Name);
}
// EXTRACT_SUBREG needs to use a subregister COPY.
@@ -1695,7 +1766,7 @@ Expected<BuildMIAction &> GlobalISelEmitter::createAndImportInstructionRenderer(
}
DstMIBuilder.addRenderer<CopySubRegRenderer>(
- InsnMatcher, Dst->getChild(0)->getName(), SubIdx);
+ 0, InsnMatcher, Dst->getChild(0)->getName(), SubIdx);
return DstMIBuilder;
}
@@ -1751,12 +1822,12 @@ Error GlobalISelEmitter::importDefaultOperandRenderers(
}
if (const DefInit *DefaultDefOp = dyn_cast<DefInit>(DefaultOp)) {
- DstMIBuilder.addRenderer<AddRegisterRenderer>(DefaultDefOp->getDef());
+ DstMIBuilder.addRenderer<AddRegisterRenderer>(0, DefaultDefOp->getDef());
continue;
}
if (const IntInit *DefaultIntOp = dyn_cast<IntInit>(DefaultOp)) {
- DstMIBuilder.addRenderer<ImmRenderer>(DefaultIntOp->getValue());
+ DstMIBuilder.addRenderer<ImmRenderer>(0, DefaultIntOp->getValue());
continue;
}
@@ -1809,7 +1880,9 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
to_string(DstI.Operands.NumDefs) + " def(s))");
InstructionMatcher &InsnMatcherTemp = M.addInstructionMatcher();
- auto InsnMatcherOrError = createAndImportSelDAGMatcher(InsnMatcherTemp, Src);
+ unsigned TempOpIdx = 0;
+ auto InsnMatcherOrError =
+ createAndImportSelDAGMatcher(InsnMatcherTemp, Src, TempOpIdx);
if (auto Error = InsnMatcherOrError.takeError())
return std::move(Error);
InstructionMatcher &InsnMatcher = InsnMatcherOrError.get();
@@ -1875,7 +1948,7 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
return failedImport("COPY_TO_REGCLASS operand #1 isn't a register class");
M.addAction<ConstrainOperandToRegClassAction>(
- "NewI", 0, Target.getRegisterClass(DstIOpRec));
+ 0, 0, Target.getRegisterClass(DstIOpRec));
// We're done with this pattern! It's eligible for GISel emission; return
// it.
@@ -1903,8 +1976,7 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
return failedImport("EXTRACT_SUBREG operand #1 isn't a register class");
CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(SubRegInit->getDef());
- CodeGenRegisterClass *SrcRC = CGRegs.getRegClass(
- getInitValueAsRegClass(Dst->getChild(0)->getLeafValue()));
+ CodeGenRegisterClass *SrcRC = CGRegs.getRegClass(DstIOpRec);
// It would be nice to leave this constraint implicit but we're required
// to pick a register class so constrain the result to a register class
@@ -1918,12 +1990,16 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
const auto &SrcRCDstRCPair =
SrcRC->getMatchingSubClassWithSubRegs(CGRegs, SubIdx);
assert(SrcRCDstRCPair->second && "Couldn't find a matching subclass");
- M.addAction<ConstrainOperandToRegClassAction>("NewI", 0,
- *SrcRCDstRCPair->second);
- M.addAction<ConstrainOperandToRegClassAction>("NewI", 1,
- *SrcRCDstRCPair->first);
- } else
- M.addAction<ConstrainOperandsToDefinitionAction>("NewI");
+ M.addAction<ConstrainOperandToRegClassAction>(0, 0, *SrcRCDstRCPair->second);
+ M.addAction<ConstrainOperandToRegClassAction>(0, 1, *SrcRCDstRCPair->first);
+
+ // We're done with this pattern! It's eligible for GISel emission; return
+ // it.
+ ++NumPatternImported;
+ return std::move(M);
+ }
+
+ M.addAction<ConstrainOperandsToDefinitionAction>(0);
// We're done with this pattern! It's eligible for GISel emission; return it.
++NumPatternImported;
@@ -1969,6 +2045,14 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
return false;
});
+ std::vector<Record *> ComplexPredicates =
+ RK.getAllDerivedDefinitions("GIComplexOperandMatcher");
+ std::sort(ComplexPredicates.begin(), ComplexPredicates.end(),
+ [](const Record *A, const Record *B) {
+ if (A->getName() < B->getName())
+ return true;
+ return false;
+ });
unsigned MaxTemporaries = 0;
for (const auto &Rule : Rules)
MaxTemporaries = std::max(MaxTemporaries, Rule.countRendererFns());
@@ -1980,15 +2064,26 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
"llvm::PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;\n"
<< "#endif // ifdef GET_GLOBALISEL_PREDICATE_BITSET\n\n";
- OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n";
- for (unsigned I = 0; I < MaxTemporaries; ++I)
- OS << " mutable ComplexRendererFn Renderer" << I << ";\n";
- OS << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n\n";
-
- OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n";
- for (unsigned I = 0; I < MaxTemporaries; ++I)
- OS << ", Renderer" << I << "(nullptr)\n";
- OS << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n\n";
+ OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n"
+ << " mutable MatcherState State;\n"
+ << " typedef "
+ "ComplexRendererFn("
+ << Target.getName()
+ << "InstructionSelector::*ComplexMatcherMemFn)(MachineOperand &) const;\n"
+ << "const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> "
+ "MatcherInfo;\n"
+ << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n\n";
+
+ OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n"
+ << ", State(" << MaxTemporaries << "),\n"
+ << "MatcherInfo({TypeObjects, FeatureBitsets, {\n"
+ << " nullptr, // GICP_Invalid\n";
+ for (const auto &Record : ComplexPredicates)
+ OS << " &" << Target.getName()
+ << "InstructionSelector::" << Record->getValueAsString("MatcherFn")
+ << ", // " << Record->getName() << "\n";
+ OS << "}})\n"
+ << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n\n";
OS << "#ifdef GET_GLOBALISEL_IMPL\n";
SubtargetFeatureInfo::emitSubtargetFeatureBitEnumeration(SubtargetFeatures,
@@ -2016,19 +2111,107 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
"computeAvailableFunctionFeatures", FunctionFeatures, OS,
"const MachineFunction *MF");
+ // Emit a table containing the LLT objects needed by the matcher and an enum
+ // for the matcher to reference them with.
+ std::vector<LLTCodeGen> TypeObjects = {
+ LLT::scalar(8), LLT::scalar(16), LLT::scalar(32),
+ LLT::scalar(64), LLT::scalar(80), LLT::vector(8, 1),
+ LLT::vector(16, 1), LLT::vector(32, 1), LLT::vector(64, 1),
+ LLT::vector(8, 8), LLT::vector(16, 8), LLT::vector(32, 8),
+ LLT::vector(64, 8), LLT::vector(4, 16), LLT::vector(8, 16),
+ LLT::vector(16, 16), LLT::vector(32, 16), LLT::vector(2, 32),
+ LLT::vector(4, 32), LLT::vector(8, 32), LLT::vector(16, 32),
+ LLT::vector(2, 64), LLT::vector(4, 64), LLT::vector(8, 64),
+ };
+ std::sort(TypeObjects.begin(), TypeObjects.end());
+ OS << "enum {\n";
+ for (const auto &TypeObject : TypeObjects) {
+ OS << " ";
+ TypeObject.emitCxxEnumValue(OS);
+ OS << ",\n";
+ }
+ OS << "};\n"
+ << "const static LLT TypeObjects[] = {\n";
+ for (const auto &TypeObject : TypeObjects) {
+ OS << " ";
+ TypeObject.emitCxxConstructorCall(OS);
+ OS << ",\n";
+ }
+ OS << "};\n\n";
+
+ // Emit a table containing the PredicateBitsets objects needed by the matcher
+ // and an enum for the matcher to reference them with.
+ std::vector<std::vector<Record *>> FeatureBitsets;
+ for (auto &Rule : Rules)
+ FeatureBitsets.push_back(Rule.getRequiredFeatures());
+ std::sort(
+ FeatureBitsets.begin(), FeatureBitsets.end(),
+ [&](const std::vector<Record *> &A, const std::vector<Record *> &B) {
+ if (A.size() < B.size())
+ return true;
+ if (A.size() > B.size())
+ return false;
+ for (const auto &Pair : zip(A, B)) {
+ if (std::get<0>(Pair)->getName() < std::get<1>(Pair)->getName())
+ return true;
+ if (std::get<0>(Pair)->getName() > std::get<1>(Pair)->getName())
+ return false;
+ }
+ return false;
+ });
+ FeatureBitsets.erase(
+ std::unique(FeatureBitsets.begin(), FeatureBitsets.end()),
+ FeatureBitsets.end());
+ OS << "enum {\n"
+ << " GIFBS_Invalid,\n";
+ for (const auto &FeatureBitset : FeatureBitsets) {
+ if (FeatureBitset.empty())
+ continue;
+ OS << " " << getNameForFeatureBitset(FeatureBitset) << ",\n";
+ }
+ OS << "};\n"
+ << "const static PredicateBitset FeatureBitsets[] {\n"
+ << " {}, // GIFBS_Invalid\n";
+ for (const auto &FeatureBitset : FeatureBitsets) {
+ if (FeatureBitset.empty())
+ continue;
+ OS << " {";
+ for (const auto &Feature : FeatureBitset) {
+ const auto &I = SubtargetFeatures.find(Feature);
+ assert(I != SubtargetFeatures.end() && "Didn't import predicate?");
+ OS << I->second.getEnumBitName() << ", ";
+ }
+ OS << "},\n";
+ }
+ OS << "};\n\n";
+
+ // Emit complex predicate table and an enum to reference them with.
+ OS << "enum {\n"
+ << " GICP_Invalid,\n";
+ for (const auto &Record : ComplexPredicates)
+ OS << " GICP_" << Record->getName() << ",\n";
+ OS << "};\n"
+ << "// See constructor for table contents\n\n";
+
OS << "bool " << Target.getName()
<< "InstructionSelector::selectImpl(MachineInstr &I) const {\n"
<< " MachineFunction &MF = *I.getParent()->getParent();\n"
- << " const MachineRegisterInfo &MRI = MF.getRegInfo();\n"
+ << " MachineRegisterInfo &MRI = MF.getRegInfo();\n"
<< " // FIXME: This should be computed on a per-function basis rather "
"than per-insn.\n"
<< " AvailableFunctionFeatures = computeAvailableFunctionFeatures(&STI, "
"&MF);\n"
- << " const PredicateBitset AvailableFeatures = getAvailableFeatures();\n";
+ << " const PredicateBitset AvailableFeatures = getAvailableFeatures();\n"
+ << " NewMIVector OutMIs;\n"
+ << " State.MIs.clear();\n"
+ << " State.MIs.push_back(&I);\n\n";
for (auto &Rule : Rules) {
- Rule.emit(OS, SubtargetFeatures);
+ Rule.emit(OS);
+ ++CurrentMatchTableID;
++NumPatternEmitted;
+ assert(CurrentMatchTableID == NumPatternEmitted &&
+ "Statistic deviates from number of emitted tables");
}
OS << " return false;\n"
diff --git a/utils/TableGen/InstrInfoEmitter.cpp b/utils/TableGen/InstrInfoEmitter.cpp
index ab7d964cd671..e270a17356f7 100644
--- a/utils/TableGen/InstrInfoEmitter.cpp
+++ b/utils/TableGen/InstrInfoEmitter.cpp
@@ -67,7 +67,7 @@ private:
void emitOperandTypesEnum(raw_ostream &OS, const CodeGenTarget &Target);
void initOperandMapData(
ArrayRef<const CodeGenInstruction *> NumberedInstructions,
- const std::string &Namespace,
+ StringRef Namespace,
std::map<std::string, unsigned> &Operands,
OpNameMapTy &OperandMap);
void emitOperandNameMappings(raw_ostream &OS, const CodeGenTarget &Target,
@@ -207,7 +207,7 @@ void InstrInfoEmitter::EmitOperandInfo(raw_ostream &OS,
/// well as the getNamedOperandIdx() function.
void InstrInfoEmitter::initOperandMapData(
ArrayRef<const CodeGenInstruction *> NumberedInstructions,
- const std::string &Namespace,
+ StringRef Namespace,
std::map<std::string, unsigned> &Operands,
OpNameMapTy &OperandMap) {
unsigned NumOperands = 0;
@@ -224,7 +224,7 @@ void InstrInfoEmitter::initOperandMapData(
}
OpList[I->second] = Info.MIOperandNo;
}
- OperandMap[OpList].push_back(Namespace + "::" +
+ OperandMap[OpList].push_back(Namespace.str() + "::" +
Inst->TheDef->getName().str());
}
}
@@ -243,7 +243,7 @@ void InstrInfoEmitter::initOperandMapData(
void InstrInfoEmitter::emitOperandNameMappings(raw_ostream &OS,
const CodeGenTarget &Target,
ArrayRef<const CodeGenInstruction*> NumberedInstructions) {
- const std::string &Namespace = Target.getInstNamespace();
+ StringRef Namespace = Target.getInstNamespace();
std::string OpNameNS = "OpName";
// Map of operand names to their enumeration value. This will be used to
// generate the OpName enum.
@@ -315,7 +315,7 @@ void InstrInfoEmitter::emitOperandNameMappings(raw_ostream &OS,
void InstrInfoEmitter::emitOperandTypesEnum(raw_ostream &OS,
const CodeGenTarget &Target) {
- const std::string &Namespace = Target.getInstNamespace();
+ StringRef Namespace = Target.getInstNamespace();
std::vector<Record *> Operands = Records.getAllDerivedDefinitions("Operand");
OS << "#ifdef GET_INSTRINFO_OPERAND_TYPES_ENUM\n";
@@ -576,7 +576,7 @@ void InstrInfoEmitter::emitEnums(raw_ostream &OS) {
CodeGenTarget Target(Records);
// We must emit the PHI opcode first...
- std::string Namespace = Target.getInstNamespace();
+ StringRef Namespace = Target.getInstNamespace();
if (Namespace.empty())
PrintFatalError("No instructions defined!");
diff --git a/utils/TableGen/RegisterBankEmitter.cpp b/utils/TableGen/RegisterBankEmitter.cpp
index 3f11eff1d371..880d075da427 100644
--- a/utils/TableGen/RegisterBankEmitter.cpp
+++ b/utils/TableGen/RegisterBankEmitter.cpp
@@ -227,7 +227,7 @@ void RegisterBankEmitter::emitBaseClassImplementation(
OS << " // " << LowestIdxInWord << "-" << (LowestIdxInWord + 31) << "\n";
for (const auto &RC : RCs) {
std::string QualifiedRegClassID =
- (Twine(TargetName) + "::" + RC->getName() + "RegClassID").str();
+ (Twine(RC->Namespace) + "::" + RC->getName() + "RegClassID").str();
OS << " (1u << (" << QualifiedRegClassID << " - "
<< LowestIdxInWord << ")) |\n";
}
diff --git a/utils/TableGen/SearchableTableEmitter.cpp b/utils/TableGen/SearchableTableEmitter.cpp
index efd4e83eca90..f73c197dee5a 100644
--- a/utils/TableGen/SearchableTableEmitter.cpp
+++ b/utils/TableGen/SearchableTableEmitter.cpp
@@ -230,7 +230,7 @@ void SearchableTableEmitter::emitLookupDeclaration(StringRef Name,
void SearchableTableEmitter::emitMapping(Record *InstanceClass,
raw_ostream &OS) {
- const std::string &TableName = InstanceClass->getName();
+ StringRef TableName = InstanceClass->getName();
std::vector<Record *> Items = Records.getAllDerivedDefinitions(TableName);
// Gather all the records we're going to need for this particular mapping.
@@ -265,8 +265,8 @@ void SearchableTableEmitter::emitMapping(Record *InstanceClass,
++Idx;
}
- OS << "#ifdef GET_" << StringRef(TableName).upper() << "_DECL\n";
- OS << "#undef GET_" << StringRef(TableName).upper() << "_DECL\n";
+ OS << "#ifdef GET_" << TableName.upper() << "_DECL\n";
+ OS << "#undef GET_" << TableName.upper() << "_DECL\n";
// Next emit the enum containing the top-level names for use in C++ code if
// requested
@@ -281,8 +281,8 @@ void SearchableTableEmitter::emitMapping(Record *InstanceClass,
OS << "#endif\n\n";
- OS << "#ifdef GET_" << StringRef(TableName).upper() << "_IMPL\n";
- OS << "#undef GET_" << StringRef(TableName).upper() << "_IMPL\n";
+ OS << "#ifdef GET_" << TableName.upper() << "_IMPL\n";
+ OS << "#undef GET_" << TableName.upper() << "_IMPL\n";
// The primary data table contains all the fields defined for this map.
emitPrimaryTable(TableName, FieldNames, SearchFieldNames, SearchTables, Items,
diff --git a/utils/TableGen/SubtargetEmitter.cpp b/utils/TableGen/SubtargetEmitter.cpp
index 16d5740b79a3..d1d873b66aaa 100644
--- a/utils/TableGen/SubtargetEmitter.cpp
+++ b/utils/TableGen/SubtargetEmitter.cpp
@@ -375,7 +375,7 @@ EmitStageAndOperandCycleData(raw_ostream &OS,
if (FUs.empty())
continue;
- const std::string &Name = ProcModel.ItinsDef->getName();
+ StringRef Name = ProcModel.ItinsDef->getName();
OS << "\n// Functional units for \"" << Name << "\"\n"
<< "namespace " << Name << "FU {\n";
@@ -429,7 +429,7 @@ EmitStageAndOperandCycleData(raw_ostream &OS,
if (!ProcModel.hasItineraries())
continue;
- const std::string &Name = ProcModel.ItinsDef->getName();
+ StringRef Name = ProcModel.ItinsDef->getName();
ItinList.resize(SchedModels.numInstrSchedClasses());
assert(ProcModel.ItinDefList.size() == ItinList.size() && "bad Itins");
@@ -546,9 +546,6 @@ EmitItineraries(raw_ostream &OS,
if (!ItinsDefSet.insert(ItinsDef).second)
continue;
- // Get processor itinerary name
- const std::string &Name = ItinsDef->getName();
-
// Get the itinerary list for the processor.
assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;
@@ -562,7 +559,7 @@ EmitItineraries(raw_ostream &OS,
OS << "static const llvm::InstrItinerary ";
// Begin processor itinerary table
- OS << Name << "[] = {\n";
+ OS << ItinsDef->getName() << "[] = {\n";
// For each itinerary class in CodeGenSchedClass::Index order.
for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
diff --git a/utils/TableGen/X86DisassemblerTables.cpp b/utils/TableGen/X86DisassemblerTables.cpp
index c9e36f96736a..c80b96905b30 100644
--- a/utils/TableGen/X86DisassemblerTables.cpp
+++ b/utils/TableGen/X86DisassemblerTables.cpp
@@ -10,7 +10,7 @@
// This file is part of the X86 Disassembler Emitter.
// It contains the implementation of the disassembler tables.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
diff --git a/utils/TableGen/X86DisassemblerTables.h b/utils/TableGen/X86DisassemblerTables.h
index 5a8688be0819..1171c7980f42 100644
--- a/utils/TableGen/X86DisassemblerTables.h
+++ b/utils/TableGen/X86DisassemblerTables.h
@@ -10,7 +10,7 @@
// This file is part of the X86 Disassembler Emitter.
// It contains the interface of the disassembler tables.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
diff --git a/utils/TableGen/X86ModRMFilters.h b/utils/TableGen/X86ModRMFilters.h
index d919c588c644..73d5602fd91c 100644
--- a/utils/TableGen/X86ModRMFilters.h
+++ b/utils/TableGen/X86ModRMFilters.h
@@ -11,7 +11,7 @@
// It contains ModR/M filters that determine which values of the ModR/M byte
// are valid for a particular instruction.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
diff --git a/utils/TableGen/X86RecognizableInstr.cpp b/utils/TableGen/X86RecognizableInstr.cpp
index 55e75763ad69..202a71ae4dc4 100644
--- a/utils/TableGen/X86RecognizableInstr.cpp
+++ b/utils/TableGen/X86RecognizableInstr.cpp
@@ -10,7 +10,7 @@
// This file is part of the X86 Disassembler Emitter.
// It contains the implementation of a single recognizable instruction.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
@@ -367,7 +367,7 @@ void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex,
++operandIndex;
}
- const std::string &typeName = (*Operands)[operandIndex].Rec->getName();
+ StringRef typeName = (*Operands)[operandIndex].Rec->getName();
OperandEncoding encoding = encodingFromString(typeName, OpSize);
// Adjust the encoding type for an operand based on the instruction.
diff --git a/utils/TableGen/X86RecognizableInstr.h b/utils/TableGen/X86RecognizableInstr.h
index 7fe731ec8b1c..ea99935f8790 100644
--- a/utils/TableGen/X86RecognizableInstr.h
+++ b/utils/TableGen/X86RecognizableInstr.h
@@ -10,7 +10,7 @@
// This file is part of the X86 Disassembler Emitter.
// It contains the interface of a single recognizable instruction.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
diff --git a/utils/docker/build_docker_image.sh b/utils/docker/build_docker_image.sh
index 2ec07ab6da4b..33f690ad5c43 100755
--- a/utils/docker/build_docker_image.sh
+++ b/utils/docker/build_docker_image.sh
@@ -16,20 +16,37 @@ BUILDSCRIPT_ARGS=""
function show_usage() {
usage=$(cat << EOF
-Usage: build_docker_image.sh [options] [-- [buildscript_args]...]
+Usage: build_docker_image.sh [options] [-- [cmake_args]...]
Available options:
+ General:
+ -h|--help show this help message
+ Docker-specific:
-s|--source image source dir (i.e. debian8, nvidia-cuda, etc)
-d|--docker-repository docker repository for the image
-t|--docker-tag docker tag for the image
-Required options: --source and --docker-repository.
-
-All options after '--' are passed to buildscript (see
-scripts/build_install_llvm.sh).
+ LLVM-specific:
+  -b|--branch         svn branch to checkout, e.g. 'trunk',
+ 'branches/release_40'
+ (default: 'trunk')
+ -r|--revision svn revision to checkout
+  -p|--llvm-project   name of an svn project to checkout. The project is also
+                      added to the LLVM_ENABLE_PROJECTS list passed to CMake.
+                      For clang, please use 'clang', not 'cfe'.
+                      Project 'llvm' is always included; specifying it again
+                      is ignored.
+ Can be specified multiple times.
+ -i|--install-target name of a cmake install target to build and include in
+ the resulting archive. Can be specified multiple times.
+
+Required options: --source, --docker-repository, and at least one
+  --install-target.
+
+All options after '--' are passed to the CMake invocation.
For example, running:
$ build_docker_image.sh -s debian8 -d mydocker/debian8-clang -t latest \
- -- -p clang -i install-clang -i install-clang-headers
+ -p clang -i install-clang -i install-clang-headers
will produce two docker images:
mydocker/debian8-clang-build:latest - an intermediate image used to compile
clang.
@@ -37,12 +54,21 @@ will produce two docker images:
Please note that this example produces a not very useful installation, since it
doesn't override CMake defaults, which produces a Debug and non-bootstrapped
version of clang.
-For an example of a somewhat more useful build, see build_clang_image.sh.
+
+To get a 2-stage clang build, you could use this command:
+$ ./build_docker_image.sh -s debian8 -d mydocker/clang-debian8 -t "latest" \
+ -p clang -i stage2-install-clang -i stage2-install-clang-headers \
+ -- \
+ -DLLVM_TARGETS_TO_BUILD=Native -DCMAKE_BUILD_TYPE=Release \
+ -DBOOTSTRAP_CMAKE_BUILD_TYPE=Release \
+ -DCLANG_ENABLE_BOOTSTRAP=ON \
+ -DCLANG_BOOTSTRAP_TARGETS="install-clang;install-clang-headers"
EOF
)
echo "$usage"
}
+SEEN_INSTALL_TARGET=0
while [[ $# -gt 0 ]]; do
case "$1" in
-h|--help)
@@ -64,9 +90,16 @@ while [[ $# -gt 0 ]]; do
DOCKER_TAG="$1"
shift
;;
+ -i|--install-target|-r|--revision|-b|--branch|-p|--llvm-project)
+ if [ "$1" == "-i" ] || [ "$1" == "--install-target" ]; then
+ SEEN_INSTALL_TARGET=1
+ fi
+ BUILDSCRIPT_ARGS="$BUILDSCRIPT_ARGS $1 $2"
+ shift 2
+ ;;
--)
shift
- BUILDSCRIPT_ARGS="$*"
+ BUILDSCRIPT_ARGS="$BUILDSCRIPT_ARGS -- $*"
shift $#
;;
*)
@@ -92,6 +125,11 @@ if [ "$DOCKER_REPOSITORY" == "" ]; then
exit 1
fi
+if [ $SEEN_INSTALL_TARGET -eq 0 ]; then
+ echo "Please provide at least one --install-target"
+ exit 1
+fi
+
cd $(dirname $0)
if [ ! -d $IMAGE_SOURCE ]; then
echo "No sources for '$IMAGE_SOURCE' were found in $PWD"
diff --git a/utils/docker/scripts/build_install_llvm.sh b/utils/docker/scripts/build_install_llvm.sh
index 7e0e90657416..aef4e0cbca2c 100755
--- a/utils/docker/scripts/build_install_llvm.sh
+++ b/utils/docker/scripts/build_install_llvm.sh
@@ -65,6 +65,7 @@ while [[ $# -gt 0 ]]; do
-r|--revision)
shift
LLVM_SVN_REV="$1"
+ shift
;;
-b|--branch)
shift
@@ -79,7 +80,10 @@ while [[ $# -gt 0 ]]; do
fi
if ! contains_project "$PROJ" ; then
LLVM_PROJECTS="$LLVM_PROJECTS $PROJ"
- CMAKE_LLVM_ENABLE_PROJECTS="$CMAKE_LLVM_ENABLED_PROJECTS;$PROJ"
+ if [ "$CMAKE_LLVM_ENABLE_PROJECTS" != "" ]; then
+ CMAKE_LLVM_ENABLE_PROJECTS="$CMAKE_LLVM_ENABLE_PROJECTS;"
+ fi
+      CMAKE_LLVM_ENABLE_PROJECTS="$CMAKE_LLVM_ENABLE_PROJECTS$PROJ"
else
echo "Project '$PROJ' is already enabled, ignoring extra occurences."
fi
@@ -135,7 +139,7 @@ for LLVM_PROJECT in $LLVM_PROJECTS; do
SVN_PROJECT="$LLVM_PROJECT"
fi
- echo "Checking out http://llvm.org/svn/llvm-project/$SVN_PROJECT to $CLANG_BUILD_DIR/src/$LLVM_PROJECT"
+ echo "Checking out https://llvm.org/svn/llvm-project/$SVN_PROJECT to $CLANG_BUILD_DIR/src/$LLVM_PROJECT"
# FIXME: --trust-server-cert is required to workaround 'SSL issuer is not
# trusted' error. Using https seems preferable to http either way,
# albeit this is not secure.
@@ -144,11 +148,11 @@ for LLVM_PROJECT in $LLVM_PROJECTS; do
"$CLANG_BUILD_DIR/src/$LLVM_PROJECT"
done
-pushd "$CLANG_BUILD_DIR"
+mkdir "$CLANG_BUILD_DIR/build"
+pushd "$CLANG_BUILD_DIR/build"
# Run the build as specified in the build arguments.
echo "Running build"
-mkdir "$CLANG_BUILD_DIR/build"
cmake -GNinja \
-DCMAKE_INSTALL_PREFIX="$CLANG_INSTALL_DIR" \
-DLLVM_ENABLE_PROJECTS="$CMAKE_LLVM_ENABLE_PROJECTS" \
diff --git a/utils/lit/lit/TestRunner.py b/utils/lit/lit/TestRunner.py
index 37b03cc19f85..8260d3813345 100644
--- a/utils/lit/lit/TestRunner.py
+++ b/utils/lit/lit/TestRunner.py
@@ -5,6 +5,11 @@ import platform
import tempfile
import threading
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+
from lit.ShCommands import GlobItem
import lit.ShUtil as ShUtil
import lit.Test as Test
@@ -221,6 +226,155 @@ def updateEnv(env, cmd):
env.env[key] = val
cmd.args = cmd.args[arg_idx+1:]
+def executeBuiltinEcho(cmd, shenv):
+ """Interpret a redirected echo command"""
+ opened_files = []
+ stdin, stdout, stderr = processRedirects(cmd, subprocess.PIPE, shenv,
+ opened_files)
+ if stdin != subprocess.PIPE or stderr != subprocess.PIPE:
+ raise InternalShellError(
+ cmd, "stdin and stderr redirects not supported for echo")
+
+ # Some tests have un-redirected echo commands to help debug test failures.
+ # Buffer our output and return it to the caller.
+ is_redirected = True
+ if stdout == subprocess.PIPE:
+ is_redirected = False
+ stdout = StringIO()
+ elif kIsWindows:
+ # Reopen stdout in binary mode to avoid CRLF translation. The versions
+ # of echo we are replacing on Windows all emit plain LF, and the LLVM
+ # tests now depend on this.
+ stdout = open(stdout.name, stdout.mode + 'b')
+ opened_files.append((None, None, stdout, None))
+
+ # Implement echo flags. We only support -e and -n, and not yet in
+ # combination. We have to ignore unknown flags, because `echo "-D FOO"`
+ # prints the dash.
+ args = cmd.args[1:]
+ interpret_escapes = False
+ write_newline = True
+ while len(args) >= 1 and args[0] in ('-e', '-n'):
+ flag = args[0]
+ args = args[1:]
+ if flag == '-e':
+ interpret_escapes = True
+ elif flag == '-n':
+ write_newline = False
+
+ def maybeUnescape(arg):
+ if not interpret_escapes:
+ return arg
+ # Python string escapes and "echo" escapes are obviously different, but
+ # this should be enough for the LLVM test suite.
+ return arg.decode('string_escape')
+
+ if args:
+ for arg in args[:-1]:
+ stdout.write(maybeUnescape(arg))
+ stdout.write(' ')
+ stdout.write(maybeUnescape(args[-1]))
+ if write_newline:
+ stdout.write('\n')
+
+ for (name, mode, f, path) in opened_files:
+ f.close()
+
+ if not is_redirected:
+ return stdout.getvalue()
+ return ""
+
+def processRedirects(cmd, stdin_source, cmd_shenv, opened_files):
+ """Return the standard fds for cmd after applying redirects
+
+ Returns the three standard file descriptors for the new child process. Each
+ fd may be an open, writable file object or a sentinel value from the
+ subprocess module.
+ """
+
+    # Apply the redirections; we use (N,) as a sentinel to indicate stdin,
+ # stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
+ # from a file are represented with a list [file, mode, file-object]
+ # where file-object is initially None.
+ redirects = [(0,), (1,), (2,)]
+ for (op, filename) in cmd.redirects:
+ if op == ('>',2):
+ redirects[2] = [filename, 'w', None]
+ elif op == ('>>',2):
+ redirects[2] = [filename, 'a', None]
+ elif op == ('>&',2) and filename in '012':
+ redirects[2] = redirects[int(filename)]
+ elif op == ('>&',) or op == ('&>',):
+ redirects[1] = redirects[2] = [filename, 'w', None]
+ elif op == ('>',):
+ redirects[1] = [filename, 'w', None]
+ elif op == ('>>',):
+ redirects[1] = [filename, 'a', None]
+ elif op == ('<',):
+ redirects[0] = [filename, 'r', None]
+ else:
+            raise InternalShellError(
+                cmd, "Unsupported redirect: %r" % ((op, filename),))
+
+ # Open file descriptors in a second pass.
+ std_fds = [None, None, None]
+ for (index, r) in enumerate(redirects):
+ # Handle the sentinel values for defaults up front.
+ if isinstance(r, tuple):
+ if r == (0,):
+ fd = stdin_source
+ elif r == (1,):
+ if index == 0:
+ raise InternalShellError(cmd, "Unsupported redirect for stdin")
+ elif index == 1:
+ fd = subprocess.PIPE
+ else:
+ fd = subprocess.STDOUT
+ elif r == (2,):
+ if index != 2:
+ raise InternalShellError(cmd, "Unsupported redirect on stdout")
+ fd = subprocess.PIPE
+ else:
+ raise InternalShellError(cmd, "Bad redirect")
+ std_fds[index] = fd
+ continue
+
+ (filename, mode, fd) = r
+
+ # Check if we already have an open fd. This can happen if stdout and
+ # stderr go to the same place.
+ if fd is not None:
+ std_fds[index] = fd
+ continue
+
+ redir_filename = None
+ name = expand_glob(filename, cmd_shenv.cwd)
+ if len(name) != 1:
+ raise InternalShellError(cmd, "Unsupported: glob in "
+ "redirect expanded to multiple files")
+ name = name[0]
+ if kAvoidDevNull and name == '/dev/null':
+ fd = tempfile.TemporaryFile(mode=mode)
+ elif kIsWindows and name == '/dev/tty':
+ # Simulate /dev/tty on Windows.
+ # "CON" is a special filename for the console.
+ fd = open("CON", mode)
+ else:
+ # Make sure relative paths are relative to the cwd.
+ redir_filename = os.path.join(cmd_shenv.cwd, name)
+ fd = open(redir_filename, mode)
+ # Workaround a Win32 and/or subprocess bug when appending.
+ #
+ # FIXME: Actually, this is probably an instance of PR6753.
+ if mode == 'a':
+ fd.seek(0, 2)
+ # Mutate the underlying redirect list so that we can redirect stdout
+ # and stderr to the same place without opening the file twice.
+ r[2] = fd
+ opened_files.append((filename, mode, fd) + (redir_filename,))
+ std_fds[index] = fd
+
+ return std_fds
+
def _executeShCmd(cmd, shenv, results, timeoutHelper):
if timeoutHelper.timeoutReached():
# Prevent further recursion if the timeout has been hit
@@ -269,6 +423,17 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
# following Popen calls will fail instead.
return 0
+ # Handle "echo" as a builtin if it is not part of a pipeline. This greatly
+ # speeds up tests that construct input files by repeatedly echo-appending to
+ # a file.
+ # FIXME: Standardize on the builtin echo implementation. We can use a
+ # temporary file to sidestep blocking pipe write issues.
+ if cmd.commands[0].args[0] == 'echo' and len(cmd.commands) == 1:
+ output = executeBuiltinEcho(cmd.commands[0], shenv)
+ results.append(ShellCommandResult(cmd.commands[0], output, "", 0,
+ False))
+ return 0
+
if cmd.commands[0].args[0] == 'export':
if len(cmd.commands) != 1:
raise ValueError("'export' cannot be part of a pipeline")
@@ -278,7 +443,7 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
return 0
procs = []
- input = subprocess.PIPE
+ default_stdin = subprocess.PIPE
stderrTempFiles = []
opened_files = []
named_temp_files = []
@@ -295,72 +460,8 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
cmd_shenv = ShellEnvironment(shenv.cwd, shenv.env)
updateEnv(cmd_shenv, j)
- # Apply the redirections, we use (N,) as a sentinel to indicate stdin,
- # stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
- # from a file are represented with a list [file, mode, file-object]
- # where file-object is initially None.
- redirects = [(0,), (1,), (2,)]
- for r in j.redirects:
- if r[0] == ('>',2):
- redirects[2] = [r[1], 'w', None]
- elif r[0] == ('>>',2):
- redirects[2] = [r[1], 'a', None]
- elif r[0] == ('>&',2) and r[1] in '012':
- redirects[2] = redirects[int(r[1])]
- elif r[0] == ('>&',) or r[0] == ('&>',):
- redirects[1] = redirects[2] = [r[1], 'w', None]
- elif r[0] == ('>',):
- redirects[1] = [r[1], 'w', None]
- elif r[0] == ('>>',):
- redirects[1] = [r[1], 'a', None]
- elif r[0] == ('<',):
- redirects[0] = [r[1], 'r', None]
- else:
- raise InternalShellError(j,"Unsupported redirect: %r" % (r,))
-
- # Map from the final redirections to something subprocess can handle.
- final_redirects = []
- for index,r in enumerate(redirects):
- if r == (0,):
- result = input
- elif r == (1,):
- if index == 0:
- raise InternalShellError(j,"Unsupported redirect for stdin")
- elif index == 1:
- result = subprocess.PIPE
- else:
- result = subprocess.STDOUT
- elif r == (2,):
- if index != 2:
- raise InternalShellError(j,"Unsupported redirect on stdout")
- result = subprocess.PIPE
- else:
- if r[2] is None:
- redir_filename = None
- name = expand_glob(r[0], cmd_shenv.cwd)
- if len(name) != 1:
- raise InternalShellError(j,"Unsupported: glob in redirect expanded to multiple files")
- name = name[0]
- if kAvoidDevNull and name == '/dev/null':
- r[2] = tempfile.TemporaryFile(mode=r[1])
- elif kIsWindows and name == '/dev/tty':
- # Simulate /dev/tty on Windows.
- # "CON" is a special filename for the console.
- r[2] = open("CON", r[1])
- else:
- # Make sure relative paths are relative to the cwd.
- redir_filename = os.path.join(cmd_shenv.cwd, name)
- r[2] = open(redir_filename, r[1])
- # Workaround a Win32 and/or subprocess bug when appending.
- #
- # FIXME: Actually, this is probably an instance of PR6753.
- if r[1] == 'a':
- r[2].seek(0, 2)
- opened_files.append(tuple(r) + (redir_filename,))
- result = r[2]
- final_redirects.append(result)
-
- stdin, stdout, stderr = final_redirects
+ stdin, stdout, stderr = processRedirects(j, default_stdin, cmd_shenv,
+ opened_files)
# If stderr wants to come from stdout, but stdout isn't a pipe, then put
# stderr on a pipe and treat it as stdout.
@@ -428,11 +529,11 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
# Update the current stdin source.
if stdout == subprocess.PIPE:
- input = procs[-1].stdout
+ default_stdin = procs[-1].stdout
elif stderrIsStdout:
- input = procs[-1].stderr
+ default_stdin = procs[-1].stderr
else:
- input = subprocess.PIPE
+ default_stdin = subprocess.PIPE
# Explicitly close any redirected files. We need to do this now because we
# need to release any handles we may have on the temporary files (important
diff --git a/utils/lit/lit/formats/googletest.py b/utils/lit/lit/formats/googletest.py
index b683f7c7db8e..9c55e71d2330 100644
--- a/utils/lit/lit/formats/googletest.py
+++ b/utils/lit/lit/formats/googletest.py
@@ -78,7 +78,10 @@ class GoogleTest(TestFormat):
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for subdir in self.test_sub_dirs:
- for fn in lit.util.listdir_files(os.path.join(source_path, subdir),
+ dir_path = os.path.join(source_path, subdir)
+ if not os.path.isdir(dir_path):
+ continue
+ for fn in lit.util.listdir_files(dir_path,
suffixes={self.test_suffix}):
# Discover the tests in this executable.
execpath = os.path.join(source_path, subdir, fn)
diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py
index a7f407fc210c..530f962d336d 100755
--- a/utils/lit/lit/main.py
+++ b/utils/lit/lit/main.py
@@ -262,7 +262,8 @@ def main_with_tmp(builtinParameters):
selection_group.add_argument("--filter", metavar="REGEX",
help=("Only run tests with paths matching the given "
"regular expression"),
- action="store", default=None)
+ action="store",
+ default=os.environ.get("LIT_FILTER"))
selection_group.add_argument("--num-shards", dest="numShards", metavar="M",
help="Split testsuite into M pieces and only run one",
action="store", type=int,
diff --git a/utils/lit/tests/selecting.py b/utils/lit/tests/selecting.py
index 72d6fbabdc93..19ba240f9b0f 100644
--- a/utils/lit/tests/selecting.py
+++ b/utils/lit/tests/selecting.py
@@ -7,6 +7,11 @@
# RUN: %{lit} --filter 'o[a-z]e' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# CHECK-FILTER: Testing: 2 of 5 tests
+# Check that regex-filtering based on environment variables works.
+#
+# RUN: LIT_FILTER='o[a-z]e' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER-ENV %s
+# CHECK-FILTER-ENV: Testing: 2 of 5 tests
+
# Check that maximum counts work
#
diff --git a/utils/vim/syntax/llvm.vim b/utils/vim/syntax/llvm.vim
index 22d688b14864..e795c7f62133 100644
--- a/utils/vim/syntax/llvm.vim
+++ b/utils/vim/syntax/llvm.vim
@@ -1,7 +1,7 @@
" Vim syntax file
" Language: llvm
" Maintainer: The LLVM team, http://llvm.org/
-" Version: $Revision: 294808 $
+" Version: $Revision: 307419 $
if version < 600
syntax clear
@@ -54,6 +54,7 @@ syn keyword llvmKeyword
\ atomic
\ available_externally
\ blockaddress
+ \ builtin
\ byval
\ c
\ catch
@@ -105,10 +106,12 @@ syn keyword llvmKeyword
\ naked
\ nest
\ noalias
+ \ nobuiltin
\ nocapture
\ noimplicitfloat
\ noinline
\ nonlazybind
+ \ nonnull
\ norecurse
\ noredzone
\ noreturn
@@ -134,6 +137,7 @@ syn keyword llvmKeyword
\ signext
\ singlethread
\ source_filename
+ \ speculatable
\ spir_func
\ spir_kernel
\ sret