-rw-r--r--   CMakeLists.txt                                         |   2
-rw-r--r--   docs/ReleaseNotes.rst                                  |  30
-rw-r--r--   lib/Analysis/InstructionSimplify.cpp                   |  34
-rw-r--r--   lib/Analysis/ValueTracking.cpp                         |  11
-rw-r--r--   lib/CodeGen/SelectionDAG/LegalizeDAG.cpp               |  28
-rw-r--r--   lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp       |  39
-rw-r--r--   lib/MC/MCParser/ELFAsmParser.cpp                       |  52
-rw-r--r--   lib/Target/AMDGPU/AMDGPU.td                            |   9
-rw-r--r--   lib/Target/AMDGPU/AMDGPUFeatures.td                    |   9
-rw-r--r--   lib/Target/AMDGPU/R600ISelLowering.cpp                 |  88
-rw-r--r--   lib/Target/AMDGPU/R600ISelLowering.h                   |   8
-rw-r--r--   lib/Target/AMDGPU/VOP3Instructions.td                  |  11
-rw-r--r--   lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp                 | 116
-rw-r--r--   lib/Transforms/Instrumentation/BoundsChecking.cpp      |  82
-rw-r--r--   test/CodeGen/AArch64/fcopysign.ll                      |  25
-rw-r--r--   test/CodeGen/AMDGPU/kernel-args.ll                     |  19
-rw-r--r--   test/CodeGen/AMDGPU/mad_uint24.ll                      |  93
-rw-r--r--   test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll       |   9
-rw-r--r--   test/CodeGen/NVPTX/load-store.ll                       |  88
-rw-r--r--   test/CodeGen/X86/masked_memop.ll                       |  59
-rw-r--r--   test/Instrumentation/BoundsChecking/many-traps-2.ll    |  65
-rw-r--r--   test/MC/ELF/extra-section-flags.s                      |  12
-rw-r--r--   test/Transforms/InstSimplify/AndOrXor.ll               |  43
-rw-r--r--   test/Transforms/InstSimplify/floating-point-compare.ll |   6
-rw-r--r--   test/Transforms/NewGVN/pair_jumpthread.ll              |   2
25 files changed, 678 insertions, 262 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 32134e250197..31df64089eb4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1037,6 +1037,6 @@ if(LLVM_DISTRIBUTION_COMPONENTS)
endif()
# This allows us to deploy the Universal CRT DLLs by passing -DCMAKE_INSTALL_UCRT_LIBRARIES=ON to CMake
-if (MSVC AND CMAKE_HOST_SYSTEM_NAME STREQUAL "Windows")
+if (MSVC AND CMAKE_HOST_SYSTEM_NAME STREQUAL "Windows" AND CMAKE_INSTALL_UCRT_LIBRARIES)
include(InstallRequiredSystemLibraries)
endif()
diff --git a/docs/ReleaseNotes.rst b/docs/ReleaseNotes.rst
index 58fb8828484c..2963a3df3a1c 100644
--- a/docs/ReleaseNotes.rst
+++ b/docs/ReleaseNotes.rst
@@ -40,6 +40,12 @@ Non-comprehensive list of changes in this release
functionality, or simply have a lot to talk about), see the `NOTE` below
for adding a new subsection.
+* The Windows installer no longer includes a Visual Studio integration.
+ Instead, a new
+ `LLVM Compiler Toolchain Visual Studio extension <https://marketplace.visualstudio.com/items?itemName=LLVMExtensions.llvm-toolchain>`_
+ is available on the Visual Studio Marketplace. The new integration includes
+ support for Visual Studio 2017.
+
* Libraries have been renamed from 7.0 to 7. This change also impacts
downstream libraries like lldb.
@@ -155,6 +161,26 @@ Changes to the PowerPC Target
During this release ...
+Changes to the SystemZ Target
+-----------------------------
+
+During this release the SystemZ target has:
+
+* Added support for vector registers in inline asm statements.
+
+* Added support for stackmaps, patchpoints, and the anyregcc
+ calling convention.
+
+* Changed the default function alignment to 16 bytes.
+
+* Improved codegen for condition code handling.
+
+* Improved instruction scheduling and microarchitecture tuning for z13/z14.
+
+* Fixed support for generating GCOV coverage data.
+
+* Fixed some codegen bugs.
+
Changes to the X86 Target
-------------------------
@@ -192,6 +218,10 @@ Changes to the DAG infrastructure
* The SETCCE opcode has now been removed in favor of SETCCCARRY.
+* TableGen now supports multi-alternative pattern fragments via the PatFrags
+ class. PatFrag is now derived from PatFrags, which may require minor
+ changes to backends that directly access PatFrag members.
+
External Open Source Projects Using LLVM 7
==========================================
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 7fc7c15a0c25..f991291f565a 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -1863,6 +1863,40 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
MaxRecurse))
return V;
+ // Assuming the effective width of Y is not larger than A, i.e. all bits
+ // from X and Y are disjoint in (X << A) | Y,
+ // if the mask of this AND op covers all bits of X or Y, while it covers
+ // no bits from the other, we can bypass this AND op. E.g.,
+ // ((X << A) | Y) & Mask -> Y,
+ // if Mask = ((1 << effective_width_of(Y)) - 1)
+ // ((X << A) | Y) & Mask -> X << A,
+ // if Mask = ((1 << effective_width_of(X)) - 1) << A
+ // SimplifyDemandedBits in InstCombine can optimize the general case.
+ // This pattern aims to help other passes for a common case.
+ Value *Y, *XShifted;
+ if (match(Op1, m_APInt(Mask)) &&
+ match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
+ m_Value(XShifted)),
+ m_Value(Y)))) {
+ const unsigned ShftCnt = ShAmt->getZExtValue();
+ const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
+ const unsigned Width = Op0->getType()->getScalarSizeInBits();
+ const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
+ if (EffWidthY <= ShftCnt) {
+ const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
+ Q.DT);
+ const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros();
+ const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
+ const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
+ // If the mask is extracting all bits from X or Y as is, we can skip
+ // this AND op.
+ if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
+ return Y;
+ if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
+ return XShifted;
+ }
+ }
+
return nullptr;
}
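The new SimplifyAndInst case folds ((X << A) | Y) & Mask to either Y or X << A when the mask covers all effective bits of one operand and none of the other. A minimal standalone sketch of the underlying bit test, using plain 64-bit integers instead of APInt (constants chosen to mirror the shl_or_and1 test further below, A = 32 and Y known to fit in one bit):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t EffBitsY = (1u << 1) - 1;             // low 1 bit (effective width of Y)
  const uint64_t EffBitsX = ((1ull << 32) - 1) << 32;  // bits 32..63 (X << 32)
  const uint64_t Mask = 1;                             // the AND mask
  const bool CoversY  = (EffBitsY & Mask) == EffBitsY; // EffBitsY is a subset of Mask
  const bool TouchesX = (EffBitsX & Mask) != 0;        // Mask intersects X's bits
  assert(CoversY && !TouchesX);                        // so the AND simplifies to Y
  return 0;
}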
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 0ef39163bda3..edd46c5fe362 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -2817,10 +2817,13 @@ static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
default:
break;
case Intrinsic::maxnum:
- return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
- Depth + 1) ||
- cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
- Depth + 1);
+ return (isKnownNeverNaN(I->getOperand(0)) &&
+ cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
+ SignBitOnly, Depth + 1)) ||
+ (isKnownNeverNaN(I->getOperand(1)) &&
+ cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
+ SignBitOnly, Depth + 1));
+
case Intrinsic::minnum:
return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
Depth + 1) &&
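The maxnum change accounts for llvm.maxnum returning the other operand when one input is NaN, so a non-negative but possibly-NaN operand proves nothing about the sign of the result. A small illustration using std::fmax, which follows the same NaN rule (values picked only for demonstration; this mirrors the orderedLessZeroMaxNum test updated below):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  double a = std::exp(std::numeric_limits<double>::quiet_NaN()); // exp may return NaN
  double b = -1.0;
  // fmax/maxnum return the non-NaN operand, so the result can still be
  // negative even though exp(x) is never ordered-less-than-zero.
  assert(std::fmax(a, b) == -1.0);
  return 0;
}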
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 2b7ba1ffb309..36c436918916 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1489,24 +1489,20 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode *Node) const {
// Get the signbit at the right position for MagAsInt.
int ShiftAmount = SignAsInt.SignBit - MagAsInt.SignBit;
+ EVT ShiftVT = IntVT;
+ if (SignBit.getValueSizeInBits() < ClearedSign.getValueSizeInBits()) {
+ SignBit = DAG.getNode(ISD::ZERO_EXTEND, DL, MagVT, SignBit);
+ ShiftVT = MagVT;
+ }
+ if (ShiftAmount > 0) {
+ SDValue ShiftCnst = DAG.getConstant(ShiftAmount, DL, ShiftVT);
+ SignBit = DAG.getNode(ISD::SRL, DL, ShiftVT, SignBit, ShiftCnst);
+ } else if (ShiftAmount < 0) {
+ SDValue ShiftCnst = DAG.getConstant(-ShiftAmount, DL, ShiftVT);
+ SignBit = DAG.getNode(ISD::SHL, DL, ShiftVT, SignBit, ShiftCnst);
+ }
if (SignBit.getValueSizeInBits() > ClearedSign.getValueSizeInBits()) {
- if (ShiftAmount > 0) {
- SDValue ShiftCnst = DAG.getConstant(ShiftAmount, DL, IntVT);
- SignBit = DAG.getNode(ISD::SRL, DL, IntVT, SignBit, ShiftCnst);
- } else if (ShiftAmount < 0) {
- SDValue ShiftCnst = DAG.getConstant(-ShiftAmount, DL, IntVT);
- SignBit = DAG.getNode(ISD::SHL, DL, IntVT, SignBit, ShiftCnst);
- }
SignBit = DAG.getNode(ISD::TRUNCATE, DL, MagVT, SignBit);
- } else if (SignBit.getValueSizeInBits() < ClearedSign.getValueSizeInBits()) {
- SignBit = DAG.getNode(ISD::ZERO_EXTEND, DL, MagVT, SignBit);
- if (ShiftAmount > 0) {
- SDValue ShiftCnst = DAG.getConstant(ShiftAmount, DL, MagVT);
- SignBit = DAG.getNode(ISD::SRL, DL, MagVT, SignBit, ShiftCnst);
- } else if (ShiftAmount < 0) {
- SDValue ShiftCnst = DAG.getConstant(-ShiftAmount, DL, MagVT);
- SignBit = DAG.getNode(ISD::SHL, DL, MagVT, SignBit, ShiftCnst);
- }
}
// Store the part with the modified sign and convert back to float.
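The reordering makes ExpandFCOPYSIGN zero-extend the sign word before shifting it whenever the sign operand is narrower than the magnitude operand, so the shift is performed in the wide type. A plain-integer sketch of why that order matters, using an f32 sign moved into an f64-sized magnitude (illustrative types only, not the DAG code):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t SignAsInt = 0x80000000u;  // sign bit of an f32 at position 31
  const int ShiftAmount = 31 - 63;         // SignBit - MagSignBit = -32, i.e. shift left by 32
  // Extend first, then shift in the 64-bit type; shifting the 32-bit value
  // left by 32 before extending would be out of range and lose the bit.
  const uint64_t SignBit = static_cast<uint64_t>(SignAsInt) << -ShiftAmount;
  assert(SignBit == 0x8000000000000000ull);
  return 0;
}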
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index f5d9dd234afd..b21249d01ef9 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -3641,26 +3641,43 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
}
SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(SDNode *N, unsigned OpNo) {
- assert(OpNo == 3 && "Can widen only data operand of mstore");
+ assert((OpNo == 2 || OpNo == 3) &&
+ "Can widen only data or mask operand of mstore");
MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
SDValue Mask = MST->getMask();
EVT MaskVT = Mask.getValueType();
SDValue StVal = MST->getValue();
- // Widen the value
- SDValue WideVal = GetWidenedVector(StVal);
SDLoc dl(N);
- // The mask should be widened as well.
- EVT WideVT = WideVal.getValueType();
- EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(),
- MaskVT.getVectorElementType(),
- WideVT.getVectorNumElements());
- Mask = ModifyToType(Mask, WideMaskVT, true);
+ if (OpNo == 3) {
+ // Widen the value
+ StVal = GetWidenedVector(StVal);
+
+ // The mask should be widened as well.
+ EVT WideVT = StVal.getValueType();
+ EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(),
+ MaskVT.getVectorElementType(),
+ WideVT.getVectorNumElements());
+ Mask = ModifyToType(Mask, WideMaskVT, true);
+ } else {
+ EVT WideMaskVT = TLI.getTypeToTransformTo(*DAG.getContext(), MaskVT);
+ Mask = ModifyToType(Mask, WideMaskVT, true);
+
+ EVT ValueVT = StVal.getValueType();
+ if (getTypeAction(ValueVT) == TargetLowering::TypeWidenVector)
+ StVal = GetWidenedVector(StVal);
+ else {
+ EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
+ ValueVT.getVectorElementType(),
+ WideMaskVT.getVectorNumElements());
+ StVal = ModifyToType(StVal, WideVT);
+ }
+ }
assert(Mask.getValueType().getVectorNumElements() ==
- WideVal.getValueType().getVectorNumElements() &&
+ StVal.getValueType().getVectorNumElements() &&
"Mask and data vectors should have the same number of elements");
- return DAG.getMaskedStore(MST->getChain(), dl, WideVal, MST->getBasePtr(),
+ return DAG.getMaskedStore(MST->getChain(), dl, StVal, MST->getBasePtr(),
Mask, MST->getMemoryVT(), MST->getMemOperand(),
false, MST->isCompressingStore());
}
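When the mask operand (OpNo == 2) is the one being widened, ModifyToType fills the extra lanes with zeroes, so the widened masked store writes no additional memory. A toy scalar model of the v3i32 to v4i32 case exercised by the new masked_memop.ll test (arrays standing in for vectors; not SelectionDAG code):

#include <array>
#include <cassert>

int main() {
  std::array<int, 4>  Mem  = {0, 0, 0, 99};               // element 3 must stay untouched
  std::array<int, 4>  Data = {1, 2, 3, 0};                // padded data lane
  std::array<bool, 4> Mask = {true, true, true, false};   // widened mask lane is false
  for (int i = 0; i < 4; ++i)
    if (Mask[i])
      Mem[i] = Data[i];
  assert(Mem[3] == 99);                                   // no write past the original 3 lanes
  return 0;
}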
diff --git a/lib/MC/MCParser/ELFAsmParser.cpp b/lib/MC/MCParser/ELFAsmParser.cpp
index 7bf14968c973..3d9590e1f9f5 100644
--- a/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/lib/MC/MCParser/ELFAsmParser.cpp
@@ -481,34 +481,6 @@ static bool hasPrefix(StringRef SectionName, StringRef Prefix) {
return SectionName.startswith(Prefix) || SectionName == Prefix.drop_back();
}
-// Return a set of section flags based on the section name that can then
-// be augmented later, otherwise return 0 if we don't have any reasonable
-// defaults.
-static unsigned defaultSectionFlags(StringRef SectionName) {
-
- if (hasPrefix(SectionName, ".rodata.cst"))
- return ELF::SHF_ALLOC | ELF::SHF_MERGE;
-
- if (hasPrefix(SectionName, ".rodata.") || SectionName == ".rodata1")
- return ELF::SHF_ALLOC;
-
- if (SectionName == ".fini" || SectionName == ".init" ||
- hasPrefix(SectionName, ".text."))
- return ELF::SHF_ALLOC | ELF::SHF_EXECINSTR;
-
- if (hasPrefix(SectionName, ".data.") || SectionName == ".data1" ||
- hasPrefix(SectionName, ".bss.") ||
- hasPrefix(SectionName, ".init_array.") ||
- hasPrefix(SectionName, ".fini_array.") ||
- hasPrefix(SectionName, ".preinit_array."))
- return ELF::SHF_ALLOC | ELF::SHF_WRITE;
-
- if (hasPrefix(SectionName, ".tdata.") || hasPrefix(SectionName, ".tbss."))
- return ELF::SHF_ALLOC | ELF::SHF_WRITE | ELF::SHF_TLS;
-
- return 0;
-}
-
bool ELFAsmParser::ParseSectionArguments(bool IsPush, SMLoc loc) {
StringRef SectionName;
@@ -518,13 +490,27 @@ bool ELFAsmParser::ParseSectionArguments(bool IsPush, SMLoc loc) {
StringRef TypeName;
int64_t Size = 0;
StringRef GroupName;
+ unsigned Flags = 0;
const MCExpr *Subsection = nullptr;
bool UseLastGroup = false;
MCSymbolELF *Associated = nullptr;
int64_t UniqueID = ~0;
- // Set the default section flags first in case no others are given.
- unsigned Flags = defaultSectionFlags(SectionName);
+ // Set the defaults first.
+ if (hasPrefix(SectionName, ".rodata.") || SectionName == ".rodata1")
+ Flags |= ELF::SHF_ALLOC;
+ else if (SectionName == ".fini" || SectionName == ".init" ||
+ hasPrefix(SectionName, ".text."))
+ Flags |= ELF::SHF_ALLOC | ELF::SHF_EXECINSTR;
+ else if (hasPrefix(SectionName, ".data.") || SectionName == ".data1" ||
+ hasPrefix(SectionName, ".bss.") ||
+ hasPrefix(SectionName, ".init_array.") ||
+ hasPrefix(SectionName, ".fini_array.") ||
+ hasPrefix(SectionName, ".preinit_array."))
+ Flags |= ELF::SHF_ALLOC | ELF::SHF_WRITE;
+ else if (hasPrefix(SectionName, ".tdata.") ||
+ hasPrefix(SectionName, ".tbss."))
+ Flags |= ELF::SHF_ALLOC | ELF::SHF_WRITE | ELF::SHF_TLS;
if (getLexer().is(AsmToken::Comma)) {
Lex();
@@ -552,12 +538,6 @@ bool ELFAsmParser::ParseSectionArguments(bool IsPush, SMLoc loc) {
if (extraFlags == -1U)
return TokError("unknown flag");
-
- // If we found additional section flags on a known section then give a
- // warning.
- if (Flags && Flags != extraFlags)
- Warning(loc, "setting incorrect section attributes for " + SectionName);
-
Flags |= extraFlags;
bool Mergeable = Flags & ELF::SHF_MERGE;
diff --git a/lib/Target/AMDGPU/AMDGPU.td b/lib/Target/AMDGPU/AMDGPU.td
index 16c2a366db28..445b69b35eb1 100644
--- a/lib/Target/AMDGPU/AMDGPU.td
+++ b/lib/Target/AMDGPU/AMDGPU.td
@@ -267,15 +267,6 @@ def FeatureD16PreservesUnusedBits : SubtargetFeature<
// Subtarget Features (options and debugging)
//===------------------------------------------------------------===//
-// Some instructions do not support denormals despite this flag. Using
-// fp32 denormals also causes instructions to run at the double
-// precision rate for the device.
-def FeatureFP32Denormals : SubtargetFeature<"fp32-denormals",
- "FP32Denormals",
- "true",
- "Enable single precision denormal handling"
->;
-
// Denormal handling for fp64 and fp16 is controlled by the same
// config register when fp16 supported.
// TODO: Do we need a separate f16 setting when not legal?
diff --git a/lib/Target/AMDGPU/AMDGPUFeatures.td b/lib/Target/AMDGPU/AMDGPUFeatures.td
index b375cae9018e..3c7d8a8fc550 100644
--- a/lib/Target/AMDGPU/AMDGPUFeatures.td
+++ b/lib/Target/AMDGPU/AMDGPUFeatures.td
@@ -19,6 +19,15 @@ def FeatureFMA : SubtargetFeature<"fmaf",
"Enable single precision FMA (not as fast as mul+add, but fused)"
>;
+// Some instructions do not support denormals despite this flag. Using
+// fp32 denormals also causes instructions to run at the double
+// precision rate for the device.
+def FeatureFP32Denormals : SubtargetFeature<"fp32-denormals",
+ "FP32Denormals",
+ "true",
+ "Enable single precision denormal handling"
+>;
+
class SubtargetFeatureLocalMemorySize <int Value> : SubtargetFeature<
"localmemorysize"#Value,
"LocalMemorySize",
diff --git a/lib/Target/AMDGPU/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp
index 113d6249fa60..e00dffc4be99 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -903,7 +903,7 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
unsigned DwordOffset) const {
unsigned ByteOffset = DwordOffset * 4;
PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
- AMDGPUASI.CONSTANT_BUFFER_0);
+ AMDGPUASI.PARAM_I_ADDRESS);
// We shouldn't be using an offset wider than 16-bits for implicit parameters.
assert(isInt<16>(ByteOffset));
@@ -1457,33 +1457,17 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
return scalarizeVectorLoad(LoadNode, DAG);
}
+ // This is still used for explicit load from addrspace(8)
int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
if (ConstantBlock > -1 &&
((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
(LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
SDValue Result;
- if (isa<ConstantExpr>(LoadNode->getMemOperand()->getValue()) ||
- isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
+ if (isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
isa<ConstantSDNode>(Ptr)) {
- SDValue Slots[4];
- for (unsigned i = 0; i < 4; i++) {
- // We want Const position encoded with the following formula :
- // (((512 + (kc_bank << 12) + const_index) << 2) + chan)
- // const_index is Ptr computed by llvm using an alignment of 16.
- // Thus we add (((512 + (kc_bank << 12)) + chan ) * 4 here and
- // then div by 4 at the ISel step
- SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
- DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
- Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
- }
- EVT NewVT = MVT::v4i32;
- unsigned NumElements = 4;
- if (VT.isVector()) {
- NewVT = VT;
- NumElements = VT.getVectorNumElements();
- }
- Result = DAG.getBuildVector(NewVT, DL, makeArrayRef(Slots, NumElements));
+ return constBufferLoad(LoadNode, LoadNode->getAddressSpace(), DAG);
} else {
+ //TODO: Does this even work?
// non-constant ptr can't be folded, keeps it as a v4f32 load
Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
@@ -1622,7 +1606,7 @@ SDValue R600TargetLowering::LowerFormalArguments(
}
PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
- AMDGPUASI.CONSTANT_BUFFER_0);
+ AMDGPUASI.PARAM_I_ADDRESS);
// i64 isn't a legal type, so the register type used ends up as i32, which
// isn't expected here. It attempts to create this sextload, but it ends up
@@ -1646,17 +1630,17 @@ SDValue R600TargetLowering::LowerFormalArguments(
unsigned ValBase = ArgLocs[In.getOrigArgIndex()].getLocMemOffset();
unsigned PartOffset = VA.getLocMemOffset();
+ unsigned Alignment = MinAlign(VT.getStoreSize(), PartOffset);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy), PartOffset - ValBase);
SDValue Arg = DAG.getLoad(
ISD::UNINDEXED, Ext, VT, DL, Chain,
DAG.getConstant(PartOffset, DL, MVT::i32), DAG.getUNDEF(MVT::i32),
PtrInfo,
- MemVT, /* Alignment = */ 4, MachineMemOperand::MONonTemporal |
+ MemVT, Alignment, MachineMemOperand::MONonTemporal |
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant);
- // 4 is the preferred alignment for the CONSTANT memory space.
InVals.push_back(Arg);
}
return Chain;
@@ -1804,6 +1788,52 @@ SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[4],
return BuildVector;
}
+SDValue R600TargetLowering::constBufferLoad(LoadSDNode *LoadNode, int Block,
+ SelectionDAG &DAG) const {
+ SDLoc DL(LoadNode);
+ EVT VT = LoadNode->getValueType(0);
+ SDValue Chain = LoadNode->getChain();
+ SDValue Ptr = LoadNode->getBasePtr();
+ assert (isa<ConstantSDNode>(Ptr));
+
+ //TODO: Support smaller loads
+ if (LoadNode->getMemoryVT().getScalarType() != MVT::i32 || !ISD::isNON_EXTLoad(LoadNode))
+ return SDValue();
+
+ if (LoadNode->getAlignment() < 4)
+ return SDValue();
+
+ int ConstantBlock = ConstantAddressBlock(Block);
+
+ SDValue Slots[4];
+ for (unsigned i = 0; i < 4; i++) {
+ // We want Const position encoded with the following formula :
+ // (((512 + (kc_bank << 12) + const_index) << 2) + chan)
+ // const_index is Ptr computed by llvm using an alignment of 16.
+ // Thus we add (((512 + (kc_bank << 12)) + chan ) * 4 here and
+ // then div by 4 at the ISel step
+ SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
+ DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
+ Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
+ }
+ EVT NewVT = MVT::v4i32;
+ unsigned NumElements = 4;
+ if (VT.isVector()) {
+ NewVT = VT;
+ NumElements = VT.getVectorNumElements();
+ }
+ SDValue Result = DAG.getBuildVector(NewVT, DL, makeArrayRef(Slots, NumElements));
+ if (!VT.isVector()) {
+ Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
+ DAG.getConstant(0, DL, MVT::i32));
+ }
+ SDValue MergedValues[2] = {
+ Result,
+ Chain
+ };
+ return DAG.getMergeValues(MergedValues, DL);
+}
+
//===----------------------------------------------------------------------===//
// Custom DAG Optimizations
//===----------------------------------------------------------------------===//
@@ -2022,6 +2052,16 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG, DL);
return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, N->getVTList(), NewArgs);
}
+
+ case ISD::LOAD: {
+ LoadSDNode *LoadNode = cast<LoadSDNode>(N);
+ SDValue Ptr = LoadNode->getBasePtr();
+ if (LoadNode->getAddressSpace() == AMDGPUAS::PARAM_I_ADDRESS &&
+ isa<ConstantSDNode>(Ptr))
+ return constBufferLoad(LoadNode, AMDGPUAS::CONSTANT_BUFFER_0, DAG);
+ break;
+ }
+
default: break;
}
diff --git a/lib/Target/AMDGPU/R600ISelLowering.h b/lib/Target/AMDGPU/R600ISelLowering.h
index 907d1f10e151..767c3c7bd5bf 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.h
+++ b/lib/Target/AMDGPU/R600ISelLowering.h
@@ -98,9 +98,11 @@ private:
bool isHWTrueValue(SDValue Op) const;
bool isHWFalseValue(SDValue Op) const;
- bool FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src,
- SDValue &Neg, SDValue &Abs, SDValue &Sel, SDValue &Imm,
- SelectionDAG &DAG) const;
+ bool FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src,
+ SDValue &Neg, SDValue &Abs, SDValue &Sel, SDValue &Imm,
+ SelectionDAG &DAG) const;
+ SDValue constBufferLoad(LoadSDNode *LoadNode, int Block,
+ SelectionDAG &DAG) const;
SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
};
diff --git a/lib/Target/AMDGPU/VOP3Instructions.td b/lib/Target/AMDGPU/VOP3Instructions.td
index 17ae08dc6267..26bc5260e17f 100644
--- a/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/lib/Target/AMDGPU/VOP3Instructions.td
@@ -461,17 +461,6 @@ def : GCNPat <
(inst i16:$src0, i16:$src1, i16:$src2, (i1 0))
>;
-def : GCNPat<
- (i32 (op3 (op2 (op1 i16:$src0, i16:$src1), i16:$src2))),
- (inst i16:$src0, i16:$src1, i16:$src2, (i1 0))
->;
-
-def : GCNPat<
- (i64 (op3 (op2 (op1 i16:$src0, i16:$src1), i16:$src2))),
- (REG_SEQUENCE VReg_64,
- (inst i16:$src0, i16:$src1, i16:$src2, (i1 0)), sub0,
- (V_MOV_B32_e32 (i32 0)), sub1)
->;
}
defm: Ternary_i16_Pats<mul, add, V_MAD_U16, zext>;
diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 4dfa8477a362..21939d836dc7 100644
--- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -16,6 +16,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -81,10 +82,12 @@ void NVPTXDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
case ISD::LOAD:
+ case ISD::ATOMIC_LOAD:
if (tryLoad(N))
return;
break;
case ISD::STORE:
+ case ISD::ATOMIC_STORE:
if (tryStore(N))
return;
break;
@@ -834,17 +837,27 @@ static Optional<unsigned> pickOpcodeForVT(
bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
SDLoc dl(N);
- LoadSDNode *LD = cast<LoadSDNode>(N);
+ MemSDNode *LD = cast<MemSDNode>(N);
+ assert(LD->readMem() && "Expected load");
+ LoadSDNode *PlainLoad = dyn_cast<LoadSDNode>(N);
EVT LoadedVT = LD->getMemoryVT();
SDNode *NVPTXLD = nullptr;
// do not support pre/post inc/dec
- if (LD->isIndexed())
+ if (PlainLoad && PlainLoad->isIndexed())
return false;
if (!LoadedVT.isSimple())
return false;
+ AtomicOrdering Ordering = LD->getOrdering();
+ // In order to lower atomic loads with stronger guarantees we would need to
+ // use load.acquire or insert fences. However these features were only added
+ // with PTX ISA 6.0 / sm_70.
+ // TODO: Check if we can actually use the new instructions and implement them.
+ if (isStrongerThanMonotonic(Ordering))
+ return false;
+
// Address Space Setting
unsigned int CodeAddrSpace = getCodeAddrSpace(LD);
if (canLowerToLDG(LD, *Subtarget, CodeAddrSpace, MF)) {
@@ -855,8 +868,9 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
CurDAG->getDataLayout().getPointerSizeInBits(LD->getAddressSpace());
// Volatile Setting
- // - .volatile is only availalble for .global and .shared
- bool isVolatile = LD->isVolatile();
+ // - .volatile is only available for .global and .shared
+ // - .volatile has the same memory synchronization semantics as .relaxed.sys
+ bool isVolatile = LD->isVolatile() || Ordering == AtomicOrdering::Monotonic;
if (CodeAddrSpace != NVPTX::PTXLdStInstCode::GLOBAL &&
CodeAddrSpace != NVPTX::PTXLdStInstCode::SHARED &&
CodeAddrSpace != NVPTX::PTXLdStInstCode::GENERIC)
@@ -882,7 +896,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
fromTypeWidth = 32;
}
- if ((LD->getExtensionType() == ISD::SEXTLOAD))
+ if (PlainLoad && (PlainLoad->getExtensionType() == ISD::SEXTLOAD))
fromType = NVPTX::PTXLdStInstCode::Signed;
else if (ScalarVT.isFloatingPoint())
// f16 uses .b16 as its storage type.
@@ -1691,25 +1705,38 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
SDLoc dl(N);
- StoreSDNode *ST = cast<StoreSDNode>(N);
+ MemSDNode *ST = cast<MemSDNode>(N);
+ assert(ST->writeMem() && "Expected store");
+ StoreSDNode *PlainStore = dyn_cast<StoreSDNode>(N);
+ AtomicSDNode *AtomicStore = dyn_cast<AtomicSDNode>(N);
+ assert((PlainStore || AtomicStore) && "Expected store");
EVT StoreVT = ST->getMemoryVT();
SDNode *NVPTXST = nullptr;
// do not support pre/post inc/dec
- if (ST->isIndexed())
+ if (PlainStore && PlainStore->isIndexed())
return false;
if (!StoreVT.isSimple())
return false;
+ AtomicOrdering Ordering = ST->getOrdering();
+ // In order to lower atomic stores with stronger guarantees we would need to
+ // use store.release or insert fences. However these features were only added
+ // with PTX ISA 6.0 / sm_70.
+ // TODO: Check if we can actually use the new instructions and implement them.
+ if (isStrongerThanMonotonic(Ordering))
+ return false;
+
// Address Space Setting
unsigned int CodeAddrSpace = getCodeAddrSpace(ST);
unsigned int PointerSize =
CurDAG->getDataLayout().getPointerSizeInBits(ST->getAddressSpace());
// Volatile Setting
- // - .volatile is only availalble for .global and .shared
- bool isVolatile = ST->isVolatile();
+ // - .volatile is only available for .global and .shared
+ // - .volatile has the same memory synchronization semantics as .relaxed.sys
+ bool isVolatile = ST->isVolatile() || Ordering == AtomicOrdering::Monotonic;
if (CodeAddrSpace != NVPTX::PTXLdStInstCode::GLOBAL &&
CodeAddrSpace != NVPTX::PTXLdStInstCode::SHARED &&
CodeAddrSpace != NVPTX::PTXLdStInstCode::GENERIC)
@@ -1739,41 +1766,53 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
toType = NVPTX::PTXLdStInstCode::Unsigned;
// Create the machine instruction DAG
- SDValue Chain = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDValue N2 = N->getOperand(2);
+ SDValue Chain = ST->getChain();
+ SDValue Value = PlainStore ? PlainStore->getValue() : AtomicStore->getVal();
+ SDValue BasePtr = ST->getBasePtr();
SDValue Addr;
SDValue Offset, Base;
Optional<unsigned> Opcode;
- MVT::SimpleValueType SourceVT = N1.getNode()->getSimpleValueType(0).SimpleTy;
+ MVT::SimpleValueType SourceVT =
+ Value.getNode()->getSimpleValueType(0).SimpleTy;
- if (SelectDirectAddr(N2, Addr)) {
+ if (SelectDirectAddr(BasePtr, Addr)) {
Opcode = pickOpcodeForVT(SourceVT, NVPTX::ST_i8_avar, NVPTX::ST_i16_avar,
NVPTX::ST_i32_avar, NVPTX::ST_i64_avar,
NVPTX::ST_f16_avar, NVPTX::ST_f16x2_avar,
NVPTX::ST_f32_avar, NVPTX::ST_f64_avar);
if (!Opcode)
return false;
- SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
- getI32Imm(CodeAddrSpace, dl), getI32Imm(vecType, dl),
- getI32Imm(toType, dl), getI32Imm(toTypeWidth, dl), Addr,
- Chain };
+ SDValue Ops[] = {Value,
+ getI32Imm(isVolatile, dl),
+ getI32Imm(CodeAddrSpace, dl),
+ getI32Imm(vecType, dl),
+ getI32Imm(toType, dl),
+ getI32Imm(toTypeWidth, dl),
+ Addr,
+ Chain};
NVPTXST = CurDAG->getMachineNode(Opcode.getValue(), dl, MVT::Other, Ops);
- } else if (PointerSize == 64 ? SelectADDRsi64(N2.getNode(), N2, Base, Offset)
- : SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
+ } else if (PointerSize == 64
+ ? SelectADDRsi64(BasePtr.getNode(), BasePtr, Base, Offset)
+ : SelectADDRsi(BasePtr.getNode(), BasePtr, Base, Offset)) {
Opcode = pickOpcodeForVT(SourceVT, NVPTX::ST_i8_asi, NVPTX::ST_i16_asi,
NVPTX::ST_i32_asi, NVPTX::ST_i64_asi,
NVPTX::ST_f16_asi, NVPTX::ST_f16x2_asi,
NVPTX::ST_f32_asi, NVPTX::ST_f64_asi);
if (!Opcode)
return false;
- SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
- getI32Imm(CodeAddrSpace, dl), getI32Imm(vecType, dl),
- getI32Imm(toType, dl), getI32Imm(toTypeWidth, dl), Base,
- Offset, Chain };
+ SDValue Ops[] = {Value,
+ getI32Imm(isVolatile, dl),
+ getI32Imm(CodeAddrSpace, dl),
+ getI32Imm(vecType, dl),
+ getI32Imm(toType, dl),
+ getI32Imm(toTypeWidth, dl),
+ Base,
+ Offset,
+ Chain};
NVPTXST = CurDAG->getMachineNode(Opcode.getValue(), dl, MVT::Other, Ops);
- } else if (PointerSize == 64 ? SelectADDRri64(N2.getNode(), N2, Base, Offset)
- : SelectADDRri(N2.getNode(), N2, Base, Offset)) {
+ } else if (PointerSize == 64
+ ? SelectADDRri64(BasePtr.getNode(), BasePtr, Base, Offset)
+ : SelectADDRri(BasePtr.getNode(), BasePtr, Base, Offset)) {
if (PointerSize == 64)
Opcode = pickOpcodeForVT(
SourceVT, NVPTX::ST_i8_ari_64, NVPTX::ST_i16_ari_64,
@@ -1787,10 +1826,15 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
if (!Opcode)
return false;
- SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
- getI32Imm(CodeAddrSpace, dl), getI32Imm(vecType, dl),
- getI32Imm(toType, dl), getI32Imm(toTypeWidth, dl), Base,
- Offset, Chain };
+ SDValue Ops[] = {Value,
+ getI32Imm(isVolatile, dl),
+ getI32Imm(CodeAddrSpace, dl),
+ getI32Imm(vecType, dl),
+ getI32Imm(toType, dl),
+ getI32Imm(toTypeWidth, dl),
+ Base,
+ Offset,
+ Chain};
NVPTXST = CurDAG->getMachineNode(Opcode.getValue(), dl, MVT::Other, Ops);
} else {
if (PointerSize == 64)
@@ -1806,10 +1850,14 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
NVPTX::ST_f32_areg, NVPTX::ST_f64_areg);
if (!Opcode)
return false;
- SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
- getI32Imm(CodeAddrSpace, dl), getI32Imm(vecType, dl),
- getI32Imm(toType, dl), getI32Imm(toTypeWidth, dl), N2,
- Chain };
+ SDValue Ops[] = {Value,
+ getI32Imm(isVolatile, dl),
+ getI32Imm(CodeAddrSpace, dl),
+ getI32Imm(vecType, dl),
+ getI32Imm(toType, dl),
+ getI32Imm(toTypeWidth, dl),
+ BasePtr,
+ Chain};
NVPTXST = CurDAG->getMachineNode(Opcode.getValue(), dl, MVT::Other, Ops);
}
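With this change, monotonic (relaxed) atomic loads and stores that fit the plain ld/st patterns are emitted as ld.volatile/st.volatile, matching the new load-store.ll test, while stronger orderings are rejected by the isStrongerThanMonotonic checks. For reference, a relaxed atomic access in C++ is roughly the kind of source that produces such monotonic IR (a sketch, not NVPTX-specific code):

#include <atomic>

int relaxed_increment(std::atomic<int> &Counter) {
  // memory_order_relaxed lowers to LLVM's "monotonic" ordering, which this
  // patch maps onto ld.volatile.u32 / st.volatile.u32 in the NVPTX backend.
  int V = Counter.load(std::memory_order_relaxed);
  Counter.store(V + 1, std::memory_order_relaxed);
  return V;
}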
diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp
index e13db08e263c..a0c78e0468c6 100644
--- a/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -47,21 +47,17 @@ STATISTIC(ChecksUnable, "Bounds checks unable to add");
using BuilderTy = IRBuilder<TargetFolder>;
-/// Adds run-time bounds checks to memory accessing instructions.
+/// Gets the conditions under which memory accessing instructions will overflow.
///
/// \p Ptr is the pointer that will be read/written, and \p InstVal is either
/// the result from the load or the value being stored. It is used to determine
/// the size of memory block that is touched.
///
-/// \p GetTrapBB is a callable that returns the trap BB to use on failure.
-///
-/// Returns true if any change was made to the IR, false otherwise.
-template <typename GetTrapBBT>
-static bool instrumentMemAccess(Value *Ptr, Value *InstVal,
- const DataLayout &DL, TargetLibraryInfo &TLI,
- ObjectSizeOffsetEvaluator &ObjSizeEval,
- BuilderTy &IRB, GetTrapBBT GetTrapBB,
- ScalarEvolution &SE) {
+/// Returns the condition under which the access will overflow.
+static Value *getBoundsCheckCond(Value *Ptr, Value *InstVal,
+ const DataLayout &DL, TargetLibraryInfo &TLI,
+ ObjectSizeOffsetEvaluator &ObjSizeEval,
+ BuilderTy &IRB, ScalarEvolution &SE) {
uint64_t NeededSize = DL.getTypeStoreSize(InstVal->getType());
LLVM_DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
<< " bytes\n");
@@ -70,7 +66,7 @@ static bool instrumentMemAccess(Value *Ptr, Value *InstVal,
if (!ObjSizeEval.bothKnown(SizeOffset)) {
++ChecksUnable;
- return false;
+ return nullptr;
}
Value *Size = SizeOffset.first;
@@ -107,13 +103,23 @@ static bool instrumentMemAccess(Value *Ptr, Value *InstVal,
Or = IRB.CreateOr(Cmp1, Or);
}
+ return Or;
+}
+
+/// Adds run-time bounds checks to memory accessing instructions.
+///
+/// \p Or is the condition that should guard the trap.
+///
+/// \p GetTrapBB is a callable that returns the trap BB to use on failure.
+template <typename GetTrapBBT>
+static void insertBoundsCheck(Value *Or, BuilderTy IRB, GetTrapBBT GetTrapBB) {
// check if the comparison is always false
ConstantInt *C = dyn_cast_or_null<ConstantInt>(Or);
if (C) {
++ChecksSkipped;
// If non-zero, nothing to do.
if (!C->getZExtValue())
- return true;
+ return;
}
++ChecksAdded;
@@ -127,12 +133,11 @@ static bool instrumentMemAccess(Value *Ptr, Value *InstVal,
// FIXME: We should really handle this differently to bypass the splitting
// the block.
BranchInst::Create(GetTrapBB(IRB), OldBB);
- return true;
+ return;
}
// Create the conditional branch.
BranchInst::Create(GetTrapBB(IRB), Cont, Or, OldBB);
- return true;
}
static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
@@ -143,11 +148,25 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
// check HANDLE_MEMORY_INST in include/llvm/Instruction.def for memory
// touching instructions
- std::vector<Instruction *> WorkList;
+ SmallVector<std::pair<Instruction *, Value *>, 4> TrapInfo;
for (Instruction &I : instructions(F)) {
- if (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicCmpXchgInst>(I) ||
- isa<AtomicRMWInst>(I))
- WorkList.push_back(&I);
+ Value *Or = nullptr;
+ BuilderTy IRB(I.getParent(), BasicBlock::iterator(&I), TargetFolder(DL));
+ if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+ Or = getBoundsCheckCond(LI->getPointerOperand(), LI, DL, TLI,
+ ObjSizeEval, IRB, SE);
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+ Or = getBoundsCheckCond(SI->getPointerOperand(), SI->getValueOperand(),
+ DL, TLI, ObjSizeEval, IRB, SE);
+ } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+ Or = getBoundsCheckCond(AI->getPointerOperand(), AI->getCompareOperand(),
+ DL, TLI, ObjSizeEval, IRB, SE);
+ } else if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
+ Or = getBoundsCheckCond(AI->getPointerOperand(), AI->getValOperand(), DL,
+ TLI, ObjSizeEval, IRB, SE);
+ }
+ if (Or)
+ TrapInfo.push_back(std::make_pair(&I, Or));
}
// Create a trapping basic block on demand using a callback. Depending on
@@ -176,29 +195,14 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
return TrapBB;
};
- bool MadeChange = false;
- for (Instruction *Inst : WorkList) {
+ // Add the checks.
+ for (const auto &Entry : TrapInfo) {
+ Instruction *Inst = Entry.first;
BuilderTy IRB(Inst->getParent(), BasicBlock::iterator(Inst), TargetFolder(DL));
- if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
- MadeChange |= instrumentMemAccess(LI->getPointerOperand(), LI, DL, TLI,
- ObjSizeEval, IRB, GetTrapBB, SE);
- } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- MadeChange |=
- instrumentMemAccess(SI->getPointerOperand(), SI->getValueOperand(),
- DL, TLI, ObjSizeEval, IRB, GetTrapBB, SE);
- } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
- MadeChange |=
- instrumentMemAccess(AI->getPointerOperand(), AI->getCompareOperand(),
- DL, TLI, ObjSizeEval, IRB, GetTrapBB, SE);
- } else if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst)) {
- MadeChange |=
- instrumentMemAccess(AI->getPointerOperand(), AI->getValOperand(), DL,
- TLI, ObjSizeEval, IRB, GetTrapBB, SE);
- } else {
- llvm_unreachable("unknown Instruction type");
- }
+ insertBoundsCheck(Entry.second, IRB, GetTrapBB);
}
- return MadeChange;
+
+ return !TrapInfo.empty();
}
PreservedAnalyses BoundsCheckingPass::run(Function &F, FunctionAnalysisManager &AM) {
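The refactor splits the old instrumentMemAccess into an analysis step (getBoundsCheckCond) and a mutation step (insertBoundsCheck), collecting all (instruction, condition) pairs before any block is split. A generic, self-contained sketch of that collect-then-mutate shape with toy data (no LLVM types; the container and values are made up for illustration):

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Phase 1: analyze while the "instruction list" is stable.
  std::vector<int> Insts = {5, -2, 7, -4};
  std::vector<std::pair<std::size_t, int>> TrapInfo;   // (position, guard value)
  for (std::size_t I = 0; I < Insts.size(); ++I)
    if (Insts[I] < 0)
      TrapInfo.emplace_back(I, -Insts[I]);
  // Phase 2: mutate afterwards, walking recorded positions in reverse so the
  // earlier indices stay valid, mirroring why the pass inserts checks only
  // after the scan over instructions(F) has finished.
  for (auto It = TrapInfo.rbegin(); It != TrapInfo.rend(); ++It)
    Insts.insert(Insts.begin() + It->first, It->second);
  for (int V : Insts)
    std::printf("%d ", V);
  std::printf("\n");
  return 0;
}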
diff --git a/test/CodeGen/AArch64/fcopysign.ll b/test/CodeGen/AArch64/fcopysign.ll
index 6bda33cf76ef..a625a9eb2a6c 100644
--- a/test/CodeGen/AArch64/fcopysign.ll
+++ b/test/CodeGen/AArch64/fcopysign.ll
@@ -5,10 +5,12 @@ target triple = "aarch64--"
declare fp128 @llvm.copysign.f128(fp128, fp128)
-@val = global double zeroinitializer, align 8
+@val_float = global float zeroinitializer, align 4
+@val_double = global double zeroinitializer, align 8
+@val_fp128 = global fp128 zeroinitializer, align 16
; CHECK-LABEL: copysign0
-; CHECK: ldr [[REG:x[0-9]+]], [x8, :lo12:val]
+; CHECK: ldr [[REG:x[0-9]+]], [x8, :lo12:val_double]
; CHECK: and [[ANDREG:x[0-9]+]], [[REG]], #0x8000000000000000
; CHECK: lsr x[[LSRREGNUM:[0-9]+]], [[ANDREG]], #56
; CHECK: bfxil w[[LSRREGNUM]], w{{[0-9]+}}, #0, #7
@@ -16,8 +18,25 @@ declare fp128 @llvm.copysign.f128(fp128, fp128)
; CHECK: ldr q{{[0-9]+}},
define fp128 @copysign0() {
entry:
- %v = load double, double* @val, align 8
+ %v = load double, double* @val_double, align 8
%conv = fpext double %v to fp128
%call = tail call fp128 @llvm.copysign.f128(fp128 0xL00000000000000007FFF000000000000, fp128 %conv) #2
ret fp128 %call
}
+
+; CHECK-LABEL: copysign1
+; CHECK-DAG: ldr [[REG:q[0-9]+]], [x8, :lo12:val_fp128]
+; CHECK-DAG: ldr [[REG:w[0-9]+]], [x8, :lo12:val_float]
+; CHECK: and [[ANDREG:w[0-9]+]], [[REG]], #0x80000000
+; CHECK: lsr w[[LSRREGNUM:[0-9]+]], [[ANDREG]], #24
+; CHECK: bfxil w[[LSRREGNUM]], w{{[0-9]+}}, #0, #7
+; CHECK: strb w[[LSRREGNUM]],
+; CHECK: ldr q{{[0-9]+}},
+define fp128@copysign1() {
+entry:
+ %v0 = load fp128, fp128* @val_fp128, align 16
+ %v1 = load float, float* @val_float, align 4
+ %conv = fpext float %v1 to fp128
+ %call = tail call fp128 @llvm.copysign.f128(fp128 %v0, fp128 %conv)
+ ret fp128 %call
+}
diff --git a/test/CodeGen/AMDGPU/kernel-args.ll b/test/CodeGen/AMDGPU/kernel-args.ll
index 9d1f582f4a88..11067522f857 100644
--- a/test/CodeGen/AMDGPU/kernel-args.ll
+++ b/test/CodeGen/AMDGPU/kernel-args.ll
@@ -16,13 +16,8 @@
; HSA-VI: s_and_b32 s{{[0-9]+}}, [[VAL]], 0xff
-; EG: LSHR T0.X, KC0[2].Y, literal.x,
-; EG-NEXT: MOV * T1.X, KC0[2].Z,
-; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
-
-; CM: LSHR * T0.X, KC0[2].Y, literal.x,
-; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
-; CM-NEXT: MOV * T1.X, KC0[2].Z,
+; EGCM: VTX_READ_8{{.*}} #3
+; EGCM: KC0[2].Y
define amdgpu_kernel void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
%ext = zext i8 %in to i32
store i32 %ext, i32 addrspace(1)* %out, align 4
@@ -92,14 +87,8 @@ define amdgpu_kernel void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 sign
; HSA-VI: s_and_b32 s{{[0-9]+}}, [[VAL]], 0xffff{{$}}
; HSA-VI: flat_store_dword
-
-; EG: LSHR T0.X, KC0[2].Y, literal.x,
-; EG-NEXT: MOV * T1.X, KC0[2].Z,
-; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
-
-; CM: LSHR * T0.X, KC0[2].Y, literal.x,
-; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
-; CM-NEXT: MOV * T1.X, KC0[2].Z,
+; EGCM: VTX_READ_16
+; EGCM: KC0[2].Y
define amdgpu_kernel void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
%ext = zext i16 %in to i32
store i32 %ext, i32 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/mad_uint24.ll b/test/CodeGen/AMDGPU/mad_uint24.ll
index 2c4f7d324a96..3c3371bf9166 100644
--- a/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN
+; RUN: llc < %s -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
@@ -138,3 +138,90 @@ bb18: ; preds = %bb4
store i32 %tmp16, i32 addrspace(1)* %arg
ret void
}
+
+; FUNC-LABEL: {{^}}i8_mad_sat_16:
+; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; The result must be sign-extended
+; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
+; EG: 8
+; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
+; GCN: v_med3_i32 v{{[0-9]}}, [[EXT]],
+define amdgpu_kernel void @i8_mad_sat_16(i8 addrspace(1)* %out, i8 addrspace(1)* %in0, i8 addrspace(1)* %in1, i8 addrspace(1)* %in2, i64 addrspace(5)* %idx) {
+entry:
+ %retval.0.i = load i64, i64 addrspace(5)* %idx
+ %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 %retval.0.i
+ %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 %retval.0.i
+ %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %in2, i64 %retval.0.i
+ %l1 = load i8, i8 addrspace(1)* %arrayidx, align 1
+ %l2 = load i8, i8 addrspace(1)* %arrayidx2, align 1
+ %l3 = load i8, i8 addrspace(1)* %arrayidx4, align 1
+ %conv1.i = sext i8 %l1 to i16
+ %conv3.i = sext i8 %l2 to i16
+ %conv5.i = sext i8 %l3 to i16
+ %mul.i.i.i = mul nsw i16 %conv3.i, %conv1.i
+ %add.i.i = add i16 %mul.i.i.i, %conv5.i
+ %c4 = icmp sgt i16 %add.i.i, -128
+ %cond.i.i = select i1 %c4, i16 %add.i.i, i16 -128
+ %c5 = icmp slt i16 %cond.i.i, 127
+ %cond13.i.i = select i1 %c5, i16 %cond.i.i, i16 127
+ %conv8.i = trunc i16 %cond13.i.i to i8
+ %arrayidx7 = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 %retval.0.i
+ store i8 %conv8.i, i8 addrspace(1)* %arrayidx7, align 1
+ ret void
+}
+
+; FUNC-LABEL: {{^}}i8_mad_32:
+; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; The result must be sign-extended
+; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
+; EG: 8
+; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
+define amdgpu_kernel void @i8_mad_32(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b, i8 addrspace(1)* %c, i64 addrspace(5)* %idx) {
+entry:
+ %retval.0.i = load i64, i64 addrspace(5)* %idx
+ %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 %retval.0.i
+ %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %b, i64 %retval.0.i
+ %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %c, i64 %retval.0.i
+ %la = load i8, i8 addrspace(1)* %arrayidx, align 1
+ %lb = load i8, i8 addrspace(1)* %arrayidx2, align 1
+ %lc = load i8, i8 addrspace(1)* %arrayidx4, align 1
+ %exta = sext i8 %la to i16
+ %extb = sext i8 %lb to i16
+ %extc = sext i8 %lc to i16
+ %mul = mul i16 %exta, %extb
+ %mad = add i16 %mul, %extc
+ %mad_ext = sext i16 %mad to i32
+ store i32 %mad_ext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}i8_mad_64:
+; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; The result must be sign-extended
+; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
+; EG: 8
+; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
+define amdgpu_kernel void @i8_mad_64(i64 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b, i8 addrspace(1)* %c, i64 addrspace(5)* %idx) {
+entry:
+ %retval.0.i = load i64, i64 addrspace(5)* %idx
+ %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 %retval.0.i
+ %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %b, i64 %retval.0.i
+ %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %c, i64 %retval.0.i
+ %la = load i8, i8 addrspace(1)* %arrayidx, align 1
+ %lb = load i8, i8 addrspace(1)* %arrayidx2, align 1
+ %lc = load i8, i8 addrspace(1)* %arrayidx4, align 1
+ %exta = sext i8 %la to i16
+ %extb = sext i8 %lb to i16
+ %extc = sext i8 %lc to i16
+ %mul = mul i16 %exta, %extb
+ %mad = add i16 %mul, %extc
+ %mad_ext = sext i16 %mad to i64
+ store i64 %mad_ext, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll b/test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll
index a7adc2ae996e..03f0539e19be 100644
--- a/test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll
+++ b/test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll
@@ -60,8 +60,11 @@ entry:
}
; FUNC-LABEL: {{^}}test_implicit:
-; 36 prepended implicit bytes + 4(out pointer) + 4*4 = 56
-; EG: VTX_READ_32 {{T[0-9]+\.[XYZW]}}, {{T[0-9]+\.[XYZW]}}, 56
+; 36 prepended implicit bytes + 4(out pointer) + 4*4 = 56 == KC0[3].Z
+; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+.[XYZW]]], [[PTR:T[0-9]+.[XYZW]]]
+; EG-NOT: VTX_READ
+; EG-DAG: MOV {{\*?}} [[VAL]], KC0[3].Z
+; EG-DAG: LSHR {{\*? *}}[[PTR]], KC0[2].Y, literal
define amdgpu_kernel void @test_implicit(i32 addrspace(1)* %out) #1 {
%implicitarg.ptr = call noalias i8 addrspace(7)* @llvm.r600.implicitarg.ptr()
%header.ptr = bitcast i8 addrspace(7)* %implicitarg.ptr to i32 addrspace(7)*
@@ -73,7 +76,7 @@ define amdgpu_kernel void @test_implicit(i32 addrspace(1)* %out) #1 {
; FUNC-LABEL: {{^}}test_implicit_dyn:
; 36 prepended implicit bytes + 8(out pointer + in) = 44
-; EG: VTX_READ_32 {{T[0-9]+\.[XYZW]}}, {{T[0-9]+\.[XYZW]}}, 44
+; EG: VTX_READ_32 {{T[0-9]+\.[XYZW]}}, {{T[0-9]+\.[XYZW]}}, 44, #3
define amdgpu_kernel void @test_implicit_dyn(i32 addrspace(1)* %out, i32 %in) #1 {
%implicitarg.ptr = call noalias i8 addrspace(7)* @llvm.r600.implicitarg.ptr()
%header.ptr = bitcast i8 addrspace(7)* %implicitarg.ptr to i32 addrspace(7)*
diff --git a/test/CodeGen/NVPTX/load-store.ll b/test/CodeGen/NVPTX/load-store.ll
new file mode 100644
index 000000000000..03b0109dea20
--- /dev/null
+++ b/test/CodeGen/NVPTX/load-store.ll
@@ -0,0 +1,88 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+
+; CHECK-LABEL: plain
+define void @plain(i8* %a, i16* %b, i32* %c, i64* %d) local_unnamed_addr {
+ ; CHECK: ld.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load i8, i8* %a
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store i8 %a.add, i8* %a
+
+ ; CHECK: ld.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load i16, i16* %b
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store i16 %b.add, i16* %b
+
+ ; CHECK: ld.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load i32, i32* %c
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store i32 %c.add, i32* %c
+
+ ; CHECK: ld.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load i64, i64* %d
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store i64 %d.add, i64* %d
+
+ ret void
+}
+
+; CHECK-LABEL: volatile
+define void @volatile(i8* %a, i16* %b, i32* %c, i64* %d) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load volatile i8, i8* %a
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store volatile i8 %a.add, i8* %a
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load volatile i16, i16* %b
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store volatile i16 %b.add, i16* %b
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load volatile i32, i32* %c
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile i32 %c.add, i32* %c
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load volatile i64, i64* %d
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store volatile i64 %d.add, i64* %d
+
+ ret void
+}
+
+; CHECK-LABEL: monotonic
+define void @monotonic(i8* %a, i16* %b, i32* %c, i64* %d) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, i8* %a monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, i8* %a monotonic, align 1
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, i16* %b monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, i16* %b monotonic, align 2
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, i32* %c monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, i32* %c monotonic, align 4
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, i64* %d monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, i64* %d monotonic, align 8
+
+ ret void
+}
diff --git a/test/CodeGen/X86/masked_memop.ll b/test/CodeGen/X86/masked_memop.ll
index 812d9f50fe3c..36da9386fb06 100644
--- a/test/CodeGen/X86/masked_memop.ll
+++ b/test/CodeGen/X86/masked_memop.ll
@@ -1310,6 +1310,65 @@ define void @trunc_mask(<4 x float> %x, <4 x float>* %ptr, <4 x float> %y, <4 x
ret void
}
+; This needs to be widened to v4i32.
+; This used to assert in type legalization. PR38436
+; FIXME: The codegen for AVX512 should use KSHIFT to zero the upper bits of the mask.
+define void @widen_masked_store(<3 x i32> %v, <3 x i32>* %p, <3 x i1> %mask) {
+; AVX1-LABEL: widen_masked_store:
+; AVX1: ## %bb.0:
+; AVX1-NEXT: vmovd %edx, %xmm1
+; AVX1-NEXT: vmovd %esi, %xmm2
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT: vmaskmovps %xmm0, %xmm1, (%rdi)
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_masked_store:
+; AVX2: ## %bb.0:
+; AVX2-NEXT: vmovd %edx, %xmm1
+; AVX2-NEXT: vmovd %esi, %xmm2
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmaskmovd %xmm0, %xmm1, (%rdi)
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_masked_store:
+; AVX512F: ## %bb.0:
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1
+; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3]
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kshiftlw $12, %k0, %k0
+; AVX512F-NEXT: kshiftrw $12, %k0, %k1
+; AVX512F-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1}
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; SKX-LABEL: widen_masked_store:
+; SKX: ## %bb.0:
+; SKX-NEXT: vpslld $31, %xmm1, %xmm1
+; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
+; SKX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; SKX-NEXT: vmovdqa32 %xmm1, %xmm1 {%k1} {z}
+; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3]
+; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
+; SKX-NEXT: vmovdqa32 %xmm0, (%rdi) {%k1}
+; SKX-NEXT: retq
+ call void @llvm.masked.store.v3i32(<3 x i32> %v, <3 x i32>* %p, i32 16, <3 x i1> %mask)
+ ret void
+}
+declare void @llvm.masked.store.v3i32(<3 x i32>, <3 x i32>*, i32, <3 x i1>)
+
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
declare <2 x i32> @llvm.masked.load.v2i32.p0v2i32(<2 x i32>*, i32, <2 x i1>, <2 x i32>)
declare <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>*, i32, <4 x i1>, <4 x i64>)
diff --git a/test/Instrumentation/BoundsChecking/many-traps-2.ll b/test/Instrumentation/BoundsChecking/many-traps-2.ll
new file mode 100644
index 000000000000..a6e99586af21
--- /dev/null
+++ b/test/Instrumentation/BoundsChecking/many-traps-2.ll
@@ -0,0 +1,65 @@
+; RUN: opt < %s -bounds-checking -S | FileCheck %s
+@array = internal global [1819 x i16] zeroinitializer, section ".bss,bss"
+@offsets = external dso_local global [10 x i16]
+
+; CHECK-LABEL: @test
+define dso_local void @test() {
+bb1:
+ br label %bb19
+
+bb20:
+ %_tmp819 = load i16, i16* null
+; CHECK: br {{.*}} %trap
+ %_tmp820 = sub nsw i16 9, %_tmp819
+ %_tmp821 = sext i16 %_tmp820 to i64
+ %_tmp822 = getelementptr [10 x i16], [10 x i16]* @offsets, i16 0, i64 %_tmp821
+ %_tmp823 = load i16, i16* %_tmp822
+ br label %bb33
+
+bb34:
+ %_tmp907 = zext i16 %i__7.107.0 to i64
+ %_tmp908 = getelementptr [1819 x i16], [1819 x i16]* @array, i16 0, i64 %_tmp907
+ store i16 0, i16* %_tmp908
+; CHECK: br {{.*}} %trap
+ %_tmp910 = add i16 %i__7.107.0, 1
+ br label %bb33
+
+bb33:
+ %i__7.107.0 = phi i16 [ undef, %bb20 ], [ %_tmp910, %bb34 ]
+ %_tmp913 = add i16 %_tmp823, 191
+ %_tmp914 = icmp ult i16 %i__7.107.0, %_tmp913
+ br i1 %_tmp914, label %bb34, label %bb19
+
+bb19:
+ %_tmp976 = icmp slt i16 0, 10
+ br i1 %_tmp976, label %bb20, label %bb39
+
+bb39:
+ ret void
+}
+
+@e = dso_local local_unnamed_addr global [1 x i16] zeroinitializer, align 1
+
+; CHECK-LABEL: @test2
+define dso_local void @test2() local_unnamed_addr {
+entry:
+ br label %while.cond1.preheader
+
+while.cond1.preheader:
+ %0 = phi i16 [ undef, %entry ], [ %inc, %while.end ]
+ %1 = load i16, i16* undef, align 1
+; CHECK: br {{.*}} %trap
+ br label %while.end
+
+while.end:
+ %inc = add nsw i16 %0, 1
+ %arrayidx = getelementptr inbounds [1 x i16], [1 x i16]* @e, i16 0, i16
+ %0
+ %2 = load i16, i16* %arrayidx, align 1
+; CHECK: or i1
+; CHECK-NEXT: br {{.*}} %trap
+ br i1 false, label %while.end6, label %while.cond1.preheader
+
+while.end6:
+ ret void
+}
diff --git a/test/MC/ELF/extra-section-flags.s b/test/MC/ELF/extra-section-flags.s
deleted file mode 100644
index bde7e1abf8ef..000000000000
--- a/test/MC/ELF/extra-section-flags.s
+++ /dev/null
@@ -1,12 +0,0 @@
-# RUN: llvm-mc -triple x86_64-unknown-unknown -filetype=obj %s -o /dev/null 2>&1 | FileCheck %s
-
-.section .rodata, "ax"
-# CHECK: warning: setting incorrect section attributes for .rodata
-nop
-
-.section .rodata, "a"
-nop
-.section .rodata.cst4, "aM",@progbits,8
-nop
-# CHECK-NOT: warning:
-
diff --git a/test/Transforms/InstSimplify/AndOrXor.ll b/test/Transforms/InstSimplify/AndOrXor.ll
index ed68f1121278..8054eb045364 100644
--- a/test/Transforms/InstSimplify/AndOrXor.ll
+++ b/test/Transforms/InstSimplify/AndOrXor.ll
@@ -967,12 +967,8 @@ define i32 @reversed_not(i32 %a) {
define i64 @shl_or_and1(i32 %a, i1 %b) {
; CHECK-LABEL: @shl_or_and1(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[B:%.*]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP1]], 32
-; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 1
-; CHECK-NEXT: ret i64 [[TMP5]]
+; CHECK-NEXT: ret i64 [[TMP2]]
;
%tmp1 = zext i32 %a to i64
%tmp2 = zext i1 %b to i64
@@ -985,11 +981,8 @@ define i64 @shl_or_and1(i32 %a, i1 %b) {
define i64 @shl_or_and2(i32 %a, i1 %b) {
; CHECK-LABEL: @shl_or_and2(
; CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[B:%.*]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[A:%.*]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP1]], 32
-; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 4294967296
-; CHECK-NEXT: ret i64 [[TMP5]]
+; CHECK-NEXT: ret i64 [[TMP3]]
;
%tmp1 = zext i1 %b to i64
%tmp2 = zext i32 %a to i64
@@ -999,15 +992,11 @@ define i64 @shl_or_and2(i32 %a, i1 %b) {
ret i64 %tmp5
}
-; concatinate two 32-bit integers and extract lower 32-bit
+; concatenate two 32-bit integers and extract lower 32-bit
define i64 @shl_or_and3(i32 %a, i32 %b) {
; CHECK-LABEL: @shl_or_and3(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[B:%.*]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP1]], 32
-; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 4294967295
-; CHECK-NEXT: ret i64 [[TMP5]]
+; CHECK-NEXT: ret i64 [[TMP2]]
;
%tmp1 = zext i32 %a to i64
%tmp2 = zext i32 %b to i64
@@ -1017,15 +1006,12 @@ define i64 @shl_or_and3(i32 %a, i32 %b) {
ret i64 %tmp5
}
-; concatinate two 16-bit integers and extract higher 16-bit
+; concatenate two 16-bit integers and extract higher 16-bit
define i32 @shl_or_and4(i16 %a, i16 %b) {
; CHECK-LABEL: @shl_or_and4(
; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[A:%.*]] to i32
-; CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[B:%.*]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP1]], 16
-; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[TMP4]], -65536
-; CHECK-NEXT: ret i32 [[TMP5]]
+; CHECK-NEXT: ret i32 [[TMP3]]
;
%tmp1 = zext i16 %a to i32
%tmp2 = zext i16 %b to i32
@@ -1037,12 +1023,8 @@ define i32 @shl_or_and4(i16 %a, i16 %b) {
define i128 @shl_or_and5(i64 %a, i1 %b) {
; CHECK-LABEL: @shl_or_and5(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i64 [[A:%.*]] to i128
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[B:%.*]] to i128
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i128 [[TMP1]], 64
-; CHECK-NEXT: [[TMP4:%.*]] = or i128 [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = and i128 [[TMP4]], 1
-; CHECK-NEXT: ret i128 [[TMP5]]
+; CHECK-NEXT: ret i128 [[TMP2]]
;
%tmp1 = zext i64 %a to i128
%tmp2 = zext i1 %b to i128
@@ -1108,12 +1090,8 @@ define i32 @shl_or_and8(i16 %a, i16 %b) {
define <2 x i64> @shl_or_and1v(<2 x i32> %a, <2 x i1> %b) {
; CHECK-LABEL: @shl_or_and1v(
-; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i1> [[B:%.*]] to <2 x i64>
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw <2 x i64> [[TMP1]], <i64 32, i64 32>
-; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i64> [[TMP3]], [[TMP2]]
-; CHECK-NEXT: [[TMP5:%.*]] = and <2 x i64> [[TMP4]], <i64 1, i64 1>
-; CHECK-NEXT: ret <2 x i64> [[TMP5]]
+; CHECK-NEXT: ret <2 x i64> [[TMP2]]
;
%tmp1 = zext <2 x i32> %a to <2 x i64>
%tmp2 = zext <2 x i1> %b to <2 x i64>
@@ -1126,11 +1104,8 @@ define <2 x i64> @shl_or_and1v(<2 x i32> %a, <2 x i1> %b) {
define <2 x i64> @shl_or_and2v(<2 x i32> %a, <2 x i1> %b) {
; CHECK-LABEL: @shl_or_and2v(
; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i1> [[B:%.*]] to <2 x i64>
-; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw <2 x i64> [[TMP1]], <i64 32, i64 32>
-; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i64> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = and <2 x i64> [[TMP4]], <i64 4294967296, i64 4294967296>
-; CHECK-NEXT: ret <2 x i64> [[TMP5]]
+; CHECK-NEXT: ret <2 x i64> [[TMP3]]
;
%tmp1 = zext <2 x i1> %b to <2 x i64>
%tmp2 = zext <2 x i32> %a to <2 x i64>
diff --git a/test/Transforms/InstSimplify/floating-point-compare.ll b/test/Transforms/InstSimplify/floating-point-compare.ll
index b1444bb5a1e2..bc5c58a698e3 100644
--- a/test/Transforms/InstSimplify/floating-point-compare.ll
+++ b/test/Transforms/InstSimplify/floating-point-compare.ll
@@ -266,13 +266,15 @@ define i1 @orderedLessZeroMinNum(float, float) {
ret i1 %uge
}
-; FIXME: This is wrong.
; PR37776: https://bugs.llvm.org/show_bug.cgi?id=37776
; exp() may return nan, leaving %1 as the unknown result, so we can't simplify.
define i1 @orderedLessZeroMaxNum(float, float) {
; CHECK-LABEL: @orderedLessZeroMaxNum(
-; CHECK-NEXT: ret i1 true
+; CHECK-NEXT: [[A:%.*]] = call float @llvm.exp.f32(float [[TMP0:%.*]])
+; CHECK-NEXT: [[B:%.*]] = call float @llvm.maxnum.f32(float [[A]], float [[TMP1:%.*]])
+; CHECK-NEXT: [[UGE:%.*]] = fcmp uge float [[B]], 0.000000e+00
+; CHECK-NEXT: ret i1 [[UGE]]
;
%a = call float @llvm.exp.f32(float %0)
%b = call float @llvm.maxnum.f32(float %a, float %1)
diff --git a/test/Transforms/NewGVN/pair_jumpthread.ll b/test/Transforms/NewGVN/pair_jumpthread.ll
index 9e55cda82eb1..65d94e1f6353 100644
--- a/test/Transforms/NewGVN/pair_jumpthread.ll
+++ b/test/Transforms/NewGVN/pair_jumpthread.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -newgvn -S | FileCheck %s
; RUN: opt < %s -newgvn -jump-threading -S | FileCheck --check-prefix=CHECK-JT %s
-; This test is expected to fail until the transformation is committed.
-; XFAIL: *
define signext i32 @testBI(i32 signext %v) {
; Test with std::pair<bool, int>