path: root/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc')
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h  37
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp  113
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp  34
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp  43
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h  5
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp  626
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h  42
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp  2
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp  117
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp  45
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h  41
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp  127
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h  16
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp  8
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp  11
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h  32
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp  12
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp  99
18 files changed, 1141 insertions(+), 269 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
index 876526093591..03cbd272757e 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -13,7 +13,6 @@
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
-#include "AArch64ExpandImm.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/bit.h"
@@ -237,17 +236,17 @@ static inline bool processLogicalImmediate(uint64_t Imm, unsigned RegSize,
Imm &= Mask;
if (isShiftedMask_64(Imm)) {
- I = countTrailingZeros(Imm);
+ I = llvm::countr_zero(Imm);
assert(I < 64 && "undefined behavior");
- CTO = countTrailingOnes(Imm >> I);
+ CTO = llvm::countr_one(Imm >> I);
} else {
Imm |= ~Mask;
if (!isShiftedMask_64(~Imm))
return false;
- unsigned CLO = countLeadingOnes(Imm);
+ unsigned CLO = llvm::countl_one(Imm);
I = 64 - CLO;
- CTO = CLO + countTrailingOnes(Imm) - (64 - Size);
+ CTO = CLO + llvm::countr_one(Imm) - (64 - Size);
}
// Encode in Immr the number of RORs it would take to get *from* 0^m 1^n
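The hunk above swaps the older countTrailingZeros/countTrailingOnes/countLeadingOnes helpers for llvm::countr_zero/countr_one/countl_one from llvm/ADT/bit.h (which the patch also starts including); those names mirror the C++20 <bit> functions. A minimal standalone sketch of the same bit arithmetic, using the standard-library equivalents rather than the LLVM headers:

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  // A shifted mask: a contiguous run of ones starting at bit 12.
  uint64_t Imm = 0x00000000000ff000ULL;
  unsigned I = std::countr_zero(Imm);        // rotation amount: 12
  unsigned CTO = std::countr_one(Imm >> I);  // run length: 8
  assert(I == 12 && CTO == 8);
  return 0;
}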
@@ -298,7 +297,7 @@ static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) {
unsigned imms = val & 0x3f;
assert((regSize == 64 || N == 0) && "undefined logical immediate encoding");
- int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
+ int len = 31 - llvm::countl_zero((N << 6) | (~imms & 0x3f));
assert(len >= 0 && "undefined logical immediate encoding");
unsigned size = (1 << len);
unsigned R = immr & (size - 1);
@@ -327,7 +326,7 @@ static inline bool isValidDecodeLogicalImmediate(uint64_t val,
if (regSize == 32 && N != 0) // undefined logical immediate encoding
return false;
- int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
+ int len = 31 - llvm::countl_zero((N << 6) | (~imms & 0x3f));
if (len < 0) // undefined logical immediate encoding
return false;
unsigned size = (1 << len);
@@ -592,6 +591,27 @@ static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
// cmode: 1110, op: 1
static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
+#if defined(_MSC_VER) && _MSC_VER == 1937 && !defined(__clang__) && \
+ defined(_M_ARM64)
+ // The MSVC compiler 19.37 for ARM64 has an optimization bug that
+ // causes an incorrect behavior with the original version. Work around
+ // by using a slightly different variation.
+ // https://developercommunity.visualstudio.com/t/C-ARM64-compiler-optimization-bug/10481261
+ constexpr uint64_t Mask = 0xFFULL;
+ uint64_t ByteA = (Imm >> 56) & Mask;
+ uint64_t ByteB = (Imm >> 48) & Mask;
+ uint64_t ByteC = (Imm >> 40) & Mask;
+ uint64_t ByteD = (Imm >> 32) & Mask;
+ uint64_t ByteE = (Imm >> 24) & Mask;
+ uint64_t ByteF = (Imm >> 16) & Mask;
+ uint64_t ByteG = (Imm >> 8) & Mask;
+ uint64_t ByteH = Imm & Mask;
+
+ return (ByteA == 0ULL || ByteA == Mask) && (ByteB == 0ULL || ByteB == Mask) &&
+ (ByteC == 0ULL || ByteC == Mask) && (ByteD == 0ULL || ByteD == Mask) &&
+ (ByteE == 0ULL || ByteE == Mask) && (ByteF == 0ULL || ByteF == Mask) &&
+ (ByteG == 0ULL || ByteG == Mask) && (ByteH == 0ULL || ByteH == Mask);
+#else
uint64_t ByteA = Imm & 0xff00000000000000ULL;
uint64_t ByteB = Imm & 0x00ff000000000000ULL;
uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
@@ -609,6 +629,7 @@ static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
(ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
(ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
(ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
+#endif
}
static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {
@@ -755,7 +776,7 @@ static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) {
template <typename T>
static inline bool isSVEMaskOfIdenticalElements(int64_t Imm) {
auto Parts = bit_cast<std::array<T, sizeof(int64_t) / sizeof(T)>>(Imm);
- return all_of(Parts, [&](T Elem) { return Elem == Parts[0]; });
+ return llvm::all_equal(Parts);
}
/// Returns true if Imm is valid for CPY/DUP.
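For reference, the predicate that both the original and the MSVC/ARM64 workaround versions of isAdvSIMDModImmType10 implement is simply "every byte of the 64-bit immediate is 0x00 or 0xff"; the workaround only rephrases it as shift-and-mask byte extraction. A standalone sketch (not the LLVM code) of that property:

#include <cstdint>

// Sketch of the property isAdvSIMDModImmType10 tests: each of the eight
// bytes must be either all-zeros or all-ones.
constexpr bool everyByteAllZeroOrAllOnes(uint64_t Imm) {
  for (int Byte = 0; Byte < 8; ++Byte) {
    uint64_t B = (Imm >> (8 * Byte)) & 0xffULL;
    if (B != 0x00ULL && B != 0xffULL)
      return false;
  }
  return true;
}

static_assert(everyByteAllZeroOrAllOnes(0xff00ff0000ff00ffULL), "valid imm");
static_assert(!everyByteAllZeroOrAllOnes(0x1200ff0000ff00ffULL), "0x12 byte");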
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index dbb8e85713cb..30ef3680ae79 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -10,7 +10,6 @@
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
@@ -22,11 +21,13 @@
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
-#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/TargetParser/Triple.h"
using namespace llvm;
namespace {
@@ -39,14 +40,15 @@ protected:
public:
AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
- : MCAsmBackend(IsLittleEndian ? support::little : support::big),
+ : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
+ : llvm::endianness::big),
TheTriple(TT) {}
unsigned getNumFixupKinds() const override {
return AArch64::NumTargetFixupKinds;
}
- Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
+ std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
@@ -65,6 +67,7 @@ public:
{"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
{"fixup_aarch64_movw", 5, 16, 0},
{"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
+ {"fixup_aarch64_pcrel_branch16", 5, 16, PCRelFlagVal},
{"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
{"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
{"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};
@@ -98,7 +101,8 @@ public:
unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;
bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
- const MCValue &Target) override;
+ const MCValue &Target,
+ const MCSubtargetInfo *STI) override;
};
} // end anonymous namespace
@@ -118,6 +122,7 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
case AArch64::fixup_aarch64_movw:
case AArch64::fixup_aarch64_pcrel_branch14:
+ case AArch64::fixup_aarch64_pcrel_branch16:
case AArch64::fixup_aarch64_add_imm12:
case AArch64::fixup_aarch64_ldst_imm12_scale1:
case AArch64::fixup_aarch64_ldst_imm12_scale2:
@@ -155,7 +160,7 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
default:
llvm_unreachable("Unknown fixup kind!");
case AArch64::fixup_aarch64_pcrel_adr_imm21:
- if (SignedValue > 2097151 || SignedValue < -2097152)
+ if (!isInt<21>(SignedValue))
Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
return AdrImmBits(Value & 0x1fffffULL);
case AArch64::fixup_aarch64_pcrel_adrp_imm21:
@@ -168,8 +173,8 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
case AArch64::fixup_aarch64_ldr_pcrel_imm19:
case AArch64::fixup_aarch64_pcrel_branch19:
- // Signed 21-bit immediate
- if (SignedValue > 2097151 || SignedValue < -2097152)
+ // Signed 19-bit immediate which gets multiplied by 4
+ if (!isInt<21>(SignedValue))
Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
if (Value & 0x3)
Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
@@ -180,14 +185,14 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
Value &= 0xfff;
// Unsigned 12-bit immediate
- if (Value >= 0x1000)
+ if (!isUInt<12>(Value))
Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
return Value;
case AArch64::fixup_aarch64_ldst_imm12_scale2:
if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
Value &= 0xfff;
// Unsigned 12-bit immediate which gets multiplied by 2
- if (Value >= 0x2000)
+ if (!isUInt<13>(Value))
Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
if (Value & 0x1)
Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
@@ -196,7 +201,7 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
Value &= 0xfff;
// Unsigned 12-bit immediate which gets multiplied by 4
- if (Value >= 0x4000)
+ if (!isUInt<14>(Value))
Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
if (Value & 0x3)
Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
@@ -205,7 +210,7 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
Value &= 0xfff;
// Unsigned 12-bit immediate which gets multiplied by 8
- if (Value >= 0x8000)
+ if (!isUInt<15>(Value))
Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
if (Value & 0x7)
Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
@@ -214,7 +219,7 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
Value &= 0xfff;
// Unsigned 12-bit immediate which gets multiplied by 16
- if (Value >= 0x10000)
+ if (!isUInt<16>(Value))
Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
if (Value & 0xf)
Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
@@ -305,16 +310,34 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
}
case AArch64::fixup_aarch64_pcrel_branch14:
// Signed 16-bit immediate
- if (SignedValue > 32767 || SignedValue < -32768)
+ if (!isInt<16>(SignedValue))
Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
// Low two bits are not encoded (4-byte alignment assumed).
if (Value & 0x3)
Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
return (Value >> 2) & 0x3fff;
+ case AArch64::fixup_aarch64_pcrel_branch16:
+ // Unsigned PC-relative offset, so invert the negative immediate.
+ SignedValue = -SignedValue;
+ Value = static_cast<uint64_t>(SignedValue);
+ // Check valid 18-bit unsigned range.
+ if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ // Low two bits are not encoded (4-byte alignment assumed).
+ if (Value & 0b11)
+ Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
+ return (Value >> 2) & 0xffff;
case AArch64::fixup_aarch64_pcrel_branch26:
case AArch64::fixup_aarch64_pcrel_call26:
+ if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
+ // MSVC link.exe and lld do not support this relocation type
+ // with a non-zero offset
+ Ctx.reportError(Fixup.getLoc(),
+ "cannot perform a PC-relative fixup with a non-zero "
+ "symbol offset");
+ }
// Signed 28-bit immediate
- if (SignedValue > 134217727 || SignedValue < -134217728)
+ if (!isInt<28>(SignedValue))
Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
// Low two bits are not encoded (4-byte alignment assumed).
if (Value & 0x3)
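The new fixup_aarch64_pcrel_branch16 case (used by the pointer-authentication check branches) stores an unsigned, 4-byte-scaled distance, so the handler above negates the PC-relative delta before range-checking and scaling it. A small sketch of that computation:

#include <cassert>
#include <cstdint>

// Mirrors the fixup_aarch64_pcrel_branch16 case above: negate the backwards
// delta, check the 18-bit unsigned range and 4-byte alignment, then drop the
// low two bits to get the 16-bit field.
static uint64_t encodeBranch16Field(int64_t PCRelDelta) {
  int64_t Positive = -PCRelDelta; // invert the negative immediate
  assert(Positive >= 0 && Positive <= (1 << 18) - 1 && "fixup value out of range");
  assert((Positive & 0x3) == 0 && "fixup not sufficiently aligned");
  return (static_cast<uint64_t>(Positive) >> 2) & 0xffff;
}

int main() {
  assert(encodeBranch16Field(-1024) == 256); // branch 1 KiB backwards
  return 0;
}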
@@ -330,9 +353,10 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
}
}
-Optional<MCFixupKind> AArch64AsmBackend::getFixupKind(StringRef Name) const {
+std::optional<MCFixupKind>
+AArch64AsmBackend::getFixupKind(StringRef Name) const {
if (!TheTriple.isOSBinFormatELF())
- return None;
+ return std::nullopt;
unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
@@ -344,14 +368,14 @@ Optional<MCFixupKind> AArch64AsmBackend::getFixupKind(StringRef Name) const {
.Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
.Default(-1u);
if (Type == -1u)
- return None;
+ return std::nullopt;
return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}
/// getFixupKindContainereSizeInBytes - The number of bytes of the
/// container involved in big endian or 0 if the item is little endian
unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
- if (Endian == support::little)
+ if (Endian == llvm::endianness::little)
return 0;
switch (Kind) {
@@ -369,6 +393,7 @@ unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) con
case AArch64::fixup_aarch64_movw:
case AArch64::fixup_aarch64_pcrel_branch14:
+ case AArch64::fixup_aarch64_pcrel_branch16:
case AArch64::fixup_aarch64_add_imm12:
case AArch64::fixup_aarch64_ldst_imm12_scale1:
case AArch64::fixup_aarch64_ldst_imm12_scale2:
@@ -391,6 +416,19 @@ void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
MutableArrayRef<char> Data, uint64_t Value,
bool IsResolved,
const MCSubtargetInfo *STI) const {
+ if (Fixup.getTargetKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
+ auto RefKind = static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+ AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
+ if (SymLoc == AArch64AuthMCExpr::VK_AUTH ||
+ SymLoc == AArch64AuthMCExpr::VK_AUTHADDR) {
+ assert(Value == 0);
+ const auto *Expr = cast<AArch64AuthMCExpr>(Fixup.getValue());
+ Value = (uint64_t(Expr->getDiscriminator()) << 32) |
+ (uint64_t(Expr->getKey()) << 60) |
+ (uint64_t(Expr->hasAddressDiversity()) << 63);
+ }
+ }
+
if (!Value)
return; // Doesn't change encoding.
unsigned Kind = Fixup.getKind();
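When an ELF FK_Data_8 fixup carries a ptrauth (VK_AUTH/VK_AUTHADDR) expression, the hunk above packs the signing schema into the 64-bit data word. A small constexpr sketch of that packing, with the field positions taken directly from the code above rather than from a specification:

#include <cstdint>

// The 16-bit discriminator lands at bit 32, the 2-bit key at bit 60, and the
// address-diversity flag at bit 63.
constexpr uint64_t packAuthValue(uint16_t Discriminator, uint8_t Key,
                                 bool AddressDiversity) {
  return (uint64_t(Discriminator) << 32) | (uint64_t(Key) << 60) |
         (uint64_t(AddressDiversity) << 63);
}

static_assert(packAuthValue(0x1234, 2, true) == 0xA000123400000000ULL,
              "discriminator at bit 32, key at bit 60, diversity at bit 63");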
@@ -470,13 +508,14 @@ bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
// We are properly aligned, so write NOPs as requested.
Count /= 4;
for (uint64_t i = 0; i != Count; ++i)
- support::endian::write<uint32_t>(OS, 0xd503201f, Endian);
+ OS.write("\x1f\x20\x03\xd5", 4);
return true;
}
bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
const MCFixup &Fixup,
- const MCValue &Target) {
+ const MCValue &Target,
+ const MCSubtargetInfo *STI) {
unsigned Kind = Fixup.getKind();
if (Kind >= FirstLiteralRelocationKind)
return true;
@@ -563,10 +602,14 @@ public:
}
/// Generate the compact unwind encoding from the CFI directives.
- uint32_t generateCompactUnwindEncoding(
- ArrayRef<MCCFIInstruction> Instrs) const override {
+ uint32_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
+ const MCContext *Ctxt) const override {
+ ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
if (Instrs.empty())
return CU::UNWIND_ARM64_MODE_FRAMELESS;
+ if (!isDarwinCanonicalPersonality(FI->Personality) &&
+ !Ctxt->emitCompactUnwindNonCanonical())
+ return CU::UNWIND_ARM64_MODE_DWARF;
bool HasFP = false;
unsigned StackSize = 0;
@@ -592,17 +635,18 @@ public:
if (XReg != AArch64::FP)
return CU::UNWIND_ARM64_MODE_DWARF;
- assert(XReg == AArch64::FP && "Invalid frame pointer!");
- assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");
+ if (i + 2 >= e)
+ return CU::UNWIND_ARM64_MODE_DWARF;
const MCCFIInstruction &LRPush = Instrs[++i];
- assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
- "Link register not pushed!");
+ if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
+ return CU::UNWIND_ARM64_MODE_DWARF;
const MCCFIInstruction &FPPush = Instrs[++i];
- assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
- "Frame pointer not pushed!");
+ if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
+ return CU::UNWIND_ARM64_MODE_DWARF;
- assert(FPPush.getOffset() + 8 == LRPush.getOffset());
+ if (FPPush.getOffset() + 8 != LRPush.getOffset())
+ return CU::UNWIND_ARM64_MODE_DWARF;
CurOffset = FPPush.getOffset();
unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
@@ -611,8 +655,8 @@ public:
LRReg = getXRegFromWReg(LRReg);
FPReg = getXRegFromWReg(FPReg);
- assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
- "Pushing invalid registers for frame!");
+ if (LRReg != AArch64::LR || FPReg != AArch64::FP)
+ return CU::UNWIND_ARM64_MODE_DWARF;
// Indicate that the function has a frame.
CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
@@ -620,7 +664,8 @@ public:
break;
}
case MCCFIInstruction::OpDefCfaOffset: {
- assert(StackSize == 0 && "We already have the CFA offset!");
+ if (StackSize != 0)
+ return CU::UNWIND_ARM64_MODE_DWARF;
StackSize = std::abs(Inst.getOffset());
break;
}
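The compact-unwind hunks above convert a series of assertions into early returns of CU::UNWIND_ARM64_MODE_DWARF, so CFI sequences the compact format cannot describe fall back to DWARF unwind info instead of crashing the assembler. A hypothetical, condensed illustration of that shape (not the LLVM code; names invented):

#include <vector>

enum class UnwindMode { Frameless, Frame, Dwarf };

struct CFIOffsetPush { int Offset; };

// Validate-or-degrade: anything unexpected reports "needs DWARF" rather than
// asserting.
static UnwindMode classifyFramePushes(const std::vector<CFIOffsetPush> &Pushes) {
  if (Pushes.empty())
    return UnwindMode::Frameless;
  if (Pushes.size() < 2)
    return UnwindMode::Dwarf; // too few CFI instructions: degrade
  if (Pushes[1].Offset + 8 != Pushes[0].Offset)
    return UnwindMode::Dwarf; // FP/LR not in adjacent slots: degrade
  return UnwindMode::Frame;
}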
@@ -743,7 +788,7 @@ public:
std::unique_ptr<MCObjectTargetWriter>
createObjectTargetWriter() const override {
- return createAArch64WinCOFFObjectWriter();
+ return createAArch64WinCOFFObjectWriter(TheTriple);
}
};
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
index 2f9c17245b5f..6e074b6a63c4 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -34,9 +34,13 @@ public:
~AArch64ELFObjectWriter() override = default;
+ MCSectionELF *getMemtagRelocsSection(MCContext &Ctx) const override;
+
protected:
unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup, bool IsPCRel) const override;
+ bool needsRelocateWithSymbol(const MCValue &Val, const MCSymbol &Sym,
+ unsigned Type) const override;
bool IsILP32;
};
@@ -116,7 +120,8 @@ unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
assert((!Target.getSymA() ||
Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None ||
- Target.getSymA()->getKind() == MCSymbolRefExpr::VK_PLT) &&
+ Target.getSymA()->getKind() == MCSymbolRefExpr::VK_PLT ||
+ Target.getSymA()->getKind() == MCSymbolRefExpr::VK_GOTPCREL) &&
"Should only be expression-level modifiers here");
assert((!Target.getSymB() ||
@@ -182,6 +187,10 @@ unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
return R_CLS(LD_PREL_LO19);
case AArch64::fixup_aarch64_pcrel_branch14:
return R_CLS(TSTBR14);
+ case AArch64::fixup_aarch64_pcrel_branch16:
+ Ctx.reportError(Fixup.getLoc(),
+ "relocation of PAC/AUT instructions is not supported");
+ return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_pcrel_branch19:
return R_CLS(CONDBR19);
default:
@@ -198,15 +207,22 @@ unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
case FK_Data_2:
return R_CLS(ABS16);
case FK_Data_4:
- return R_CLS(ABS32);
+ return (!IsILP32 &&
+ Target.getAccessVariant() == MCSymbolRefExpr::VK_GOTPCREL)
+ ? ELF::R_AARCH64_GOTPCREL32
+ : R_CLS(ABS32);
case FK_Data_8:
if (IsILP32) {
Ctx.reportError(Fixup.getLoc(),
"ILP32 8 byte absolute data "
"relocation not supported (LP64 eqv: ABS64)");
return ELF::R_AARCH64_NONE;
- } else
+ } else {
+ if (RefKind == AArch64MCExpr::VK_AUTH ||
+ RefKind == AArch64MCExpr::VK_AUTHADDR)
+ return ELF::R_AARCH64_AUTH_ABS64;
return ELF::R_AARCH64_ABS64;
+ }
case AArch64::fixup_aarch64_add_imm12:
if (RefKind == AArch64MCExpr::VK_DTPREL_HI12)
return R_CLS(TLSLD_ADD_DTPREL_HI12);
@@ -453,6 +469,18 @@ unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
llvm_unreachable("Unimplemented fixup -> relocation");
}
+bool AArch64ELFObjectWriter::needsRelocateWithSymbol(const MCValue &Val,
+ const MCSymbol &,
+ unsigned) const {
+ return (Val.getRefKind() & AArch64MCExpr::VK_GOT) == AArch64MCExpr::VK_GOT;
+}
+
+MCSectionELF *
+AArch64ELFObjectWriter::getMemtagRelocsSection(MCContext &Ctx) const {
+ return Ctx.getELFSection(".memtag.globals.static",
+ ELF::SHT_AARCH64_MEMTAG_GLOBALS_STATIC, 0);
+}
+
std::unique_ptr<MCObjectTargetWriter>
llvm::createAArch64ELFObjectWriter(uint8_t OSABI, bool IsILP32) {
return std::make_unique<AArch64ELFObjectWriter>(OSABI, IsILP32);
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
index 46edb12959d2..ad21f2673a64 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -18,7 +18,6 @@
#include "AArch64WinCOFFStreamer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
@@ -37,6 +36,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
using namespace llvm;
@@ -104,9 +104,50 @@ class AArch64TargetAsmStreamer : public AArch64TargetStreamer {
void emitARM64WinCFITrapFrame() override { OS << "\t.seh_trap_frame\n"; }
void emitARM64WinCFIMachineFrame() override { OS << "\t.seh_pushframe\n"; }
void emitARM64WinCFIContext() override { OS << "\t.seh_context\n"; }
+ void emitARM64WinCFIECContext() override { OS << "\t.seh_ec_context\n"; }
void emitARM64WinCFIClearUnwoundToCall() override {
OS << "\t.seh_clear_unwound_to_call\n";
}
+ void emitARM64WinCFIPACSignLR() override {
+ OS << "\t.seh_pac_sign_lr\n";
+ }
+
+ void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg\tx" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg_p\tx" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegD(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg\td" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegDP(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg_p\td" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegQ(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg\tq" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg_p\tq" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegIX(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg_x\tx" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegIPX(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg_px\tx" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegDX(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg_x\td" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegDPX(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg_px\td" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegQX(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg_x\tq" << Reg << ", " << Offset << "\n";
+ }
+ void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset) override {
+ OS << "\t.seh_save_any_reg_px\tq" << Reg << ", " << Offset << "\n";
+ }
public:
AArch64TargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
index 767dd8805520..fdee2d5ad2bf 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
@@ -43,6 +43,11 @@ enum Fixups {
// The high 14 bits of a 21-bit pc-relative immediate.
fixup_aarch64_pcrel_branch14,
+ // The high 16 bits of an 18-bit unsigned PC-relative immediate. Used by
+ // pointer authentication, only within a function, so no relocation can be
+ // generated.
+ fixup_aarch64_pcrel_branch16,
+
// The high 19 bits of a 21-bit pc-relative immediate. Same encoding as
// fixup_aarch64_pcrel_adrhi, except this is used by b.cc and generates
// relocations directly when necessary.
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
index ee0870d9ef7a..c5de5b4de4ae 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
@@ -59,9 +59,17 @@ bool AArch64InstPrinter::applyTargetSpecificCLOption(StringRef Opt) {
return false;
}
-void AArch64InstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
- // This is for .cfi directives.
- OS << getRegisterName(RegNo);
+void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg) const {
+ markup(OS, Markup::Register) << getRegisterName(Reg);
+}
+
+void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg,
+ unsigned AltIdx) const {
+ markup(OS, Markup::Register) << getRegisterName(Reg, AltIdx);
+}
+
+StringRef AArch64InstPrinter::getRegName(MCRegister Reg) const {
+ return getRegisterName(Reg);
}
void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
@@ -77,6 +85,18 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
return;
}
+ if (Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR)
+ if (printSyspAlias(MI, STI, O)) {
+ printAnnotation(O, Annot);
+ return;
+ }
+
+ // RPRFM overlaps PRFM (reg), so try to print it as RPRFM here.
+ if ((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) {
+ if (printRangePrefetchAlias(MI, STI, O, Annot))
+ return;
+ }
+
// SBFM/UBFM should print to a nicer aliased form if possible.
if (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri ||
Opcode == AArch64::UBFMXri || Opcode == AArch64::UBFMWri) {
@@ -113,8 +133,10 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
}
if (AsmMnemonic) {
- O << '\t' << AsmMnemonic << '\t' << getRegisterName(Op0.getReg())
- << ", " << getRegisterName(getWRegFromXReg(Op1.getReg()));
+ O << '\t' << AsmMnemonic << '\t';
+ printRegName(O, Op0.getReg());
+ O << ", ";
+ printRegName(O, getWRegFromXReg(Op1.getReg()));
printAnnotation(O, Annot);
return;
}
@@ -149,8 +171,12 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
shift = immr;
}
if (AsmMnemonic) {
- O << '\t' << AsmMnemonic << '\t' << getRegisterName(Op0.getReg())
- << ", " << getRegisterName(Op1.getReg()) << ", #" << shift;
+ O << '\t' << AsmMnemonic << '\t';
+ printRegName(O, Op0.getReg());
+ O << ", ";
+ printRegName(O, Op1.getReg());
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << shift;
printAnnotation(O, Annot);
return;
}
@@ -158,17 +184,27 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
// SBFIZ/UBFIZ aliases
if (Op2.getImm() > Op3.getImm()) {
- O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t'
- << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
- << ", #" << (Is64Bit ? 64 : 32) - Op2.getImm() << ", #" << Op3.getImm() + 1;
+ O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t';
+ printRegName(O, Op0.getReg());
+ O << ", ";
+ printRegName(O, Op1.getReg());
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << (Is64Bit ? 64 : 32) - Op2.getImm();
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << Op3.getImm() + 1;
printAnnotation(O, Annot);
return;
}
// Otherwise SBFX/UBFX is the preferred form
- O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t'
- << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
- << ", #" << Op2.getImm() << ", #" << Op3.getImm() - Op2.getImm() + 1;
+ O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t';
+ printRegName(O, Op0.getReg());
+ O << ", ";
+ printRegName(O, Op1.getReg());
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << Op2.getImm();
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << Op3.getImm() - Op2.getImm() + 1;
printAnnotation(O, Annot);
return;
}
@@ -180,15 +216,18 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
int ImmS = MI->getOperand(4).getImm();
if ((Op2.getReg() == AArch64::WZR || Op2.getReg() == AArch64::XZR) &&
- (ImmR == 0 || ImmS < ImmR) &&
- STI.getFeatureBits()[AArch64::HasV8_2aOps]) {
+ (ImmR == 0 || ImmS < ImmR) && STI.hasFeature(AArch64::HasV8_2aOps)) {
// BFC takes precedence over its entire range, slightly differently to BFI.
int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
int LSB = (BitWidth - ImmR) % BitWidth;
int Width = ImmS + 1;
- O << "\tbfc\t" << getRegisterName(Op0.getReg())
- << ", #" << LSB << ", #" << Width;
+ O << "\tbfc\t";
+ printRegName(O, Op0.getReg());
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << LSB;
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << Width;
printAnnotation(O, Annot);
return;
} else if (ImmS < ImmR) {
@@ -197,8 +236,14 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
int LSB = (BitWidth - ImmR) % BitWidth;
int Width = ImmS + 1;
- O << "\tbfi\t" << getRegisterName(Op0.getReg()) << ", "
- << getRegisterName(Op2.getReg()) << ", #" << LSB << ", #" << Width;
+ O << "\tbfi\t";
+ printRegName(O, Op0.getReg());
+ O << ", ";
+ printRegName(O, Op2.getReg());
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << LSB;
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << Width;
printAnnotation(O, Annot);
return;
}
@@ -206,9 +251,14 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
int LSB = ImmR;
int Width = ImmS - ImmR + 1;
// Otherwise BFXIL the preferred form
- O << "\tbfxil\t"
- << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op2.getReg())
- << ", #" << LSB << ", #" << Width;
+ O << "\tbfxil\t";
+ printRegName(O, Op0.getReg());
+ O << ", ";
+ printRegName(O, Op2.getReg());
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << LSB;
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << Width;
printAnnotation(O, Annot);
return;
}
@@ -224,18 +274,46 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
else
O << "\tmovn\t";
- O << getRegisterName(MI->getOperand(0).getReg()) << ", #";
- MI->getOperand(1).getExpr()->print(O, &MAI);
+ printRegName(O, MI->getOperand(0).getReg());
+ O << ", ";
+ {
+ WithMarkup M = markup(O, Markup::Immediate);
+ O << "#";
+ MI->getOperand(1).getExpr()->print(O, &MAI);
+ }
return;
}
if ((Opcode == AArch64::MOVKXi || Opcode == AArch64::MOVKWi) &&
MI->getOperand(2).isExpr()) {
- O << "\tmovk\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #";
- MI->getOperand(2).getExpr()->print(O, &MAI);
+ O << "\tmovk\t";
+ printRegName(O, MI->getOperand(0).getReg());
+ O << ", ";
+ {
+ WithMarkup M = markup(O, Markup::Immediate);
+ O << "#";
+ MI->getOperand(2).getExpr()->print(O, &MAI);
+ }
return;
}
+ auto PrintMovImm = [&](uint64_t Value, int RegWidth) {
+ int64_t SExtVal = SignExtend64(Value, RegWidth);
+ O << "\tmov\t";
+ printRegName(O, MI->getOperand(0).getReg());
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << formatImm(SExtVal);
+ if (CommentStream) {
+ // Do the opposite to that used for instruction operands.
+ if (getPrintImmHex())
+ *CommentStream << '=' << formatDec(SExtVal) << '\n';
+ else {
+ uint64_t Mask = maskTrailingOnes<uint64_t>(RegWidth);
+ *CommentStream << '=' << formatHex(SExtVal & Mask) << '\n';
+ }
+ }
+ };
+
// MOVZ, MOVN and "ORR wzr, #imm" instructions are aliases for MOV, but their
// domains overlap so they need to be prioritized. The chain is "MOVZ lsl #0 >
// MOVZ lsl #N > MOVN lsl #0 > MOVN lsl #N > ORR". The highest instruction
@@ -249,8 +327,7 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
if (AArch64_AM::isMOVZMovAlias(Value, Shift,
Opcode == AArch64::MOVZXi ? 64 : 32)) {
- O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
- << formatImm(SignExtend64(Value, RegWidth));
+ PrintMovImm(Value, RegWidth);
return;
}
}
@@ -264,8 +341,7 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
Value = Value & 0xffffffff;
if (AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth)) {
- O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
- << formatImm(SignExtend64(Value, RegWidth));
+ PrintMovImm(Value, RegWidth);
return;
}
}
@@ -278,18 +354,11 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
uint64_t Value = AArch64_AM::decodeLogicalImmediate(
MI->getOperand(2).getImm(), RegWidth);
if (!AArch64_AM::isAnyMOVWMovAlias(Value, RegWidth)) {
- O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
- << formatImm(SignExtend64(Value, RegWidth));
+ PrintMovImm(Value, RegWidth);
return;
}
}
- if (Opcode == AArch64::CompilerBarrier) {
- O << '\t' << MAI.getCommentString() << " COMPILER BARRIER";
- printAnnotation(O, Annot);
- return;
- }
-
if (Opcode == AArch64::SPACE) {
O << '\t' << MAI.getCommentString() << " SPACE "
<< MI->getOperand(1).getImm();
@@ -705,10 +774,9 @@ static const LdStNInstrDesc LdStNInstInfo[] = {
};
static const LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) {
- unsigned Idx;
- for (Idx = 0; Idx != array_lengthof(LdStNInstInfo); ++Idx)
- if (LdStNInstInfo[Idx].Opcode == Opcode)
- return &LdStNInstInfo[Idx];
+ for (const auto &Info : LdStNInstInfo)
+ if (Info.Opcode == Opcode)
+ return &Info;
return nullptr;
}
@@ -722,14 +790,15 @@ void AArch64AppleInstPrinter::printInst(const MCInst *MI, uint64_t Address,
bool IsTbx;
if (isTblTbxInstruction(MI->getOpcode(), Layout, IsTbx)) {
- O << "\t" << (IsTbx ? "tbx" : "tbl") << Layout << '\t'
- << getRegisterName(MI->getOperand(0).getReg(), AArch64::vreg) << ", ";
+ O << "\t" << (IsTbx ? "tbx" : "tbl") << Layout << '\t';
+ printRegName(O, MI->getOperand(0).getReg(), AArch64::vreg);
+ O << ", ";
unsigned ListOpNum = IsTbx ? 2 : 1;
printVectorList(MI, ListOpNum, STI, O, "");
- O << ", "
- << getRegisterName(MI->getOperand(ListOpNum + 1).getReg(), AArch64::vreg);
+ O << ", ";
+ printRegName(O, MI->getOperand(ListOpNum + 1).getReg(), AArch64::vreg);
printAnnotation(O, Annot);
return;
}
@@ -747,16 +816,20 @@ void AArch64AppleInstPrinter::printInst(const MCInst *MI, uint64_t Address,
// Next the address: [xN]
unsigned AddrReg = MI->getOperand(OpNum++).getReg();
- O << ", [" << getRegisterName(AddrReg) << ']';
+ O << ", [";
+ printRegName(O, AddrReg);
+ O << ']';
// Finally, there might be a post-indexed offset.
if (LdStDesc->NaturalOffset != 0) {
unsigned Reg = MI->getOperand(OpNum++).getReg();
- if (Reg != AArch64::XZR)
- O << ", " << getRegisterName(Reg);
- else {
+ if (Reg != AArch64::XZR) {
+ O << ", ";
+ printRegName(O, Reg);
+ } else {
assert(LdStDesc->NaturalOffset && "no offset on post-inc instruction?");
- O << ", #" << LdStDesc->NaturalOffset;
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << LdStDesc->NaturalOffset;
}
}
@@ -767,6 +840,60 @@ void AArch64AppleInstPrinter::printInst(const MCInst *MI, uint64_t Address,
AArch64InstPrinter::printInst(MI, Address, Annot, STI, O);
}
+StringRef AArch64AppleInstPrinter::getRegName(MCRegister Reg) const {
+ return getRegisterName(Reg);
+}
+
+bool AArch64InstPrinter::printRangePrefetchAlias(const MCInst *MI,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O,
+ StringRef Annot) {
+ unsigned Opcode = MI->getOpcode();
+
+#ifndef NDEBUG
+ assert(((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) &&
+ "Invalid opcode for RPRFM alias!");
+#endif
+
+ unsigned PRFOp = MI->getOperand(0).getImm();
+ unsigned Mask = 0x18; // 0b11000
+ if ((PRFOp & Mask) != Mask)
+ return false; // Rt != '11xxx', it's a PRFM instruction.
+
+ unsigned Rm = MI->getOperand(2).getReg();
+
+ // "Rm" must be a 64-bit GPR for RPRFM.
+ if (MRI.getRegClass(AArch64::GPR32RegClassID).contains(Rm))
+ Rm = MRI.getMatchingSuperReg(Rm, AArch64::sub_32,
+ &MRI.getRegClass(AArch64::GPR64RegClassID));
+
+ unsigned SignExtend = MI->getOperand(3).getImm(); // encoded in "option<2>".
+ unsigned Shift = MI->getOperand(4).getImm(); // encoded in "S".
+
+ assert((SignExtend <= 1) && "sign extend should be a single bit!");
+ assert((Shift <= 1) && "Shift should be a single bit!");
+
+ unsigned Option0 = (Opcode == AArch64::PRFMroX) ? 1 : 0;
+
+ // encoded in "option<2>:option<0>:S:Rt<2:0>".
+ unsigned RPRFOp =
+ (SignExtend << 5) | (Option0 << 4) | (Shift << 3) | (PRFOp & 0x7);
+
+ O << "\trprfm ";
+ if (auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(RPRFOp))
+ O << RPRFM->Name << ", ";
+ else
+ O << "#" << formatImm(RPRFOp) << ", ";
+ O << getRegisterName(Rm);
+ O << ", [";
+ printOperand(MI, 1, STI, O); // "Rn".
+ O << "]";
+
+ printAnnotation(O, Annot);
+
+ return true;
+}
+
bool AArch64InstPrinter::printSysAlias(const MCInst *MI,
const MCSubtargetInfo &STI,
raw_ostream &O) {
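RPRFM shares its encoding space with PRFM (register offset), so printRangePrefetchAlias above reconstructs the 6-bit RPRFM operand from the PRFM fields before looking up the alias name. A worked sketch mirroring that repacking expression:

#include <cstdint>

// Rebuild the RPRFM operand as option<2> : option<0> : S : Rt<2:0>.
constexpr unsigned repackRPRFMOp(unsigned PRFOp, bool IsXForm,
                                 unsigned SignExtend, unsigned Shift) {
  return (SignExtend << 5) | ((IsXForm ? 1u : 0u) << 4) | (Shift << 3) |
         (PRFOp & 0x7);
}

// PRFMroX with Rt = 0b11000, no sign extension, no shift -> encoding 16.
static_assert(repackRPRFMOp(0x18, /*IsXForm=*/true, 0, 0) == 16, "");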
@@ -807,18 +934,23 @@ bool AArch64InstPrinter::printSysAlias(const MCInst *MI,
// Prediction Restriction aliases
case 3: {
Search_PRCTX:
- const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByEncoding(Encoding >> 3);
- if (!PRCTX || !PRCTX->haveFeatures(STI.getFeatureBits()))
+ if (Op1Val != 3 || CnVal != 7 || CmVal != 3)
+ return false;
+
+ const auto Requires =
+ Op2Val == 6 ? AArch64::FeatureSPECRES2 : AArch64::FeaturePredRes;
+ if (!(STI.hasFeature(AArch64::FeatureAll) || STI.hasFeature(Requires)))
return false;
- NeedsReg = PRCTX->NeedsReg;
+ NeedsReg = true;
switch (Op2Val) {
default: return false;
case 4: Ins = "cfp\t"; break;
case 5: Ins = "dvp\t"; break;
+ case 6: Ins = "cosp\t"; break;
case 7: Ins = "cpp\t"; break;
}
- Name = std::string(PRCTX->Name);
+ Name = "RCTX";
}
break;
// IC aliases
@@ -874,8 +1006,70 @@ bool AArch64InstPrinter::printSysAlias(const MCInst *MI,
std::transform(Str.begin(), Str.end(), Str.begin(), ::tolower);
O << '\t' << Str;
- if (NeedsReg)
- O << ", " << getRegisterName(MI->getOperand(4).getReg());
+ if (NeedsReg) {
+ O << ", ";
+ printRegName(O, MI->getOperand(4).getReg());
+ }
+
+ return true;
+}
+
+bool AArch64InstPrinter::printSyspAlias(const MCInst *MI,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+#ifndef NDEBUG
+ unsigned Opcode = MI->getOpcode();
+ assert((Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR) &&
+ "Invalid opcode for SYSP alias!");
+#endif
+
+ const MCOperand &Op1 = MI->getOperand(0);
+ const MCOperand &Cn = MI->getOperand(1);
+ const MCOperand &Cm = MI->getOperand(2);
+ const MCOperand &Op2 = MI->getOperand(3);
+
+ unsigned Op1Val = Op1.getImm();
+ unsigned CnVal = Cn.getImm();
+ unsigned CmVal = Cm.getImm();
+ unsigned Op2Val = Op2.getImm();
+
+ uint16_t Encoding = Op2Val;
+ Encoding |= CmVal << 3;
+ Encoding |= CnVal << 7;
+ Encoding |= Op1Val << 11;
+
+ std::string Ins;
+ std::string Name;
+
+ if (CnVal == 8 || CnVal == 9) {
+ // TLBIP aliases
+
+ if (CnVal == 9) {
+ if (!STI.hasFeature(AArch64::FeatureXS))
+ return false;
+ Encoding &= ~(1 << 7);
+ }
+
+ const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByEncoding(Encoding);
+ if (!TLBI || !TLBI->haveFeatures(STI.getFeatureBits()))
+ return false;
+
+ Ins = "tlbip\t";
+ Name = std::string(TLBI->Name);
+ if (CnVal == 9)
+ Name += "nXS";
+ } else
+ return false;
+
+ std::string Str = Ins + Name;
+ std::transform(Str.begin(), Str.end(), Str.begin(), ::tolower);
+
+ O << '\t' << Str;
+ O << ", ";
+ if (MI->getOperand(4).getReg() == AArch64::XZR)
+ printSyspXzrPair(MI, 4, STI, O);
+ else
+ printGPRSeqPairsClassOperand<64>(MI, 4, STI, O);
return true;
}
@@ -887,7 +1081,7 @@ void AArch64InstPrinter::printMatrix(const MCInst *MI, unsigned OpNum,
const MCOperand &RegOp = MI->getOperand(OpNum);
assert(RegOp.isReg() && "Unexpected operand type!");
- O << getRegisterName(RegOp.getReg());
+ printRegName(O, RegOp.getReg());
switch (EltSize) {
case 0:
break;
@@ -930,7 +1124,7 @@ void AArch64InstPrinter::printMatrixTile(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
const MCOperand &RegOp = MI->getOperand(OpNum);
assert(RegOp.isReg() && "Unexpected operand type!");
- O << getRegisterName(RegOp.getReg());
+ printRegName(O, RegOp.getReg());
}
void AArch64InstPrinter::printSVCROp(const MCInst *MI, unsigned OpNum,
@@ -950,7 +1144,7 @@ void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
unsigned Reg = Op.getReg();
- O << getRegisterName(Reg);
+ printRegName(O, Reg);
} else if (Op.isImm()) {
printImm(MI, OpNo, STI, O);
} else {
@@ -963,14 +1157,14 @@ void AArch64InstPrinter::printImm(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
- O << "#" << formatImm(Op.getImm());
+ markup(O, Markup::Immediate) << "#" << formatImm(Op.getImm());
}
void AArch64InstPrinter::printImmHex(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
- O << format("#%#llx", Op.getImm());
+ markup(O, Markup::Immediate) << format("#%#llx", Op.getImm());
}
template<int Size>
@@ -979,11 +1173,11 @@ void AArch64InstPrinter::printSImm(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Size == 8)
- O << "#" << formatImm((signed char)Op.getImm());
+ markup(O, Markup::Immediate) << "#" << formatImm((signed char)Op.getImm());
else if (Size == 16)
- O << "#" << formatImm((signed short)Op.getImm());
+ markup(O, Markup::Immediate) << "#" << formatImm((signed short)Op.getImm());
else
- O << "#" << formatImm(Op.getImm());
+ markup(O, Markup::Immediate) << "#" << formatImm(Op.getImm());
}
void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
@@ -992,9 +1186,9 @@ void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
if (Op.isReg()) {
unsigned Reg = Op.getReg();
if (Reg == AArch64::XZR)
- O << "#" << Imm;
+ markup(O, Markup::Immediate) << "#" << Imm;
else
- O << getRegisterName(Reg);
+ printRegName(O, Reg);
} else
llvm_unreachable("unknown operand kind in printPostIncOperand64");
}
@@ -1005,7 +1199,7 @@ void AArch64InstPrinter::printVRegOperand(const MCInst *MI, unsigned OpNo,
const MCOperand &Op = MI->getOperand(OpNo);
assert(Op.isReg() && "Non-register vreg operand!");
unsigned Reg = Op.getReg();
- O << getRegisterName(Reg, AArch64::vreg);
+ printRegName(O, Reg, AArch64::vreg);
}
void AArch64InstPrinter::printSysCROperand(const MCInst *MI, unsigned OpNo,
@@ -1025,7 +1219,7 @@ void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
assert(Val == MO.getImm() && "Add/sub immediate out of range!");
unsigned Shift =
AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm());
- O << '#' << formatImm(Val);
+ markup(O, Markup::Immediate) << '#' << formatImm(Val);
if (Shift != 0) {
printShifter(MI, OpNum + 1, STI, O);
if (CommentStream)
@@ -1043,6 +1237,7 @@ void AArch64InstPrinter::printLogicalImm(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
uint64_t Val = MI->getOperand(OpNum).getImm();
+ WithMarkup M = markup(O, Markup::Immediate);
O << "#0x";
O.write_hex(AArch64_AM::decodeLogicalImmediate(Val, 8 * sizeof(T)));
}
@@ -1056,20 +1251,21 @@ void AArch64InstPrinter::printShifter(const MCInst *MI, unsigned OpNum,
AArch64_AM::getShiftValue(Val) == 0)
return;
O << ", " << AArch64_AM::getShiftExtendName(AArch64_AM::getShiftType(Val))
- << " #" << AArch64_AM::getShiftValue(Val);
+ << " ";
+ markup(O, Markup::Immediate) << "#" << AArch64_AM::getShiftValue(Val);
}
void AArch64InstPrinter::printShiftedRegister(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- O << getRegisterName(MI->getOperand(OpNum).getReg());
+ printRegName(O, MI->getOperand(OpNum).getReg());
printShifter(MI, OpNum + 1, STI, O);
}
void AArch64InstPrinter::printExtendedRegister(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- O << getRegisterName(MI->getOperand(OpNum).getReg());
+ printRegName(O, MI->getOperand(OpNum).getReg());
printArithExtend(MI, OpNum + 1, STI, O);
}
@@ -1090,19 +1286,23 @@ void AArch64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
ExtType == AArch64_AM::UXTX) ||
((Dest == AArch64::WSP || Src1 == AArch64::WSP) &&
ExtType == AArch64_AM::UXTW) ) {
- if (ShiftVal != 0)
- O << ", lsl #" << ShiftVal;
+ if (ShiftVal != 0) {
+ O << ", lsl ";
+ markup(O, Markup::Immediate) << "#" << ShiftVal;
+ }
return;
}
}
O << ", " << AArch64_AM::getShiftExtendName(ExtType);
- if (ShiftVal != 0)
- O << " #" << ShiftVal;
+ if (ShiftVal != 0) {
+ O << " ";
+ markup(O, Markup::Immediate) << "#" << ShiftVal;
+ }
}
-static void printMemExtendImpl(bool SignExtend, bool DoShift,
- unsigned Width, char SrcRegKind,
- raw_ostream &O) {
+void AArch64InstPrinter::printMemExtendImpl(bool SignExtend, bool DoShift,
+ unsigned Width, char SrcRegKind,
+ raw_ostream &O) {
// sxtw, sxtx, uxtw or lsl (== uxtx)
bool IsLSL = !SignExtend && SrcRegKind == 'x';
if (IsLSL)
@@ -1110,8 +1310,10 @@ static void printMemExtendImpl(bool SignExtend, bool DoShift,
else
O << (SignExtend ? 's' : 'u') << "xt" << SrcRegKind;
- if (DoShift || IsLSL)
- O << " #" << Log2_32(Width / 8);
+ if (DoShift || IsLSL) {
+ O << " ";
+ markup(O, Markup::Immediate) << "#" << Log2_32(Width / 8);
+ }
}
void AArch64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
@@ -1140,6 +1342,36 @@ void AArch64InstPrinter::printRegWithShiftExtend(const MCInst *MI,
}
}
+template <int EltSize>
+void AArch64InstPrinter::printPredicateAsCounter(const MCInst *MI,
+ unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ if (Reg < AArch64::PN0 || Reg > AArch64::PN15)
+ llvm_unreachable("Unsupported predicate-as-counter register");
+ O << "pn" << Reg - AArch64::PN0;
+
+ switch (EltSize) {
+ case 0:
+ break;
+ case 8:
+ O << ".b";
+ break;
+ case 16:
+ O << ".h";
+ break;
+ case 32:
+ O << ".s";
+ break;
+ case 64:
+ O << ".d";
+ break;
+ default:
+ llvm_unreachable("Unsupported element size");
+ }
+}
+
void AArch64InstPrinter::printCondCode(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
@@ -1157,21 +1389,33 @@ void AArch64InstPrinter::printInverseCondCode(const MCInst *MI, unsigned OpNum,
void AArch64InstPrinter::printAMNoIndex(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- O << '[' << getRegisterName(MI->getOperand(OpNum).getReg()) << ']';
+ O << '[';
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << ']';
}
-template<int Scale>
+template <int Scale>
void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- O << '#' << formatImm(Scale * MI->getOperand(OpNum).getImm());
+ markup(O, Markup::Immediate)
+ << '#' << formatImm(Scale * MI->getOperand(OpNum).getImm());
+}
+
+template <int Scale, int Offset>
+void AArch64InstPrinter::printImmRangeScale(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned FirstImm = Scale * MI->getOperand(OpNum).getImm();
+ O << formatImm(FirstImm);
+ O << ":" << formatImm(FirstImm + Offset);
}
void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
unsigned Scale, raw_ostream &O) {
const MCOperand MO = MI->getOperand(OpNum);
if (MO.isImm()) {
- O << "#" << formatImm(MO.getImm() * Scale);
+ markup(O, Markup::Immediate) << '#' << formatImm(MO.getImm() * Scale);
} else {
assert(MO.isExpr() && "Unexpected operand type!");
MO.getExpr()->print(O, &MAI);
@@ -1181,9 +1425,11 @@ void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
unsigned Scale, raw_ostream &O) {
const MCOperand MO1 = MI->getOperand(OpNum + 1);
- O << '[' << getRegisterName(MI->getOperand(OpNum).getReg());
+ O << '[';
+ printRegName(O, MI->getOperand(OpNum).getReg());
if (MO1.isImm()) {
- O << ", #" << formatImm(MO1.getImm() * Scale);
+ O << ", ";
+ markup(O, Markup::Immediate) << "#" << formatImm(MO1.getImm() * Scale);
} else {
assert(MO1.isExpr() && "Unexpected operand type!");
O << ", ";
@@ -1192,6 +1438,18 @@ void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
O << ']';
}
+void AArch64InstPrinter::printRPRFMOperand(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned prfop = MI->getOperand(OpNum).getImm();
+ if (auto PRFM = AArch64RPRFM::lookupRPRFMByEncoding(prfop)) {
+ O << PRFM->Name;
+ return;
+ }
+
+ O << '#' << formatImm(prfop);
+}
+
template <bool IsSVEPrefetch>
void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
@@ -1202,12 +1460,15 @@ void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
O << PRFM->Name;
return;
}
- } else if (auto PRFM = AArch64PRFM::lookupPRFMByEncoding(prfop)) {
- O << PRFM->Name;
- return;
+ } else {
+ auto PRFM = AArch64PRFM::lookupPRFMByEncoding(prfop);
+ if (PRFM && PRFM->haveFeatures(STI.getFeatureBits())) {
+ O << PRFM->Name;
+ return;
+ }
}
- O << '#' << formatImm(prfop);
+ markup(O, Markup::Immediate) << '#' << formatImm(prfop);
}
void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
@@ -1218,7 +1479,7 @@ void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
if (PSB)
O << PSB->Name;
else
- O << '#' << formatImm(psbhintop);
+ markup(O, Markup::Immediate) << '#' << formatImm(psbhintop);
}
void AArch64InstPrinter::printBTIHintOp(const MCInst *MI, unsigned OpNum,
@@ -1229,7 +1490,7 @@ void AArch64InstPrinter::printBTIHintOp(const MCInst *MI, unsigned OpNum,
if (BTI)
O << BTI->Name;
else
- O << '#' << formatImm(btihintop);
+ markup(O, Markup::Immediate) << '#' << formatImm(btihintop);
}
void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
@@ -1240,7 +1501,7 @@ void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
: AArch64_AM::getFPImmFloat(MO.getImm());
// 8 decimal places are enough to perfectly represent permitted floats.
- O << format("#%.8f", FPImm);
+ markup(O, Markup::Immediate) << format("#%.8f", FPImm);
}
static unsigned getNextVectorRegister(unsigned Reg, unsigned Stride = 1) {
@@ -1318,6 +1579,23 @@ static unsigned getNextVectorRegister(unsigned Reg, unsigned Stride = 1) {
case AArch64::Z31:
Reg = AArch64::Z0;
break;
+ case AArch64::P0: Reg = AArch64::P1; break;
+ case AArch64::P1: Reg = AArch64::P2; break;
+ case AArch64::P2: Reg = AArch64::P3; break;
+ case AArch64::P3: Reg = AArch64::P4; break;
+ case AArch64::P4: Reg = AArch64::P5; break;
+ case AArch64::P5: Reg = AArch64::P6; break;
+ case AArch64::P6: Reg = AArch64::P7; break;
+ case AArch64::P7: Reg = AArch64::P8; break;
+ case AArch64::P8: Reg = AArch64::P9; break;
+ case AArch64::P9: Reg = AArch64::P10; break;
+ case AArch64::P10: Reg = AArch64::P11; break;
+ case AArch64::P11: Reg = AArch64::P12; break;
+ case AArch64::P12: Reg = AArch64::P13; break;
+ case AArch64::P13: Reg = AArch64::P14; break;
+ case AArch64::P14: Reg = AArch64::P15; break;
+ // Vector lists can wrap around.
+ case AArch64::P15: Reg = AArch64::P0; break;
}
}
return Reg;
@@ -1337,14 +1615,11 @@ void AArch64InstPrinter::printGPRSeqPairsClassOperand(const MCInst *MI,
unsigned Even = MRI.getSubReg(Reg, Sube);
unsigned Odd = MRI.getSubReg(Reg, Subo);
- O << getRegisterName(Even) << ", " << getRegisterName(Odd);
+ printRegName(O, Even);
+ O << ", ";
+ printRegName(O, Odd);
}
-static const unsigned MatrixZADRegisterTable[] = {
- AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
- AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7
-};
-
void AArch64InstPrinter::printMatrixTileList(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
@@ -1362,7 +1637,7 @@ void AArch64InstPrinter::printMatrixTileList(const MCInst *MI, unsigned OpNum,
unsigned Reg = RegMask & (1 << I);
if (Reg == 0)
continue;
- O << getRegisterName(MatrixZADRegisterTable[I]);
+ printRegName(O, AArch64::ZAD0 + I);
if (Printed + 1 != NumRegs)
O << ", ";
++Printed;
@@ -1383,7 +1658,9 @@ void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
unsigned NumRegs = 1;
if (MRI.getRegClass(AArch64::DDRegClassID).contains(Reg) ||
MRI.getRegClass(AArch64::ZPR2RegClassID).contains(Reg) ||
- MRI.getRegClass(AArch64::QQRegClassID).contains(Reg))
+ MRI.getRegClass(AArch64::QQRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::PPR2RegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::ZPR2StridedRegClassID).contains(Reg))
NumRegs = 2;
else if (MRI.getRegClass(AArch64::DDDRegClassID).contains(Reg) ||
MRI.getRegClass(AArch64::ZPR3RegClassID).contains(Reg) ||
@@ -1391,9 +1668,16 @@ void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
NumRegs = 3;
else if (MRI.getRegClass(AArch64::DDDDRegClassID).contains(Reg) ||
MRI.getRegClass(AArch64::ZPR4RegClassID).contains(Reg) ||
- MRI.getRegClass(AArch64::QQQQRegClassID).contains(Reg))
+ MRI.getRegClass(AArch64::QQQQRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::ZPR4StridedRegClassID).contains(Reg))
NumRegs = 4;
+ unsigned Stride = 1;
+ if (MRI.getRegClass(AArch64::ZPR2StridedRegClassID).contains(Reg))
+ Stride = 8;
+ else if (MRI.getRegClass(AArch64::ZPR4StridedRegClassID).contains(Reg))
+ Stride = 4;
+
// Now forget about the list and find out what the first register is.
if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::dsub0))
Reg = FirstReg;
@@ -1401,6 +1685,8 @@ void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
Reg = FirstReg;
else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::zsub0))
Reg = FirstReg;
+ else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::psub0))
+ Reg = FirstReg;
// If it's a D-reg, we need to promote it to the equivalent Q-reg before
// printing (otherwise getRegisterName fails).
@@ -1410,16 +1696,35 @@ void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
Reg = MRI.getMatchingSuperReg(Reg, AArch64::dsub, &FPR128RC);
}
- for (unsigned i = 0; i < NumRegs; ++i, Reg = getNextVectorRegister(Reg)) {
- if (MRI.getRegClass(AArch64::ZPRRegClassID).contains(Reg))
- O << getRegisterName(Reg) << LayoutSuffix;
- else
- O << getRegisterName(Reg, AArch64::vreg) << LayoutSuffix;
-
- if (i + 1 != NumRegs)
- O << ", ";
+ if ((MRI.getRegClass(AArch64::ZPRRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::PPRRegClassID).contains(Reg)) &&
+ NumRegs > 1 && Stride == 1 &&
+ // Do not print the range when the last register is lower than the first,
+ // because the list wraps around.
+ Reg < getNextVectorRegister(Reg, NumRegs - 1)) {
+ printRegName(O, Reg);
+ O << LayoutSuffix;
+ if (NumRegs > 1) {
+ // Set of two sve registers should be separated by ','
+ StringRef split_char = NumRegs == 2 ? ", " : " - ";
+ O << split_char;
+ printRegName(O, (getNextVectorRegister(Reg, NumRegs - 1)));
+ O << LayoutSuffix;
+ }
+ } else {
+ for (unsigned i = 0; i < NumRegs;
+ ++i, Reg = getNextVectorRegister(Reg, Stride)) {
+ // wrap-around sve register
+ if (MRI.getRegClass(AArch64::ZPRRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::PPRRegClassID).contains(Reg))
+ printRegName(O, Reg);
+ else
+ printRegName(O, Reg, AArch64::vreg);
+ O << LayoutSuffix;
+ if (i + 1 != NumRegs)
+ O << ", ";
+ }
}
-
O << " }";
}
@@ -1435,6 +1740,10 @@ template <unsigned NumLanes, char LaneKind>
void AArch64InstPrinter::printTypedVectorList(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
+ if (LaneKind == 0) {
+ printVectorList(MI, OpNum, STI, O, "");
+ return;
+ }
std::string Suffix(".");
if (NumLanes)
Suffix += itostr(NumLanes) + LaneKind;
@@ -1444,16 +1753,18 @@ void AArch64InstPrinter::printTypedVectorList(const MCInst *MI, unsigned OpNum,
printVectorList(MI, OpNum, STI, O, Suffix);
}
+template <unsigned Scale>
void AArch64InstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- O << "[" << MI->getOperand(OpNum).getImm() << "]";
+ O << "[" << Scale * MI->getOperand(OpNum).getImm() << "]";
}
+template <unsigned Scale>
void AArch64InstPrinter::printMatrixIndex(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- O << MI->getOperand(OpNum).getImm();
+ O << Scale * MI->getOperand(OpNum).getImm();
}
void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, uint64_t Address,
@@ -1467,9 +1778,9 @@ void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, uint64_t Address,
if (Op.isImm()) {
int64_t Offset = Op.getImm() * 4;
if (PrintBranchImmAsAddress)
- O << formatHex(Address + Offset);
+ markup(O, Markup::Target) << formatHex(Address + Offset);
else
- O << "#" << formatImm(Offset);
+ markup(O, Markup::Immediate) << "#" << formatImm(Offset);
return;
}
@@ -1478,27 +1789,32 @@ void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, uint64_t Address,
dyn_cast<MCConstantExpr>(MI->getOperand(OpNum).getExpr());
int64_t TargetAddress;
if (BranchTarget && BranchTarget->evaluateAsAbsolute(TargetAddress)) {
- O << formatHex(TargetAddress);
+ markup(O, Markup::Target) << formatHex((uint64_t)TargetAddress);
} else {
// Otherwise, just print the expression.
MI->getOperand(OpNum).getExpr()->print(O, &MAI);
}
}
-void AArch64InstPrinter::printAdrpLabel(const MCInst *MI, uint64_t Address,
- unsigned OpNum,
- const MCSubtargetInfo &STI,
- raw_ostream &O) {
+void AArch64InstPrinter::printAdrAdrpLabel(const MCInst *MI, uint64_t Address,
+ unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNum);
// If the label has already been resolved to an immediate offset (say, when
// we're running the disassembler), just print the immediate.
if (Op.isImm()) {
- const int64_t Offset = Op.getImm() * 4096;
+ int64_t Offset = Op.getImm();
+ if (MI->getOpcode() == AArch64::ADRP) {
+ Offset = Offset * 4096;
+ Address = Address & -4096;
+ }
+ WithMarkup M = markup(O, Markup::Immediate);
if (PrintBranchImmAsAddress)
- O << formatHex((Address & -4096) + Offset);
+ markup(O, Markup::Target) << formatHex(Address + Offset);
else
- O << "#" << Offset;
+ markup(O, Markup::Immediate) << "#" << Offset;
return;
}
@@ -1526,7 +1842,7 @@ void AArch64InstPrinter::printBarrierOption(const MCInst *MI, unsigned OpNo,
if (!Name.empty())
O << Name;
else
- O << "#" << Val;
+ markup(O, Markup::Immediate) << "#" << Val;
}
void AArch64InstPrinter::printBarriernXSOption(const MCInst *MI, unsigned OpNo,
@@ -1542,7 +1858,7 @@ void AArch64InstPrinter::printBarriernXSOption(const MCInst *MI, unsigned OpNo,
if (!Name.empty())
O << Name;
else
- O << "#" << Val;
+ markup(O, Markup::Immediate) << "#" << Val;
}
static bool isValidSysReg(const AArch64SysReg::SysReg *Reg, bool Read,
@@ -1626,9 +1942,12 @@ void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
- auto PState = AArch64PState::lookupPStateByEncoding(Val);
- if (PState && PState->haveFeatures(STI.getFeatureBits()))
- O << PState->Name;
+ auto PStateImm15 = AArch64PState::lookupPStateImm0_15ByEncoding(Val);
+ auto PStateImm1 = AArch64PState::lookupPStateImm0_1ByEncoding(Val);
+ if (PStateImm15 && PStateImm15->haveFeatures(STI.getFeatureBits()))
+ O << PStateImm15->Name;
+ else if (PStateImm1 && PStateImm1->haveFeatures(STI.getFeatureBits()))
+ O << PStateImm1->Name;
else
O << "#" << formatImm(Val);
}
@@ -1638,7 +1957,7 @@ void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
unsigned RawVal = MI->getOperand(OpNo).getImm();
uint64_t Val = AArch64_AM::decodeAdvSIMDModImmType10(RawVal);
- O << format("#%#016llx", Val);
+ markup(O, Markup::Immediate) << format("#%#016llx", Val);
}
template<int64_t Angle, int64_t Remainder>
@@ -1646,7 +1965,7 @@ void AArch64InstPrinter::printComplexRotationOp(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
- O << "#" << (Val * Angle) + Remainder;
+ markup(O, Markup::Immediate) << "#" << (Val * Angle) + Remainder;
}
void AArch64InstPrinter::printSVEPattern(const MCInst *MI, unsigned OpNum,
@@ -1656,7 +1975,22 @@ void AArch64InstPrinter::printSVEPattern(const MCInst *MI, unsigned OpNum,
if (auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByEncoding(Val))
O << Pat->Name;
else
- O << '#' << formatImm(Val);
+ markup(O, Markup::Immediate) << '#' << formatImm(Val);
+}
+
+void AArch64InstPrinter::printSVEVecLenSpecifier(const MCInst *MI,
+ unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNum).getImm();
+ // Pattern has only 1 bit
+ if (Val > 1)
+ llvm_unreachable("Invalid vector length specifier");
+ if (auto Pat =
+ AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByEncoding(Val))
+ O << Pat->Name;
+ else
+ llvm_unreachable("Invalid vector length specifier");
}
template <char suffix>
@@ -1675,7 +2009,7 @@ void AArch64InstPrinter::printSVERegOp(const MCInst *MI, unsigned OpNum,
}
unsigned Reg = MI->getOperand(OpNum).getReg();
- O << getRegisterName(Reg);
+ printRegName(O, Reg);
if (suffix != 0)
O << '.' << suffix;
}
@@ -1685,9 +2019,9 @@ void AArch64InstPrinter::printImmSVE(T Value, raw_ostream &O) {
std::make_unsigned_t<T> HexValue = Value;
if (getPrintImmHex())
- O << '#' << formatHex((uint64_t)HexValue);
+ markup(O, Markup::Immediate) << '#' << formatHex((uint64_t)HexValue);
else
- O << '#' << formatDec(Value);
+ markup(O, Markup::Immediate) << '#' << formatDec(Value);
if (CommentStream) {
// Do the opposite to that used for instruction operands.
@@ -1709,7 +2043,7 @@ void AArch64InstPrinter::printImm8OptLsl(const MCInst *MI, unsigned OpNum,
// #0 lsl #8 is never pretty printed
if ((UnscaledVal == 0) && (AArch64_AM::getShiftValue(Shift) != 0)) {
- O << '#' << formatImm(UnscaledVal);
+ markup(O, Markup::Immediate) << '#' << formatImm(UnscaledVal);
printShifter(MI, OpNum + 1, STI, O);
return;
}
@@ -1739,7 +2073,7 @@ void AArch64InstPrinter::printSVELogicalImm(const MCInst *MI, unsigned OpNum,
else if ((uint16_t)PrintVal == PrintVal)
printImmSVE(PrintVal, O);
else
- O << '#' << formatHex((uint64_t)PrintVal);
+ markup(O, Markup::Immediate) << '#' << formatHex((uint64_t)PrintVal);
}
template <int Width>
@@ -1757,7 +2091,7 @@ void AArch64InstPrinter::printZPRasFPR(const MCInst *MI, unsigned OpNum,
llvm_unreachable("Unsupported width");
}
unsigned Reg = MI->getOperand(OpNum).getReg();
- O << getRegisterName(Reg - AArch64::Z0 + Base);
+ printRegName(O, Reg - AArch64::Z0 + Base);
}
template <unsigned ImmIs0, unsigned ImmIs1>
@@ -1767,19 +2101,29 @@ void AArch64InstPrinter::printExactFPImm(const MCInst *MI, unsigned OpNum,
auto *Imm0Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmIs0);
auto *Imm1Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmIs1);
unsigned Val = MI->getOperand(OpNum).getImm();
- O << "#" << (Val ? Imm1Desc->Repr : Imm0Desc->Repr);
+ markup(O, Markup::Immediate)
+ << "#" << (Val ? Imm1Desc->Repr : Imm0Desc->Repr);
}
void AArch64InstPrinter::printGPR64as32(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Reg = MI->getOperand(OpNum).getReg();
- O << getRegisterName(getWRegFromXReg(Reg));
+ printRegName(O, getWRegFromXReg(Reg));
}
void AArch64InstPrinter::printGPR64x8(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned Reg = MI->getOperand(OpNum).getReg();
- O << getRegisterName(MRI.getSubReg(Reg, AArch64::x8sub_0));
+ printRegName(O, MRI.getSubReg(Reg, AArch64::x8sub_0));
+}
+
+void AArch64InstPrinter::printSyspXzrPair(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ assert(Reg == AArch64::XZR &&
+ "MC representation of SyspXzrPair should be XZR");
+ O << getRegisterName(Reg) << ", " << getRegisterName(Reg);
}
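
For readers following the printVectorList hunks above, here is a minimal standalone sketch of the printing rule they implement: contiguous multi-register lists collapse to a range, pairs keep a comma, and strided or wrap-around lists are spelled out register by register. printZList and the plain integer register numbers are illustrative assumptions, not LLVM code.

// Standalone sketch of the SVE register-list printing rule in the hunks above.
// printZList is a hypothetical helper; LLVM itself works on MCRegister values.
#include <cstdio>
#include <string>

static std::string printZList(unsigned First, unsigned NumRegs,
                              unsigned Stride, const char *Suffix) {
  std::string S = "{ ";
  unsigned Last = (First + (NumRegs - 1) * Stride) % 32; // 32 Z registers, wraps
  if (NumRegs > 1 && Stride == 1 && First < Last) {
    // Contiguous list that does not wrap: pairs use ", ", longer lists " - ".
    const char *Sep = NumRegs == 2 ? ", " : " - ";
    S += "z" + std::to_string(First) + Suffix + Sep +
         "z" + std::to_string(Last) + Suffix;
  } else {
    // Strided or wrap-around list: every register is printed individually.
    for (unsigned I = 0; I < NumRegs; ++I) {
      S += "z" + std::to_string((First + I * Stride) % 32) + Suffix;
      if (I + 1 != NumRegs)
        S += ", ";
    }
  }
  return S + " }";
}

int main() {
  std::printf("%s\n", printZList(0, 4, 1, ".d").c_str());  // { z0.d - z3.d }
  std::printf("%s\n", printZList(0, 2, 8, ".s").c_str());  // { z0.s, z8.s }
  std::printf("%s\n", printZList(31, 2, 1, ".b").c_str()); // { z31.b, z0.b }
  return 0;
}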
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h
index d36fb30a0ce6..9dccdf42361b 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h
@@ -29,7 +29,8 @@ public:
void printInst(const MCInst *MI, uint64_t Address, StringRef Annot,
const MCSubtargetInfo &STI, raw_ostream &O) override;
- void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printRegName(raw_ostream &OS, MCRegister Reg) const override;
+ void printRegName(raw_ostream &OS, MCRegister Reg, unsigned AltIdx) const;
// Autogenerated by tblgen.
std::pair<const char *, uint64_t> getMnemonic(const MCInst *MI) override;
@@ -42,16 +43,18 @@ public:
const MCSubtargetInfo &STI,
raw_ostream &O);
- virtual StringRef getRegName(unsigned RegNo) const {
- return getRegisterName(RegNo);
- }
+ virtual StringRef getRegName(MCRegister Reg) const;
- static const char *getRegisterName(unsigned RegNo,
+ static const char *getRegisterName(MCRegister Reg,
unsigned AltIdx = AArch64::NoRegAltName);
protected:
bool printSysAlias(const MCInst *MI, const MCSubtargetInfo &STI,
raw_ostream &O);
+ bool printSyspAlias(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ bool printRangePrefetchAlias(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O, StringRef Annot);
// Operand printers
void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
@@ -88,7 +91,8 @@ protected:
const MCSubtargetInfo &STI, raw_ostream &O);
void printArithExtend(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
-
+ void printMemExtendImpl(bool SignExtend, bool DoShift, unsigned Width,
+ char SrcRegKind, raw_ostream &O);
void printMemExtend(const MCInst *MI, unsigned OpNum, raw_ostream &O,
char SrcRegKind, unsigned Width);
template <char SrcRegKind, unsigned Width>
@@ -129,10 +133,17 @@ protected:
void printImmScale(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
+ template <int Scale, int Offset>
+ void printImmRangeScale(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
template <bool IsSVEPrefetch = false>
void printPrefetchOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
+ void printRPRFMOperand(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
void printPSBHintOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
@@ -159,12 +170,14 @@ protected:
void printTypedVectorList(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
+ template <unsigned Scale = 1>
void printVectorIndex(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
+ template <unsigned Scale = 1>
void printMatrixIndex(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
- void printAdrpLabel(const MCInst *MI, uint64_t Address, unsigned OpNum,
- const MCSubtargetInfo &STI, raw_ostream &O);
+ void printAdrAdrpLabel(const MCInst *MI, uint64_t Address, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
void printBarrierOption(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
void printBarriernXSOption(const MCInst *MI, unsigned OpNum,
@@ -177,6 +190,9 @@ protected:
const MCSubtargetInfo &STI, raw_ostream &O);
void printSIMDType10Operand(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
+ template <int EltSize>
+ void printPredicateAsCounter(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
template<int64_t Angle, int64_t Remainder>
void printComplexRotationOp(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI, raw_ostream &O);
@@ -192,6 +208,8 @@ protected:
const MCSubtargetInfo &STI, raw_ostream &O);
void printSVEPattern(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
+ void printSVEVecLenSpecifier(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
template <bool IsVertical>
void printMatrixTileVector(const MCInst *MI, unsigned OpNum,
@@ -210,6 +228,8 @@ protected:
const MCSubtargetInfo &STI, raw_ostream &O);
void printGPR64x8(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
+ void printSyspXzrPair(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
template <int Width>
void printZPRasFPR(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
@@ -236,11 +256,9 @@ public:
const MCSubtargetInfo &STI,
raw_ostream &O) override;
- StringRef getRegName(unsigned RegNo) const override {
- return getRegisterName(RegNo);
- }
+ StringRef getRegName(MCRegister Reg) const override;
- static const char *getRegisterName(unsigned RegNo,
+ static const char *getRegisterName(MCRegister Reg,
unsigned AltIdx = AArch64::NoRegAltName);
};
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
index 68c721cb0d72..62eac059371e 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
@@ -11,11 +11,11 @@
//===----------------------------------------------------------------------===//
#include "AArch64MCAsmInfo.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/TargetParser/Triple.h"
using namespace llvm;
enum AsmWriterVariantTy {
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
index ad97071434df..c3e12b6d8024 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -16,6 +16,7 @@
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
@@ -42,11 +43,9 @@ namespace {
class AArch64MCCodeEmitter : public MCCodeEmitter {
MCContext &Ctx;
- const MCInstrInfo &MCII;
public:
- AArch64MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
- : Ctx(ctx), MCII(mcii) {}
+ AArch64MCCodeEmitter(const MCInstrInfo &, MCContext &ctx) : Ctx(ctx) {}
AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
void operator=(const AArch64MCCodeEmitter &) = delete;
~AArch64MCCodeEmitter() override = default;
@@ -89,6 +88,12 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ /// getPAuthPCRelOpValue - Return the encoded value for a pointer
+ /// authentication pc-relative operand.
+ uint32_t getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
@@ -172,7 +177,7 @@ public:
unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
const MCSubtargetInfo &STI) const;
- void encodeInstruction(const MCInst &MI, raw_ostream &OS,
+ void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
@@ -186,18 +191,28 @@ public:
unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
const MCSubtargetInfo &STI) const;
+ template <unsigned Multiple>
+ uint32_t EncodeRegAsMultipleOf(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ uint32_t EncodeZPR2StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t EncodeZPR4StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
uint32_t EncodeMatrixTileListRegisterClass(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ template <unsigned BaseReg>
uint32_t encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
-
-private:
- FeatureBitset computeAvailableFeatures(const FeatureBitset &FB) const;
- void
- verifyInstructionPredicates(const MCInst &MI,
- const FeatureBitset &AvailableFeatures) const;
};
} // end anonymous namespace
@@ -318,6 +333,29 @@ uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
return 0;
}
+/// getPAuthPCRelOpValue - Return the encoded value for a pointer
+/// authentication pc-relative operand.
+uint32_t
+AArch64MCCodeEmitter::getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ // If the destination is an immediate, invert sign as it's a negative value
+ // that should be encoded as unsigned
+ if (MO.isImm())
+ return -(MO.getImm());
+ assert(MO.isExpr() && "Unexpected target type!");
+
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch16);
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
+
+ ++MCNumFixups;
+
+ // All of the information is in the fixup.
+ return 0;
+}
+
/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t
@@ -523,6 +561,45 @@ AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
return MO.getImm() - 8;
}
+template <unsigned Multiple>
+uint32_t
+AArch64MCCodeEmitter::EncodeRegAsMultipleOf(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ assert(llvm::isPowerOf2_32(Multiple) && "Multiple is not a power of 2");
+ auto RegOpnd = MI.getOperand(OpIdx).getReg();
+ unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
+ return RegVal / Multiple;
+}
+
+uint32_t
+AArch64MCCodeEmitter::EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ auto RegOpnd = MI.getOperand(OpIdx).getReg();
+ return RegOpnd - AArch64::PN8;
+}
+
+uint32_t AArch64MCCodeEmitter::EncodeZPR2StridedRegisterClass(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ auto RegOpnd = MI.getOperand(OpIdx).getReg();
+ unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
+ unsigned T = (RegVal & 0x10) >> 1;
+ unsigned Zt = RegVal & 0x7;
+ return T | Zt;
+}
+
+uint32_t AArch64MCCodeEmitter::EncodeZPR4StridedRegisterClass(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ auto RegOpnd = MI.getOperand(OpIdx).getReg();
+ unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
+ unsigned T = (RegVal & 0x10) >> 2;
+ unsigned Zt = RegVal & 0x3;
+ return T | Zt;
+}
+
uint32_t AArch64MCCodeEmitter::EncodeMatrixTileListRegisterClass(
const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
@@ -531,14 +608,13 @@ uint32_t AArch64MCCodeEmitter::EncodeMatrixTileListRegisterClass(
return RegMask;
}
+template <unsigned BaseReg>
uint32_t
AArch64MCCodeEmitter::encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
auto RegOpnd = MI.getOperand(OpIdx).getReg();
- assert(RegOpnd >= AArch64::W12 && RegOpnd <= AArch64::W15 &&
- "Expected register in the range w12-w15!");
- return RegOpnd - AArch64::W12;
+ return RegOpnd - BaseReg;
}
uint32_t
@@ -614,12 +690,11 @@ unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
return EncodedValue;
}
-void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
+void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI,
+ SmallVectorImpl<char> &CB,
+
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
- verifyInstructionPredicates(MI,
- computeAvailableFeatures(STI.getFeatureBits()));
-
if (MI.getOpcode() == AArch64::TLSDESCCALL) {
// This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
// following (BLR) instruction. It doesn't emit any code itself so it
@@ -633,15 +708,13 @@ void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
return;
}
- if (MI.getOpcode() == AArch64::CompilerBarrier ||
- MI.getOpcode() == AArch64::SPACE) {
- // CompilerBarrier just prevents the compiler from reordering accesses, and
+ if (MI.getOpcode() == AArch64::SPACE) {
// SPACE just increases basic block size, in both cases no actual code.
return;
}
uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
- support::endian::write<uint32_t>(OS, Binary, support::little);
+ support::endian::write<uint32_t>(CB, Binary, llvm::endianness::little);
++MCNumEmitted; // Keep track of the # of mi's emitted.
}
@@ -673,11 +746,9 @@ unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
return EncodedValue;
}
-#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AArch64GenMCCodeEmitter.inc"
MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
MCContext &Ctx) {
return new AArch64MCCodeEmitter(MCII, Ctx);
}
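
The EncodeZPR2StridedRegisterClass/EncodeZPR4StridedRegisterClass hunks above pack a strided multi-vector operand into a small field: bit 4 of the register encoding (z16 and up) becomes the top bit, and the low bits select the register within that half. Below is a standalone sketch of the same arithmetic, assuming the MC encoding value of zN is simply N (which holds for the Z register file); the helper names are illustrative.

// Standalone sketch of the strided multi-vector register fields.
// encodeZPR2Strided/encodeZPR4Strided are illustrative helpers; the inputs are
// the plain register numbers (zN -> N), which match the MC encoding values.
#include <cassert>
#include <cstdio>

static unsigned encodeZPR2Strided(unsigned ZRegNo) { // legal: z0-z7, z16-z23
  unsigned T = (ZRegNo & 0x10) >> 1; // z16 and up set bit 3 of the 4-bit field
  unsigned Zt = ZRegNo & 0x7;        // position within the half
  return T | Zt;
}

static unsigned encodeZPR4Strided(unsigned ZRegNo) { // legal: z0-z3, z16-z19
  unsigned T = (ZRegNo & 0x10) >> 2; // z16 and up set bit 2 of the 3-bit field
  unsigned Zt = ZRegNo & 0x3;
  return T | Zt;
}

int main() {
  assert(encodeZPR2Strided(0) == 0);
  assert(encodeZPR2Strided(16) == 8);  // 0b1000
  assert(encodeZPR2Strided(23) == 15); // 0b1111
  assert(encodeZPR4Strided(19) == 7);  // 0b111
  std::printf("z17 in a strided pair encodes as %u\n", encodeZPR2Strided(17));
  return 0;
}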
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
index 844bd6bbada9..0c5a9d79f6cb 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
@@ -17,6 +17,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCValue.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
@@ -153,3 +154,47 @@ void AArch64MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
}
+
+const AArch64AuthMCExpr *AArch64AuthMCExpr::create(const MCExpr *Expr,
+ uint16_t Discriminator,
+ AArch64PACKey::ID Key,
+ bool HasAddressDiversity,
+ MCContext &Ctx) {
+ return new (Ctx)
+ AArch64AuthMCExpr(Expr, Discriminator, Key, HasAddressDiversity);
+}
+
+void AArch64AuthMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
+ bool WrapSubExprInParens = !isa<MCSymbolRefExpr>(getSubExpr());
+ if (WrapSubExprInParens)
+ OS << '(';
+ getSubExpr()->print(OS, MAI);
+ if (WrapSubExprInParens)
+ OS << ')';
+
+ OS << "@AUTH(" << AArch64PACKeyIDToString(Key) << ',' << Discriminator;
+ if (hasAddressDiversity())
+ OS << ",addr";
+ OS << ')';
+}
+
+void AArch64AuthMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
+}
+
+MCFragment *AArch64AuthMCExpr::findAssociatedFragment() const {
+ llvm_unreachable("FIXME: what goes here?");
+}
+
+bool AArch64AuthMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const {
+ if (!getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup))
+ return false;
+
+ if (Res.getSymB())
+ report_fatal_error("Auth relocation can't reference two symbols");
+
+ Res = MCValue::get(Res.getSymA(), nullptr, Res.getConstant(), getKind());
+ return true;
+}
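
The printImpl above renders the new signed-pointer operand as expr@AUTH(key,discriminator[,addr]). A small sketch of just that formatting follows, with a plain string standing in for the sub-expression; formatAuth is a hypothetical helper, and the lowercase key spellings ("ia", "da", ...) mirror AArch64PACKeyIDToString but are written out literally here as an assumption.

// Standalone sketch of the @AUTH operand syntax produced above.
#include <cstdint>
#include <cstdio>
#include <string>

static std::string formatAuth(const std::string &Sym, const char *KeyName,
                              uint16_t Disc, bool AddrDiversity) {
  std::string S = Sym + "@AUTH(" + KeyName + "," + std::to_string(Disc);
  if (AddrDiversity)
    S += ",addr";
  return S + ")";
}

int main() {
  std::printf("%s\n", formatAuth("_foo", "ia", 42, false).c_str()); // _foo@AUTH(ia,42)
  std::printf("%s\n", formatAuth("_bar", "da", 0, true).c_str());   // _bar@AUTH(da,0,addr)
  return 0;
}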
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
index d3e834a140b2..48235988869c 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
@@ -14,7 +14,9 @@
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCEXPR_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCEXPR_H
+#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
@@ -34,6 +36,8 @@ public:
VK_TPREL = 0x007,
VK_TLSDESC = 0x008,
VK_SECREL = 0x009,
+ VK_AUTH = 0x00a,
+ VK_AUTHADDR = 0x00b,
VK_SymLocBits = 0x00f,
// Variants specifying which part of the final address calculation is
@@ -116,6 +120,7 @@ private:
const MCExpr *Expr;
const VariantKind Kind;
+protected:
explicit AArch64MCExpr(const MCExpr *Expr, VariantKind Kind)
: Expr(Expr), Kind(Kind) {}
@@ -170,8 +175,42 @@ public:
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
}
+};
+
+class AArch64AuthMCExpr final : public AArch64MCExpr {
+ uint16_t Discriminator;
+ AArch64PACKey::ID Key;
+
+ explicit AArch64AuthMCExpr(const MCExpr *Expr, uint16_t Discriminator,
+ AArch64PACKey::ID Key, bool HasAddressDiversity)
+ : AArch64MCExpr(Expr, HasAddressDiversity ? VK_AUTHADDR : VK_AUTH),
+ Discriminator(Discriminator), Key(Key) {}
+
+public:
+ static const AArch64AuthMCExpr *
+ create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key,
+ bool HasAddressDiversity, MCContext &Ctx);
+
+ AArch64PACKey::ID getKey() const { return Key; }
+ uint16_t getDiscriminator() const { return Discriminator; }
+ bool hasAddressDiversity() const { return getKind() == VK_AUTHADDR; }
+
+ void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
+
+ void visitUsedExpr(MCStreamer &Streamer) const override;
- static bool classof(const AArch64MCExpr *) { return true; }
+ MCFragment *findAssociatedFragment() const override;
+
+ bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const override;
+
+ static bool classof(const MCExpr *E) {
+ return isa<AArch64MCExpr>(E) && classof(cast<AArch64MCExpr>(E));
+ }
+
+ static bool classof(const AArch64MCExpr *E) {
+ return E->getKind() == VK_AUTH || E->getKind() == VK_AUTHADDR;
+ }
};
} // end namespace llvm
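
The pair of classof overloads added above is the usual two-step LLVM RTTI idiom: first confirm the object is a target expression at all, then narrow on the variant kind. Here is a self-contained sketch of the same pattern; Expr, TargetExpr and AuthExpr are illustrative stand-ins, not LLVM classes.

// Standalone sketch of the two-level classof idiom used by AArch64AuthMCExpr.
#include <cassert>

struct Expr {
  enum Kind { Generic, Target } K;
};

struct TargetExpr : Expr {
  enum Variant { VK_PAGE, VK_AUTH, VK_AUTHADDR } V;
  static bool classof(const Expr *E) { return E->K == Target; }
};

struct AuthExpr : TargetExpr {
  // Step 1: is it a target expression at all?  Step 2: is its kind one of ours?
  static bool classof(const Expr *E) {
    return TargetExpr::classof(E) &&
           classof(static_cast<const TargetExpr *>(E));
  }
  static bool classof(const TargetExpr *E) {
    return E->V == VK_AUTH || E->V == VK_AUTHADDR;
  }
};

int main() {
  AuthExpr A;
  A.K = Expr::Target;
  A.V = TargetExpr::VK_AUTH;
  assert(AuthExpr::classof(static_cast<const Expr *>(&A)));

  TargetExpr T;
  T.K = Expr::Target;
  T.V = TargetExpr::VK_PAGE;
  assert(!AuthExpr::classof(static_cast<const Expr *>(&T)));
  return 0;
}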
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index c1186ae804d2..043f0a03b797 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -34,6 +34,7 @@ using namespace llvm;
#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
+#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AArch64GenInstrInfo.inc"
#define GET_SUBTARGETINFO_MC_DESC
@@ -52,21 +53,14 @@ static MCSubtargetInfo *
createAArch64MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
if (CPU.empty()) {
CPU = "generic";
+ if (FS.empty())
+ FS = "+v8a";
if (TT.isArm64e())
CPU = "apple-a12";
}
- // Most of the NEON instruction set isn't supported in streaming mode on SME
- // targets, disable NEON unless explicitly requested.
- bool RequestedNEON = FS.contains("neon");
- bool RequestedStreamingSVE = FS.contains("streaming-sve");
- MCSubtargetInfo *STI =
- createAArch64MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, FS);
- if (RequestedStreamingSVE && !RequestedNEON &&
- STI->hasFeature(AArch64::FeatureNEON))
- STI->ToggleFeature(AArch64::FeatureNEON);
- return STI;
+ return createAArch64MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, FS);
}
void AArch64_MC::initLLVMToCVRegMapping(MCRegisterInfo *MRI) {
@@ -237,12 +231,107 @@ void AArch64_MC::initLLVMToCVRegMapping(MCRegisterInfo *MRI) {
{codeview::RegisterId::ARM64_Q29, AArch64::Q29},
{codeview::RegisterId::ARM64_Q30, AArch64::Q30},
{codeview::RegisterId::ARM64_Q31, AArch64::Q31},
-
+ {codeview::RegisterId::ARM64_B0, AArch64::B0},
+ {codeview::RegisterId::ARM64_B1, AArch64::B1},
+ {codeview::RegisterId::ARM64_B2, AArch64::B2},
+ {codeview::RegisterId::ARM64_B3, AArch64::B3},
+ {codeview::RegisterId::ARM64_B4, AArch64::B4},
+ {codeview::RegisterId::ARM64_B5, AArch64::B5},
+ {codeview::RegisterId::ARM64_B6, AArch64::B6},
+ {codeview::RegisterId::ARM64_B7, AArch64::B7},
+ {codeview::RegisterId::ARM64_B8, AArch64::B8},
+ {codeview::RegisterId::ARM64_B9, AArch64::B9},
+ {codeview::RegisterId::ARM64_B10, AArch64::B10},
+ {codeview::RegisterId::ARM64_B11, AArch64::B11},
+ {codeview::RegisterId::ARM64_B12, AArch64::B12},
+ {codeview::RegisterId::ARM64_B13, AArch64::B13},
+ {codeview::RegisterId::ARM64_B14, AArch64::B14},
+ {codeview::RegisterId::ARM64_B15, AArch64::B15},
+ {codeview::RegisterId::ARM64_B16, AArch64::B16},
+ {codeview::RegisterId::ARM64_B17, AArch64::B17},
+ {codeview::RegisterId::ARM64_B18, AArch64::B18},
+ {codeview::RegisterId::ARM64_B19, AArch64::B19},
+ {codeview::RegisterId::ARM64_B20, AArch64::B20},
+ {codeview::RegisterId::ARM64_B21, AArch64::B21},
+ {codeview::RegisterId::ARM64_B22, AArch64::B22},
+ {codeview::RegisterId::ARM64_B23, AArch64::B23},
+ {codeview::RegisterId::ARM64_B24, AArch64::B24},
+ {codeview::RegisterId::ARM64_B25, AArch64::B25},
+ {codeview::RegisterId::ARM64_B26, AArch64::B26},
+ {codeview::RegisterId::ARM64_B27, AArch64::B27},
+ {codeview::RegisterId::ARM64_B28, AArch64::B28},
+ {codeview::RegisterId::ARM64_B29, AArch64::B29},
+ {codeview::RegisterId::ARM64_B30, AArch64::B30},
+ {codeview::RegisterId::ARM64_B31, AArch64::B31},
+ {codeview::RegisterId::ARM64_H0, AArch64::H0},
+ {codeview::RegisterId::ARM64_H1, AArch64::H1},
+ {codeview::RegisterId::ARM64_H2, AArch64::H2},
+ {codeview::RegisterId::ARM64_H3, AArch64::H3},
+ {codeview::RegisterId::ARM64_H4, AArch64::H4},
+ {codeview::RegisterId::ARM64_H5, AArch64::H5},
+ {codeview::RegisterId::ARM64_H6, AArch64::H6},
+ {codeview::RegisterId::ARM64_H7, AArch64::H7},
+ {codeview::RegisterId::ARM64_H8, AArch64::H8},
+ {codeview::RegisterId::ARM64_H9, AArch64::H9},
+ {codeview::RegisterId::ARM64_H10, AArch64::H10},
+ {codeview::RegisterId::ARM64_H11, AArch64::H11},
+ {codeview::RegisterId::ARM64_H12, AArch64::H12},
+ {codeview::RegisterId::ARM64_H13, AArch64::H13},
+ {codeview::RegisterId::ARM64_H14, AArch64::H14},
+ {codeview::RegisterId::ARM64_H15, AArch64::H15},
+ {codeview::RegisterId::ARM64_H16, AArch64::H16},
+ {codeview::RegisterId::ARM64_H17, AArch64::H17},
+ {codeview::RegisterId::ARM64_H18, AArch64::H18},
+ {codeview::RegisterId::ARM64_H19, AArch64::H19},
+ {codeview::RegisterId::ARM64_H20, AArch64::H20},
+ {codeview::RegisterId::ARM64_H21, AArch64::H21},
+ {codeview::RegisterId::ARM64_H22, AArch64::H22},
+ {codeview::RegisterId::ARM64_H23, AArch64::H23},
+ {codeview::RegisterId::ARM64_H24, AArch64::H24},
+ {codeview::RegisterId::ARM64_H25, AArch64::H25},
+ {codeview::RegisterId::ARM64_H26, AArch64::H26},
+ {codeview::RegisterId::ARM64_H27, AArch64::H27},
+ {codeview::RegisterId::ARM64_H28, AArch64::H28},
+ {codeview::RegisterId::ARM64_H29, AArch64::H29},
+ {codeview::RegisterId::ARM64_H30, AArch64::H30},
+ {codeview::RegisterId::ARM64_H31, AArch64::H31},
};
for (const auto &I : RegMap)
MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}
+bool AArch64_MC::isHForm(const MCInst &MI, const MCInstrInfo *MCII) {
+ const auto &FPR16 = AArch64MCRegisterClasses[AArch64::FPR16RegClassID];
+ return llvm::any_of(MI, [&](const MCOperand &Op) {
+ return Op.isReg() && FPR16.contains(Op.getReg());
+ });
+}
+
+bool AArch64_MC::isQForm(const MCInst &MI, const MCInstrInfo *MCII) {
+ const auto &FPR128 = AArch64MCRegisterClasses[AArch64::FPR128RegClassID];
+ return llvm::any_of(MI, [&](const MCOperand &Op) {
+ return Op.isReg() && FPR128.contains(Op.getReg());
+ });
+}
+
+bool AArch64_MC::isFpOrNEON(const MCInst &MI, const MCInstrInfo *MCII) {
+ const auto &FPR128 = AArch64MCRegisterClasses[AArch64::FPR128RegClassID];
+ const auto &FPR64 = AArch64MCRegisterClasses[AArch64::FPR64RegClassID];
+ const auto &FPR32 = AArch64MCRegisterClasses[AArch64::FPR32RegClassID];
+ const auto &FPR16 = AArch64MCRegisterClasses[AArch64::FPR16RegClassID];
+ const auto &FPR8 = AArch64MCRegisterClasses[AArch64::FPR8RegClassID];
+
+ auto IsFPR = [&](const MCOperand &Op) {
+ if (!Op.isReg())
+ return false;
+ auto Reg = Op.getReg();
+ return FPR128.contains(Reg) || FPR64.contains(Reg) || FPR32.contains(Reg) ||
+ FPR16.contains(Reg) || FPR8.contains(Reg);
+ };
+
+ return llvm::any_of(MI, IsFPR);
+}
+
static MCRegisterInfo *createAArch64MCRegisterInfo(const Triple &Triple) {
MCRegisterInfo *X = new MCRegisterInfo();
InitAArch64MCRegisterInfo(X, AArch64::LR);
@@ -329,9 +418,14 @@ public:
// condition code) and cbz (where it is a register).
const auto &Desc = Info->get(Inst.getOpcode());
for (unsigned i = 0, e = Inst.getNumOperands(); i != e; i++) {
- if (Desc.OpInfo[i].OperandType == MCOI::OPERAND_PCREL) {
- int64_t Imm = Inst.getOperand(i).getImm() * 4;
- Target = Addr + Imm;
+ if (Desc.operands()[i].OperandType == MCOI::OPERAND_PCREL) {
+ int64_t Imm = Inst.getOperand(i).getImm();
+ if (Inst.getOpcode() == AArch64::ADR)
+ Target = Addr + Imm;
+ else if (Inst.getOpcode() == AArch64::ADRP)
+ Target = (Addr & -4096) + Imm * 4096;
+ else
+ Target = Addr + Imm * 4;
return true;
}
}
@@ -340,7 +434,6 @@ public:
std::vector<std::pair<uint64_t, uint64_t>>
findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
- uint64_t GotPltSectionVA,
const Triple &TargetTriple) const override {
// Do a lightweight parsing of PLT entries.
std::vector<std::pair<uint64_t, uint64_t>> Result;
@@ -413,6 +506,10 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64TargetMC() {
// Register the asm streamer.
TargetRegistry::RegisterAsmTargetStreamer(*T,
createAArch64AsmTargetStreamer);
+ // Register the null streamer.
+ TargetRegistry::RegisterNullTargetStreamer(*T,
+ createAArch64NullTargetStreamer);
+
// Register the MCInstPrinter.
TargetRegistry::RegisterMCInstPrinter(*T, createAArch64MCInstPrinter);
}
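
The evaluateBranch hunk above now distinguishes three PC-relative encodings: ADR keeps byte granularity, ADRP works in 4 KiB pages from a page-aligned base, and ordinary branches scale the immediate by the 4-byte instruction size. A minimal sketch of that arithmetic follows; evalTarget and the Kind enum are illustrative, not LLVM API.

// Standalone sketch of the PC-relative target arithmetic.
#include <cstdint>
#include <cstdio>

enum class Kind { Adr, Adrp, Branch };

static uint64_t evalTarget(Kind K, uint64_t Addr, int64_t Imm) {
  switch (K) {
  case Kind::Adr:
    return Addr + Imm;                            // byte granularity
  case Kind::Adrp:
    return (Addr & ~uint64_t(4095)) + Imm * 4096; // page-aligned base, 4 KiB units
  case Kind::Branch:
    return Addr + Imm * 4;                        // immediate counts instructions
  }
  return 0;
}

int main() {
  // adrp at 0x1234 with imm 1 targets the next 4 KiB page.
  std::printf("%#llx\n", (unsigned long long)evalTarget(Kind::Adrp, 0x1234, 1));   // 0x2000
  // A branch two instructions backwards.
  std::printf("%#llx\n", (unsigned long long)evalTarget(Kind::Branch, 0x1000, -2)); // 0xff8
  return 0;
}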
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
index 66cb7a37a958..7b4f102840aa 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
@@ -13,6 +13,7 @@
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCTARGETDESC_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCTARGETDESC_H
+#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/DataTypes.h"
#include <memory>
@@ -22,6 +23,7 @@ class formatted_raw_ostream;
class MCAsmBackend;
class MCCodeEmitter;
class MCContext;
+class MCInst;
class MCInstrInfo;
class MCInstPrinter;
class MCRegisterInfo;
@@ -31,9 +33,9 @@ class MCSubtargetInfo;
class MCTargetOptions;
class MCTargetStreamer;
class Target;
+class Triple;
MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
MCContext &Ctx);
MCAsmBackend *createAArch64leAsmBackend(const Target &T,
const MCSubtargetInfo &STI,
@@ -51,7 +53,8 @@ std::unique_ptr<MCObjectTargetWriter>
createAArch64MachObjectWriter(uint32_t CPUType, uint32_t CPUSubtype,
bool IsILP32);
-std::unique_ptr<MCObjectTargetWriter> createAArch64WinCOFFObjectWriter();
+std::unique_ptr<MCObjectTargetWriter>
+createAArch64WinCOFFObjectWriter(const Triple &TheTriple);
MCTargetStreamer *createAArch64AsmTargetStreamer(MCStreamer &S,
formatted_raw_ostream &OS,
@@ -60,8 +63,17 @@ MCTargetStreamer *createAArch64AsmTargetStreamer(MCStreamer &S,
namespace AArch64_MC {
void initLLVMToCVRegMapping(MCRegisterInfo *MRI);
+bool isHForm(const MCInst &MI, const MCInstrInfo *MCII);
+bool isQForm(const MCInst &MI, const MCInstrInfo *MCII);
+bool isFpOrNEON(const MCInst &MI, const MCInstrInfo *MCII);
}
+namespace AArch64 {
+enum OperandType {
+ OPERAND_IMPLICIT_IMM_0 = MCOI::OPERAND_FIRST_TARGET,
+};
+} // namespace AArch64
+
} // End llvm namespace
// Defines symbolic names for AArch64 registers. This defines a mapping from
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
index 012661edbbfd..04bd85260c56 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
@@ -139,13 +139,11 @@ static bool canUseLocalRelocation(const MCSectionMachO &Section,
return false;
if (RefSec.getSegmentName() == "__DATA" &&
- RefSec.getName() == "__objc_classrefs")
+ (RefSec.getName() == "__cfstring" ||
+ RefSec.getName() == "__objc_classrefs"))
return false;
- // FIXME: ld64 currently handles internal pointer-sized relocations
- // incorrectly (applying the addend twice). We should be able to return true
- // unconditionally by this point when that's fixed.
- return false;
+ return true;
}
void AArch64MachObjectWriter::recordRelocation(
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
index 92552c3d41d5..e1d6dd7a056b 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
@@ -12,7 +12,6 @@
#include "AArch64TargetStreamer.h"
#include "AArch64MCAsmInfo.h"
-#include "AArch64Subtarget.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/ConstantPools.h"
#include "llvm/MC/MCContext.h"
@@ -76,10 +75,10 @@ void AArch64TargetStreamer::emitNoteSection(unsigned Flags) {
return;
}
MCSection *Cur = OutStreamer.getCurrentSectionOnly();
- OutStreamer.SwitchSection(Nt);
+ OutStreamer.switchSection(Nt);
// Emit the note header.
- OutStreamer.emitValueToAlignment(Align(8).value());
+ OutStreamer.emitValueToAlignment(Align(8));
OutStreamer.emitIntValue(4, 4); // data size for "GNU\0"
OutStreamer.emitIntValue(4 * 4, 4); // Elf_Prop size
OutStreamer.emitIntValue(ELF::NT_GNU_PROPERTY_TYPE_0, 4);
@@ -92,7 +91,7 @@ void AArch64TargetStreamer::emitNoteSection(unsigned Flags) {
OutStreamer.emitIntValue(0, 4); // pad
OutStreamer.endSection(Nt);
- OutStreamer.SwitchSection(Cur);
+ OutStreamer.switchSection(Cur);
}
void AArch64TargetStreamer::emitInst(uint32_t Inst) {
@@ -119,3 +118,7 @@ llvm::createAArch64ObjectTargetStreamer(MCStreamer &S,
return new AArch64TargetWinCOFFStreamer(S);
return nullptr;
}
+
+MCTargetStreamer *llvm::createAArch64NullTargetStreamer(MCStreamer &S) {
+ return new AArch64TargetStreamer(S);
+}
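
For context on emitNoteSection, only part of which appears in the hunk above: the .note.gnu.property it writes is an 8-byte-aligned ELF note whose single Elf_Prop carries the AArch64 feature flags. The sketch below lays out the payload as 32-bit little-endian words, reconstructed from the visible emit calls plus the standard note layout; the constants come from the ELF gABI / AArch64 Linux ABI extension, and buildGNUPropertyNote is an illustrative helper.

// Standalone sketch of the .note.gnu.property payload emitted above.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint32_t> buildGNUPropertyNote(uint32_t FeatureFlags) {
  const uint32_t NT_GNU_PROPERTY_TYPE_0 = 5;
  const uint32_t GNU_PROPERTY_AARCH64_FEATURE_1_AND = 0xc0000000;
  return {
      4,                                  // n_namesz: strlen("GNU") + 1
      16,                                 // n_descsz: one 16-byte Elf_Prop
      NT_GNU_PROPERTY_TYPE_0,             // n_type
      0x00554e47,                         // "GNU\0" as a little-endian word
      GNU_PROPERTY_AARCH64_FEATURE_1_AND, // pr_type
      4,                                  // pr_datasz
      FeatureFlags,                       // e.g. BTI (1) | PAC (2)
      0,                                  // padding up to 8-byte alignment
  };
}

int main() {
  for (uint32_t W : buildGNUPropertyNote(/*BTI|PAC=*/3))
    std::printf("%08x\n", W);
  return 0;
}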
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
index 86c7baf8f429..7676d88a82b5 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
@@ -30,7 +30,7 @@ public:
/// MCExpr that can be used to refer to the constant pool location.
const MCExpr *addConstantPoolEntry(const MCExpr *, unsigned Size, SMLoc Loc);
- /// Callback used to implemnt the .ltorg directive.
+ /// Callback used to implement the .ltorg directive.
/// Emit contents of constant pool for the current section.
void emitCurrentConstantPool();
@@ -66,7 +66,21 @@ public:
virtual void emitARM64WinCFITrapFrame() {}
virtual void emitARM64WinCFIMachineFrame() {}
virtual void emitARM64WinCFIContext() {}
+ virtual void emitARM64WinCFIECContext() {}
virtual void emitARM64WinCFIClearUnwoundToCall() {}
+ virtual void emitARM64WinCFIPACSignLR() {}
+ virtual void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegD(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegDP(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegQ(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegIX(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegIPX(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegDX(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegDPX(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegQX(unsigned Reg, int Offset) {}
+ virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset) {}
private:
std::unique_ptr<AssemblerConstantPools> ConstantPools;
@@ -119,7 +133,21 @@ public:
void emitARM64WinCFITrapFrame() override;
void emitARM64WinCFIMachineFrame() override;
void emitARM64WinCFIContext() override;
+ void emitARM64WinCFIECContext() override;
void emitARM64WinCFIClearUnwoundToCall() override;
+ void emitARM64WinCFIPACSignLR() override;
+ void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegD(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegDP(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegQ(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegIX(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegIPX(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegDX(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegDPX(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegQX(unsigned Reg, int Offset) override;
+ void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset) override;
private:
void emitARM64WinUnwindCode(unsigned UnwindCode, int Reg, int Offset);
@@ -128,6 +156,8 @@ private:
MCTargetStreamer *
createAArch64ObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI);
+MCTargetStreamer *createAArch64NullTargetStreamer(MCStreamer &S);
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp
index 0072af4cc16e..05c7d76f0af3 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp
@@ -19,6 +19,7 @@
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/MCWinCOFFObjectWriter.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
@@ -29,8 +30,10 @@ namespace {
class AArch64WinCOFFObjectWriter : public MCWinCOFFObjectTargetWriter {
public:
- AArch64WinCOFFObjectWriter()
- : MCWinCOFFObjectTargetWriter(COFF::IMAGE_FILE_MACHINE_ARM64) {}
+ AArch64WinCOFFObjectWriter(const Triple &TheTriple)
+ : MCWinCOFFObjectTargetWriter(TheTriple.isWindowsArm64EC()
+ ? COFF::IMAGE_FILE_MACHINE_ARM64EC
+ : COFF::IMAGE_FILE_MACHINE_ARM64) {}
~AArch64WinCOFFObjectWriter() override = default;
@@ -158,6 +161,7 @@ bool AArch64WinCOFFObjectWriter::recordRelocation(const MCFixup &Fixup) const {
return true;
}
-std::unique_ptr<MCObjectTargetWriter> llvm::createAArch64WinCOFFObjectWriter() {
- return std::make_unique<AArch64WinCOFFObjectWriter>();
+std::unique_ptr<MCObjectTargetWriter>
+llvm::createAArch64WinCOFFObjectWriter(const Triple &TheTriple) {
+ return std::make_unique<AArch64WinCOFFObjectWriter>(TheTriple);
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp
index b688165d3a7b..438ac6cc4788 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp
@@ -8,6 +8,7 @@
#include "AArch64WinCOFFStreamer.h"
#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCWin64EH.h"
@@ -26,14 +27,14 @@ public:
std::unique_ptr<MCObjectWriter> OW)
: MCWinCOFFStreamer(C, std::move(AB), std::move(CE), std::move(OW)) {}
- void EmitWinEHHandlerData(SMLoc Loc) override;
- void EmitWindowsUnwindTables() override;
- void EmitWindowsUnwindTables(WinEH::FrameInfo *Frame) override;
+ void emitWinEHHandlerData(SMLoc Loc) override;
+ void emitWindowsUnwindTables() override;
+ void emitWindowsUnwindTables(WinEH::FrameInfo *Frame) override;
void finishImpl() override;
};
-void AArch64WinCOFFStreamer::EmitWinEHHandlerData(SMLoc Loc) {
- MCStreamer::EmitWinEHHandlerData(Loc);
+void AArch64WinCOFFStreamer::emitWinEHHandlerData(SMLoc Loc) {
+ MCStreamer::emitWinEHHandlerData(Loc);
// We have to emit the unwind info now, because this directive
// actually switches to the .xdata section!
@@ -41,11 +42,11 @@ void AArch64WinCOFFStreamer::EmitWinEHHandlerData(SMLoc Loc) {
/* HandlerData = */ true);
}
-void AArch64WinCOFFStreamer::EmitWindowsUnwindTables(WinEH::FrameInfo *Frame) {
+void AArch64WinCOFFStreamer::emitWindowsUnwindTables(WinEH::FrameInfo *Frame) {
EHStreamer.EmitUnwindInfo(*this, Frame, /* HandlerData = */ false);
}
-void AArch64WinCOFFStreamer::EmitWindowsUnwindTables() {
+void AArch64WinCOFFStreamer::emitWindowsUnwindTables() {
if (!getNumWinFrameInfos())
return;
EHStreamer.Emit(*this);
@@ -53,7 +54,7 @@ void AArch64WinCOFFStreamer::EmitWindowsUnwindTables() {
void AArch64WinCOFFStreamer::finishImpl() {
emitFrames(nullptr);
- EmitWindowsUnwindTables();
+ emitWindowsUnwindTables();
MCWinCOFFStreamer::finishImpl();
}
@@ -71,10 +72,9 @@ void AArch64TargetWinCOFFStreamer::emitARM64WinUnwindCode(unsigned UnwindCode,
WinEH::FrameInfo *CurFrame = S.EnsureValidWinFrameInfo(SMLoc());
if (!CurFrame)
return;
- MCSymbol *Label = S.emitCFILabel();
- auto Inst = WinEH::Instruction(UnwindCode, Label, Reg, Offset);
+ auto Inst = WinEH::Instruction(UnwindCode, /*Label=*/nullptr, Reg, Offset);
if (InEpilogCFI)
- CurFrame->EpilogMap[CurrentEpilog].push_back(Inst);
+ CurFrame->EpilogMap[CurrentEpilog].Instructions.push_back(Inst);
else
CurFrame->Instructions.push_back(Inst);
}
@@ -176,7 +176,8 @@ void AArch64TargetWinCOFFStreamer::emitARM64WinCFIPrologEnd() {
MCSymbol *Label = S.emitCFILabel();
CurFrame->PrologEnd = Label;
- WinEH::Instruction Inst = WinEH::Instruction(Win64EH::UOP_End, Label, -1, 0);
+ WinEH::Instruction Inst =
+ WinEH::Instruction(Win64EH::UOP_End, /*Label=*/nullptr, -1, 0);
auto it = CurFrame->Instructions.begin();
CurFrame->Instructions.insert(it, Inst);
}
@@ -198,9 +199,11 @@ void AArch64TargetWinCOFFStreamer::emitARM64WinCFIEpilogEnd() {
return;
InEpilogCFI = false;
+ WinEH::Instruction Inst =
+ WinEH::Instruction(Win64EH::UOP_End, /*Label=*/nullptr, -1, 0);
+ CurFrame->EpilogMap[CurrentEpilog].Instructions.push_back(Inst);
MCSymbol *Label = S.emitCFILabel();
- WinEH::Instruction Inst = WinEH::Instruction(Win64EH::UOP_End, Label, -1, 0);
- CurFrame->EpilogMap[CurrentEpilog].push_back(Inst);
+ CurFrame->EpilogMap[CurrentEpilog].End = Label;
CurrentEpilog = nullptr;
}
@@ -216,10 +219,78 @@ void AArch64TargetWinCOFFStreamer::emitARM64WinCFIContext() {
emitARM64WinUnwindCode(Win64EH::UOP_Context, -1, 0);
}
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFIECContext() {
+ emitARM64WinUnwindCode(Win64EH::UOP_ECContext, -1, 0);
+}
+
void AArch64TargetWinCOFFStreamer::emitARM64WinCFIClearUnwoundToCall() {
emitARM64WinUnwindCode(Win64EH::UOP_ClearUnwoundToCall, -1, 0);
}
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFIPACSignLR() {
+ emitARM64WinUnwindCode(Win64EH::UOP_PACSignLR, -1, 0);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegI(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegI, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegIP(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegIP, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegD(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegD, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegDP(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegDP, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegQ(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegQ, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegQP(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegQP, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegIX(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegIX, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegIPX(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegIPX, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegDX(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegDX, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegDPX(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegDPX, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegQX(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegQX, Reg, Offset);
+}
+
+void AArch64TargetWinCOFFStreamer::emitARM64WinCFISaveAnyRegQPX(unsigned Reg,
+ int Offset) {
+ emitARM64WinUnwindCode(Win64EH::UOP_SaveAnyRegQPX, Reg, Offset);
+}
+
MCWinCOFFStreamer *llvm::createAArch64WinCOFFStreamer(
MCContext &Context, std::unique_ptr<MCAsmBackend> MAB,
std::unique_ptr<MCObjectWriter> OW, std::unique_ptr<MCCodeEmitter> Emitter,