aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp')
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp939
1 files changed, 802 insertions, 137 deletions
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
index e163ebfa2348..f5a5d689fa09 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
@@ -17,29 +17,116 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/AArch64TargetParser.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/ARMTargetParserCommon.h"
+#include <optional>
using namespace clang;
using namespace clang::targets;
-const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#include "clang/Basic/BuiltinsSME.def"
+
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
- {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
- {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
+void AArch64TargetInfo::setArchFeatures() {
+ if (*ArchInfo == llvm::AArch64::ARMV8R) {
+ HasDotProd = true;
+ HasDIT = true;
+ HasFlagM = true;
+ HasRCPC = true;
+ FPU |= NeonMode;
+ HasCCPP = true;
+ HasCRC = true;
+ HasLSE = true;
+ HasRDM = true;
+ } else if (ArchInfo->Version.getMajor() == 8) {
+ if (ArchInfo->Version.getMinor() >= 7u) {
+ HasWFxT = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 6u) {
+ HasBFloat16 = true;
+ HasMatMul = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 5u) {
+ HasAlternativeNZCV = true;
+ HasFRInt3264 = true;
+ HasSSBS = true;
+ HasSB = true;
+ HasPredRes = true;
+ HasBTI = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 4u) {
+ HasDotProd = true;
+ HasDIT = true;
+ HasFlagM = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 3u) {
+ HasRCPC = true;
+ FPU |= NeonMode;
+ }
+ if (ArchInfo->Version.getMinor() >= 2u) {
+ HasCCPP = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 1u) {
+ HasCRC = true;
+ HasLSE = true;
+ HasRDM = true;
+ }
+ } else if (ArchInfo->Version.getMajor() == 9) {
+ if (ArchInfo->Version.getMinor() >= 2u) {
+ HasWFxT = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 1u) {
+ HasBFloat16 = true;
+ HasMatMul = true;
+ }
+ FPU |= SveMode;
+ HasSVE2 = true;
+ HasFullFP16 = true;
+ HasAlternativeNZCV = true;
+ HasFRInt3264 = true;
+ HasSSBS = true;
+ HasSB = true;
+ HasPredRes = true;
+ HasBTI = true;
+ HasDotProd = true;
+ HasDIT = true;
+ HasFlagM = true;
+ HasRCPC = true;
+ FPU |= NeonMode;
+ HasCCPP = true;
+ HasCRC = true;
+ HasLSE = true;
+ HasRDM = true;
+ }
+}
+
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple), ABI("aapcs") {
@@ -56,7 +143,9 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
// All AArch64 implementations support ARMv8 FP, which makes half a legal type.
HasLegalHalfType = true;
+ HalfArgsAndReturns = true;
HasFloat16 = true;
+ HasStrictFP = true;
if (Triple.isArch64Bit())
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
@@ -117,11 +206,11 @@ bool AArch64TargetInfo::setABI(const std::string &Name) {
return true;
}
-bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
+bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
BranchProtectionInfo &BPI,
StringRef &Err) const {
- llvm::AArch64::ParsedBranchProtection PBP;
- if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
+ llvm::ARM::ParsedBranchProtection PBP;
+ if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
return false;
BPI.SignReturnAddr =
@@ -136,12 +225,13 @@ bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
+ BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
+ BPI.GuardedControlStack = PBP.GuardedControlStack;
return true;
}
bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
- return Name == "generic" ||
- llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
+ return Name == "generic" || llvm::AArch64::parseCpu(Name);
}
bool AArch64TargetInfo::setCPU(const std::string &Name) {
@@ -156,8 +246,6 @@ void AArch64TargetInfo::fillValidCPUList(
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
- Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
- Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
@@ -183,6 +271,7 @@ void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
+ Builder.defineMacro("__ARM_FEATURE_BTI", "1");
// Also include the Armv8.4 defines
getTargetDefinesARMV84A(Opts, Builder);
}
@@ -203,21 +292,72 @@ void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
getTargetDefinesARMV86A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.7 defines
+ getTargetDefinesARMV87A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.8 defines
+ getTargetDefinesARMV88A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9-A maps to Armv8.5-A
+ getTargetDefinesARMV85A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.1-A maps to Armv8.6-A
+ getTargetDefinesARMV86A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.2-A maps to Armv8.7-A
+ getTargetDefinesARMV87A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.3-A maps to Armv8.8-A
+ getTargetDefinesARMV88A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.4-A maps to Armv8.9-A
+ getTargetDefinesARMV89A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
+ getTargetDefinesARMV94A(Opts, Builder);
+}
+
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
- Builder.defineMacro("__aarch64__");
- // For bare-metal.
- if (getTriple().getOS() == llvm::Triple::UnknownOS &&
- getTriple().isOSBinFormatELF())
- Builder.defineMacro("__ELF__");
-
- // Target properties.
- if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
- Builder.defineMacro("_LP64");
- Builder.defineMacro("__LP64__");
+ if (getTriple().isWindowsArm64EC()) {
+ // Define the same set of macros as would be defined on x86_64 to ensure that
+ // ARM64EC datatype layouts match those of x86_64 compiled code
+ Builder.defineMacro("__amd64__");
+ Builder.defineMacro("__amd64");
+ Builder.defineMacro("__x86_64");
+ Builder.defineMacro("__x86_64__");
+ Builder.defineMacro("__arm64ec__");
+ } else {
+ Builder.defineMacro("__aarch64__");
}
+ // Inline assembly supports AArch64 flag outputs.
+ Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
+
std::string CodeModel = getTargetOpts().CodeModel;
if (CodeModel == "default")
CodeModel = "small";
@@ -227,8 +367,10 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
// ACLE predefines. Many can only have one possible value on v8 AArch64.
Builder.defineMacro("__ARM_ACLE", "200");
- Builder.defineMacro("__ARM_ARCH", "8");
- Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");
+ Builder.defineMacro("__ARM_ARCH",
+ std::to_string(ArchInfo->Version.getMajor()));
+ Builder.defineMacro("__ARM_ARCH_PROFILE",
+ std::string("'") + (char)ArchInfo->Profile + "'");
Builder.defineMacro("__ARM_64BIT_STATE", "1");
Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
@@ -244,8 +386,14 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
+ // These macros are set when Clang can parse declarations with these
+ // attributes.
+ Builder.defineMacro("__ARM_STATE_ZA", "1");
+ Builder.defineMacro("__ARM_STATE_ZT0", "1");
+
// 0xe implies support for half, single and double precision operations.
- Builder.defineMacro("__ARM_FP", "0xE");
+ if (FPU & FPUMode)
+ Builder.defineMacro("__ARM_FP", "0xE");
// PCS specifies this for SysV variants, which is all we support. Other ABIs
// may choose __ARM_FP16_FORMAT_ALTERNATIVE.
@@ -269,6 +417,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (FPU & SveMode)
Builder.defineMacro("__ARM_FEATURE_SVE", "1");
+ if ((FPU & NeonMode) && (FPU & SveMode))
+ Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
+
if (HasSVE2)
Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
@@ -284,9 +435,28 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasSVE2 && HasSVE2SM4)
Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
+ if (HasSME) {
+ Builder.defineMacro("__ARM_FEATURE_SME");
+ Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
+ }
+
+ if (HasSME2) {
+ Builder.defineMacro("__ARM_FEATURE_SME");
+ Builder.defineMacro("__ARM_FEATURE_SME2");
+ Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
+ }
+
if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
+ if (HasRCPC3)
+ Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
+ else if (HasRCPC)
+ Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
+
+ if (HasFMV)
+ Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
+
// The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
// macros for AES, SHA2, SHA3 and SM4
if (HasAES && HasSHA2)
@@ -308,6 +478,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_FEATURE_SM4", "1");
}
+ if (HasPAuth)
+ Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
+
if (HasUnaligned)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
@@ -375,178 +548,416 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (Opts.BranchTargetEnforcement)
Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
+ if (Opts.GuardedControlStack)
+ Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");
+
if (HasLS64)
Builder.defineMacro("__ARM_FEATURE_LS64", "1");
if (HasRandGen)
Builder.defineMacro("__ARM_FEATURE_RNG", "1");
- switch (ArchKind) {
- default:
- break;
- case llvm::AArch64::ArchKind::ARMV8_1A:
+ if (HasMOPS)
+ Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
+
+ if (HasD128)
+ Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
+
+ if (HasGCS)
+ Builder.defineMacro("__ARM_FEATURE_GCS", "1");
+
+ if (*ArchInfo == llvm::AArch64::ARMV8_1A)
getTargetDefinesARMV81A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_2A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
getTargetDefinesARMV82A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_3A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
getTargetDefinesARMV83A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_4A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
getTargetDefinesARMV84A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_5A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
getTargetDefinesARMV85A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_6A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
getTargetDefinesARMV86A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_7A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
getTargetDefinesARMV87A(Opts, Builder);
- break;
- }
-
- // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
+ else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
+ getTargetDefinesARMV88A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
+ getTargetDefinesARMV89A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9A)
+ getTargetDefinesARMV9A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
+ getTargetDefinesARMV91A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
+ getTargetDefinesARMV92A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
+ getTargetDefinesARMV93A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
+ getTargetDefinesARMV94A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
+ getTargetDefinesARMV95A(Opts, Builder);
+
+ // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
+
+ // Allow detection of fast FMA support.
+ Builder.defineMacro("__FP_FAST_FMA", "1");
+ Builder.defineMacro("__FP_FAST_FMAF", "1");
+
+ // C/C++ operators work on both VLS and VLA SVE types
+ if (FPU & SveMode)
+ Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
- if (Opts.ArmSveVectorBits) {
- Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.ArmSveVectorBits));
- Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
+ if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
+ Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
}
}
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
+ Builtin::FirstTSBuiltin);
+}
+
+std::optional<std::pair<unsigned, unsigned>>
+AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
+ if (LangOpts.VScaleMin || LangOpts.VScaleMax)
+ return std::pair<unsigned, unsigned>(
+ LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
+
+ if (hasFeature("sve"))
+ return std::pair<unsigned, unsigned>(1, 16);
+
+ return std::nullopt;
+}
+
+unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
+ if (Name == "default")
+ return 0;
+ if (auto Ext = llvm::AArch64::parseArchExtension(Name))
+ return Ext->FmvPriority;
+ return 0;
+}
+
+unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
+ // Take the maximum priority as per feature cost, so more features win.
+ return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
+}
+
+bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
+ if (auto Ext = llvm::AArch64::parseArchExtension(Name))
+ return !Ext->DependentFeatures.empty();
+ return false;
+}
+
+StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
+ if (auto Ext = llvm::AArch64::parseArchExtension(Name))
+ return Ext->DependentFeatures;
+ return StringRef();
+}
+
+bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
+ return llvm::AArch64::parseArchExtension(FeatureStr).has_value();
}
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
- return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
- (Feature == "neon" && (FPU & NeonMode)) ||
- ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
- Feature == "sve2-aes" || Feature == "sve2-sha3" ||
- Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
- Feature == "i8mm" || Feature == "bf16") &&
- (FPU & SveMode)) ||
- (Feature == "ls64" && HasLS64);
+ return llvm::StringSwitch<bool>(Feature)
+ .Cases("aarch64", "arm64", "arm", true)
+ .Case("fmv", HasFMV)
+ .Cases("neon", "fp", "simd", FPU & NeonMode)
+ .Case("jscvt", HasJSCVT)
+ .Case("fcma", HasFCMA)
+ .Case("rng", HasRandGen)
+ .Case("flagm", HasFlagM)
+ .Case("flagm2", HasAlternativeNZCV)
+ .Case("fp16fml", HasFP16FML)
+ .Case("dotprod", HasDotProd)
+ .Case("sm4", HasSM4)
+ .Case("rdm", HasRDM)
+ .Case("lse", HasLSE)
+ .Case("crc", HasCRC)
+ .Case("sha2", HasSHA2)
+ .Case("sha3", HasSHA3)
+ .Cases("aes", "pmull", HasAES)
+ .Cases("fp16", "fullfp16", HasFullFP16)
+ .Case("dit", HasDIT)
+ .Case("dpb", HasCCPP)
+ .Case("dpb2", HasCCDP)
+ .Case("rcpc", HasRCPC)
+ .Case("frintts", HasFRInt3264)
+ .Case("i8mm", HasMatMul)
+ .Case("bf16", HasBFloat16)
+ .Case("sve", FPU & SveMode)
+ .Case("sve-bf16", FPU & SveMode && HasBFloat16)
+ .Case("sve-i8mm", FPU & SveMode && HasMatMul)
+ .Case("f32mm", FPU & SveMode && HasMatmulFP32)
+ .Case("f64mm", FPU & SveMode && HasMatmulFP64)
+ .Case("sve2", FPU & SveMode && HasSVE2)
+ .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
+ .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
+ .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
+ .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
+ .Case("sme", HasSME)
+ .Case("sme2", HasSME2)
+ .Case("sme-f64f64", HasSMEF64F64)
+ .Case("sme-i16i64", HasSMEI16I64)
+ .Case("sme-fa64", HasSMEFA64)
+ .Cases("memtag", "memtag2", HasMTE)
+ .Case("sb", HasSB)
+ .Case("predres", HasPredRes)
+ .Cases("ssbs", "ssbs2", HasSSBS)
+ .Case("bti", HasBTI)
+ .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
+ .Case("wfxt", HasWFxT)
+ .Case("rcpc3", HasRCPC3)
+ .Default(false);
+}
+
+void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled) const {
+ Features[Name] = Enabled;
+ // If the feature is an architecture feature (like v8.2a), add all previous
+ // architecture versions and any dependant target features.
+ const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
+ llvm::AArch64::ArchInfo::findBySubArch(Name);
+
+ if (!ArchInfo)
+ return; // Not an architecture, nothing more to do.
+
+ // Disabling an architecture feature does not affect dependent features
+ if (!Enabled)
+ return;
+
+ for (const auto *OtherArch : llvm::AArch64::ArchInfos)
+ if (ArchInfo->implies(*OtherArch))
+ Features[OtherArch->getSubArch()] = true;
+
+ // Set any features implied by the architecture
+ std::vector<StringRef> CPUFeats;
+ if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
+ for (auto F : CPUFeats) {
+ assert(F[0] == '+' && "Expected + in target feature!");
+ Features[F.drop_front(1)] = true;
+ }
+ }
}
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
- FPU = FPUMode;
- HasCRC = false;
- HasCrypto = false;
- HasAES = false;
- HasSHA2 = false;
- HasSHA3 = false;
- HasSM4 = false;
- HasUnaligned = true;
- HasFullFP16 = false;
- HasDotProd = false;
- HasFP16FML = false;
- HasMTE = false;
- HasTME = false;
- HasLS64 = false;
- HasRandGen = false;
- HasMatMul = false;
- HasBFloat16 = false;
- HasSVE2 = false;
- HasSVE2AES = false;
- HasSVE2SHA3 = false;
- HasSVE2SM4 = false;
- HasSVE2BitPerm = false;
- HasMatmulFP64 = false;
- HasMatmulFP32 = false;
- HasLSE = false;
-
- ArchKind = llvm::AArch64::ArchKind::ARMV8A;
-
for (const auto &Feature : Features) {
- if (Feature == "+neon")
+ if (Feature == "-fp-armv8")
+ HasNoFP = true;
+ if (Feature == "-neon")
+ HasNoNeon = true;
+ if (Feature == "-sve")
+ HasNoSVE = true;
+
+ if (Feature == "+neon" || Feature == "+fp-armv8")
+ FPU |= NeonMode;
+ if (Feature == "+jscvt") {
+ HasJSCVT = true;
+ FPU |= NeonMode;
+ }
+ if (Feature == "+fcma") {
+ HasFCMA = true;
FPU |= NeonMode;
+ }
+
if (Feature == "+sve") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
+ HasFullFP16 = true;
}
if (Feature == "+sve2") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
}
if (Feature == "+sve2-aes") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2AES = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2AES = true;
}
if (Feature == "+sve2-sha3") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2SHA3 = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2SHA3 = true;
}
if (Feature == "+sve2-sm4") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2SM4 = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2SM4 = true;
}
if (Feature == "+sve2-bitperm") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2BitPerm = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2BitPerm = true;
}
if (Feature == "+f32mm") {
+ FPU |= NeonMode;
FPU |= SveMode;
+ HasFullFP16 = true;
HasMatmulFP32 = true;
}
if (Feature == "+f64mm") {
+ FPU |= NeonMode;
FPU |= SveMode;
+ HasFullFP16 = true;
HasMatmulFP64 = true;
}
+ if (Feature == "+sme") {
+ HasSME = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ }
+ if (Feature == "+sme2") {
+ HasSME = true;
+ HasSME2 = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ }
+ if (Feature == "+sme-f64f64") {
+ HasSME = true;
+ HasSMEF64F64 = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ }
+ if (Feature == "+sme-i16i64") {
+ HasSME = true;
+ HasSMEI16I64 = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ }
+ if (Feature == "+sme-fa64") {
+ FPU |= NeonMode;
+ FPU |= SveMode;
+ HasSME = true;
+ HasSVE2 = true;
+ HasSMEFA64 = true;
+ }
+ if (Feature == "+sb")
+ HasSB = true;
+ if (Feature == "+predres")
+ HasPredRes = true;
+ if (Feature == "+ssbs")
+ HasSSBS = true;
+ if (Feature == "+bti")
+ HasBTI = true;
+ if (Feature == "+wfxt")
+ HasWFxT = true;
+ if (Feature == "-fmv")
+ HasFMV = false;
if (Feature == "+crc")
HasCRC = true;
- if (Feature == "+crypto")
- HasCrypto = true;
- if (Feature == "+aes")
+ if (Feature == "+rcpc")
+ HasRCPC = true;
+ if (Feature == "+aes") {
+ FPU |= NeonMode;
HasAES = true;
- if (Feature == "+sha2")
+ }
+ if (Feature == "+sha2") {
+ FPU |= NeonMode;
HasSHA2 = true;
+ }
if (Feature == "+sha3") {
+ FPU |= NeonMode;
HasSHA2 = true;
HasSHA3 = true;
}
- if (Feature == "+sm4")
+ if (Feature == "+rdm") {
+ FPU |= NeonMode;
+ HasRDM = true;
+ }
+ if (Feature == "+dit")
+ HasDIT = true;
+ if (Feature == "+cccp")
+ HasCCPP = true;
+ if (Feature == "+ccdp") {
+ HasCCPP = true;
+ HasCCDP = true;
+ }
+ if (Feature == "+fptoint")
+ HasFRInt3264 = true;
+ if (Feature == "+sm4") {
+ FPU |= NeonMode;
HasSM4 = true;
+ }
if (Feature == "+strict-align")
HasUnaligned = false;
- if (Feature == "+v8.1a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
- if (Feature == "+v8.2a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
- if (Feature == "+v8.3a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
- if (Feature == "+v8.4a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
- if (Feature == "+v8.5a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
- if (Feature == "+v8.6a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
- if (Feature == "+v8.7a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
+ // All predecessor archs are added but select the latest one for ArchKind.
+ if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8A;
+ if (Feature == "+v8.1a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_1A;
+ if (Feature == "+v8.2a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_2A;
+ if (Feature == "+v8.3a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_3A;
+ if (Feature == "+v8.4a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_4A;
+ if (Feature == "+v8.5a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_5A;
+ if (Feature == "+v8.6a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_6A;
+ if (Feature == "+v8.7a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_7A;
+ if (Feature == "+v8.8a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_8A;
+ if (Feature == "+v8.9a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_9A;
+ if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9A;
+ if (Feature == "+v9.1a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_1A;
+ if (Feature == "+v9.2a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_2A;
+ if (Feature == "+v9.3a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_3A;
+ if (Feature == "+v9.4a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_4A;
+ if (Feature == "+v9.5a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_5A;
if (Feature == "+v8r")
- ArchKind = llvm::AArch64::ArchKind::ARMV8R;
- if (Feature == "+fullfp16")
+ ArchInfo = &llvm::AArch64::ARMV8R;
+ if (Feature == "+fullfp16") {
+ FPU |= NeonMode;
HasFullFP16 = true;
- if (Feature == "+dotprod")
+ }
+ if (Feature == "+dotprod") {
+ FPU |= NeonMode;
HasDotProd = true;
- if (Feature == "+fp16fml")
+ }
+ if (Feature == "+fp16fml") {
+ FPU |= NeonMode;
+ HasFullFP16 = true;
HasFP16FML = true;
+ }
if (Feature == "+mte")
HasMTE = true;
if (Feature == "+tme")
@@ -565,10 +976,188 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasRandGen = true;
if (Feature == "+flagm")
HasFlagM = true;
+ if (Feature == "+altnzcv") {
+ HasFlagM = true;
+ HasAlternativeNZCV = true;
+ }
+ if (Feature == "+mops")
+ HasMOPS = true;
+ if (Feature == "+d128")
+ HasD128 = true;
+ if (Feature == "+gcs")
+ HasGCS = true;
+ if (Feature == "+rcpc3")
+ HasRCPC3 = true;
+ }
+
+ // Check features that are manually disabled by command line options.
+ // This needs to be checked after architecture-related features are handled,
+ // making sure they are properly disabled when required.
+ for (const auto &Feature : Features) {
+ if (Feature == "-d128")
+ HasD128 = false;
}
setDataLayout();
+ setArchFeatures();
+
+ if (HasNoFP) {
+ FPU &= ~FPUMode;
+ FPU &= ~NeonMode;
+ FPU &= ~SveMode;
+ }
+ if (HasNoNeon) {
+ FPU &= ~NeonMode;
+ FPU &= ~SveMode;
+ }
+ if (HasNoSVE)
+ FPU &= ~SveMode;
+
+ return true;
+}
+
+bool AArch64TargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ std::vector<std::string> UpdatedFeaturesVec;
+ // Parse the CPU and add any implied features.
+ std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
+ if (CpuInfo) {
+ auto Exts = CpuInfo->getImpliedExtensions();
+ std::vector<StringRef> CPUFeats;
+ llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
+ for (auto F : CPUFeats) {
+ assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
+ UpdatedFeaturesVec.push_back(F.str());
+ }
+ }
+
+ // Process target and dependent features. This is done in two loops collecting
+ // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
+ // add target '+/-'features that can later disable some of features added on
+ // the first loop. Function Multi Versioning features begin with '?'.
+ for (const auto &Feature : FeaturesVec)
+ if (((Feature[0] == '?' || Feature[0] == '+')) &&
+ AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
+ StringRef DepFeatures =
+ AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
+ SmallVector<StringRef, 1> AttrFeatures;
+ DepFeatures.split(AttrFeatures, ",");
+ for (auto F : AttrFeatures)
+ UpdatedFeaturesVec.push_back(F.str());
+ }
+ for (const auto &Feature : FeaturesVec)
+ if (Feature[0] != '?') {
+ std::string UpdatedFeature = Feature;
+ if (Feature[0] == '+') {
+ std::optional<llvm::AArch64::ExtensionInfo> Extension =
+ llvm::AArch64::parseArchExtension(Feature.substr(1));
+ if (Extension)
+ UpdatedFeature = Extension->Feature.str();
+ }
+ UpdatedFeaturesVec.push_back(UpdatedFeature);
+ }
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
+}
+
+// Parse AArch64 Target attributes, which are a comma separated list of:
+// "arch=<arch>" - parsed to features as per -march=..
+// "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
+// "tune=<cpu>" - TuneCPU set to <cpu>
+// "feature", "no-feature" - Add (or remove) feature.
+// "+feature", "+nofeature" - Add (or remove) feature.
+ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
+ ParsedTargetAttr Ret;
+ if (Features == "default")
+ return Ret;
+ SmallVector<StringRef, 1> AttrFeatures;
+ Features.split(AttrFeatures, ",");
+ bool FoundArch = false;
+
+ auto SplitAndAddFeatures = [](StringRef FeatString,
+ std::vector<std::string> &Features) {
+ SmallVector<StringRef, 8> SplitFeatures;
+ FeatString.split(SplitFeatures, StringRef("+"), -1, false);
+ for (StringRef Feature : SplitFeatures) {
+ StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
+ if (!FeatureName.empty())
+ Features.push_back(FeatureName.str());
+ else
+ // Pushing the original feature string to give a sema error later on
+ // when they get checked.
+ if (Feature.starts_with("no"))
+ Features.push_back("-" + Feature.drop_front(2).str());
+ else
+ Features.push_back("+" + Feature.str());
+ }
+ };
+
+ for (auto &Feature : AttrFeatures) {
+ Feature = Feature.trim();
+ if (Feature.starts_with("fpmath="))
+ continue;
+
+ if (Feature.starts_with("branch-protection=")) {
+ Ret.BranchProtection = Feature.split('=').second.trim();
+ continue;
+ }
+
+ if (Feature.starts_with("arch=")) {
+ if (FoundArch)
+ Ret.Duplicate = "arch=";
+ FoundArch = true;
+ std::pair<StringRef, StringRef> Split =
+ Feature.split("=").second.trim().split("+");
+ const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Split.first);
+
+ // Parse the architecture version, adding the required features to
+ // Ret.Features.
+ if (!AI)
+ continue;
+ Ret.Features.push_back(AI->ArchFeature.str());
+ // Add any extra features, after the +
+ SplitAndAddFeatures(Split.second, Ret.Features);
+ } else if (Feature.starts_with("cpu=")) {
+ if (!Ret.CPU.empty())
+ Ret.Duplicate = "cpu=";
+ else {
+ // Split the cpu string into "cpu=", "cortex-a710" and any remaining
+ // "+feat" features.
+ std::pair<StringRef, StringRef> Split =
+ Feature.split("=").second.trim().split("+");
+ Ret.CPU = Split.first;
+ SplitAndAddFeatures(Split.second, Ret.Features);
+ }
+ } else if (Feature.starts_with("tune=")) {
+ if (!Ret.Tune.empty())
+ Ret.Duplicate = "tune=";
+ else
+ Ret.Tune = Feature.split("=").second.trim();
+ } else if (Feature.starts_with("+")) {
+ SplitAndAddFeatures(Feature, Ret.Features);
+ } else if (Feature.starts_with("no-")) {
+ StringRef FeatureName =
+ llvm::AArch64::getArchExtFeature(Feature.split("-").second);
+ if (!FeatureName.empty())
+ Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
+ else
+ Ret.Features.push_back("-" + Feature.split("-").second.str());
+ } else {
+ // Try parsing the string to the internal target feature name. If it is
+ // invalid, add the original string (which could already be an internal
+ // name). These should be checked later by isValidFeatureName.
+ StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
+ if (!FeatureName.empty())
+ Ret.Features.push_back(FeatureName.str());
+ else
+ Ret.Features.push_back("+" + Feature.str());
+ }
+ }
+ return Ret;
+}
+bool AArch64TargetInfo::hasBFloat16Type() const {
return true;
}
@@ -582,6 +1171,7 @@ AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_PreserveAll:
case CC_OpenCLKernel:
case CC_AArch64VectorCall:
+ case CC_AArch64SVEPCS:
case CC_Win64:
return CCCR_OK;
default:
@@ -596,6 +1186,8 @@ TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
}
const char *const AArch64TargetInfo::GCCRegNames[] = {
+ // clang-format off
+
// 32-bit Integer registers
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
"w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
@@ -628,11 +1220,20 @@ const char *const AArch64TargetInfo::GCCRegNames[] = {
// SVE predicate registers
"p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
- "p11", "p12", "p13", "p14", "p15"
+ "p11", "p12", "p13", "p14", "p15",
+
+ // SVE predicate-as-counter registers
+ "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
+ "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",
+
+ // SME registers
+ "za", "zt0",
+
+ // clang-format on
};
// Exposes the file-static GCCRegNames table (GCC-style register names,
// defined above) as an ArrayRef.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
@@ -675,7 +1276,53 @@ const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
};
// Exposes the file-static GCCRegAliases table (alternate spellings for the
// registers above) as an ArrayRef.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}
+
// Returns the length of an "@cc<cond>" flag-output constraint (always 5
// characters) when Name is exactly one of the recognized condition codes,
// and 0 otherwise.
static unsigned matchAsmCCConstraint(const char *Name) {
  constexpr unsigned CCConstraintLen = 5;
  static constexpr std::string_view Recognized[] = {
      "@cceq", "@ccne", "@cchs", "@cccs", "@cccc", "@cclo",
      "@ccmi", "@ccpl", "@ccvs", "@ccvc", "@cchi", "@ccls",
      "@ccge", "@cclt", "@ccgt", "@ccle"};
  const std::string_view Candidate(Name);
  for (std::string_view Pattern : Recognized)
    if (Candidate == Pattern)
      return CCConstraintLen;
  return 0;
}
+
+std::string
+AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
+ std::string R;
+ switch (*Constraint) {
+ case 'U': // Three-character constraint; add "@3" hint for later parsing.
+ R = std::string("@3") + std::string(Constraint, 3);
+ Constraint += 2;
+ break;
+ case '@':
+ if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
+ std::string Converted = "{" + std::string(Constraint, Len) + "}";
+ Constraint += Len - 1;
+ return Converted;
+ }
+ return std::string(1, *Constraint);
+ default:
+ R = TargetInfo::convertConstraint(Constraint);
+ break;
+ }
+ return R;
}
bool AArch64TargetInfo::validateAsmConstraint(
@@ -702,8 +1349,15 @@ bool AArch64TargetInfo::validateAsmConstraint(
Info.setAllowsRegister();
return true;
case 'U':
- if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
- // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
+ if (Name[1] == 'p' &&
+ (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
+ // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
+ Info.setAllowsRegister();
+ Name += 2;
+ return true;
+ }
+ if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
+ // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
Info.setAllowsRegister();
Name += 2;
return true;
@@ -725,6 +1379,13 @@ bool AArch64TargetInfo::validateAsmConstraint(
case 'y': // SVE registers (V0-V7)
Info.setAllowsRegister();
return true;
+ case '@':
+ // CC condition
+ if (const unsigned Len = matchAsmCCConstraint(Name)) {
+ Name += Len - 1;
+ Info.setAllowsRegister();
+ return true;
+ }
}
return false;
}
@@ -733,8 +1394,7 @@ bool AArch64TargetInfo::validateConstraintModifier(
StringRef Constraint, char Modifier, unsigned Size,
std::string &SuggestedModifier) const {
// Strip off constraint modifiers.
- while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
- Constraint = Constraint.substr(1);
+ Constraint = Constraint.ltrim("=+&");
switch (Constraint[0]) {
default:
@@ -763,7 +1423,7 @@ bool AArch64TargetInfo::validateConstraintModifier(
}
}
// AArch64 inline assembly has no implicit clobber list, so return the empty
// string.
std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
if (RegNo == 0)
@@ -872,7 +1532,13 @@ MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
- Builder.defineMacro("_M_ARM64", "1");
+ if (getTriple().isWindowsArm64EC()) {
+ Builder.defineMacro("_M_X64", "100");
+ Builder.defineMacro("_M_AMD64", "100");
+ Builder.defineMacro("_M_ARM64EC", "1");
+ } else {
+ Builder.defineMacro("_M_ARM64", "1");
+ }
}
TargetInfo::CallingConvKind
@@ -936,7 +1602,6 @@ void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
else
Builder.defineMacro("__ARM64_ARCH_8__");
Builder.defineMacro("__ARM_NEON__");
- Builder.defineMacro("__LITTLE_ENDIAN__");
Builder.defineMacro("__REGISTER_PREFIX__", "");
Builder.defineMacro("__arm64", "1");
Builder.defineMacro("__arm64__", "1");