author     Dimitry Andric <dim@FreeBSD.org>	2017-06-01 20:58:49 +0000
committer  Dimitry Andric <dim@FreeBSD.org>	2017-06-01 20:58:49 +0000
commit     416ada0f75bab22b084a1776deb229cd4a669c4d (patch)
tree       6eb65f3790434471361628af6199b07a4de92de7
parent     550ae89a710bf458d47e5b1d183f5e7039c2b384 (diff)
download   src-416ada0f75bab22b084a1776deb229cd4a669c4d.tar.gz
           src-416ada0f75bab22b084a1776deb229cd4a669c4d.zip

Vendor import of clang trunk r304460 (tag: vendor/clang/clang-trunk-r304460)
Notes:
    svn path=/vendor/clang/dist/; revision=319463
    svn path=/vendor/clang/clang-trunk-r304460/; revision=319464; tag=vendor/clang/clang-trunk-r304460
-rw-r--r-- docs/Modules.rst | 2
-rw-r--r-- docs/ThinLTO.rst | 2
-rw-r--r-- docs/UndefinedBehaviorSanitizer.rst | 2
-rw-r--r-- include/clang/AST/VTableBuilder.h | 22
-rw-r--r-- include/clang/Basic/DiagnosticGroups.td | 1
-rw-r--r-- include/clang/Basic/DiagnosticIDs.h | 2
-rw-r--r-- include/clang/Basic/DiagnosticSemaKinds.td | 4
-rw-r--r-- include/clang/Basic/Module.h | 4
-rw-r--r-- include/clang/Basic/Sanitizers.def | 7
-rw-r--r-- include/clang/Basic/TokenKinds.def | 2
-rw-r--r-- include/clang/Basic/TypeTraits.h | 1
-rw-r--r-- include/clang/Driver/CLCompatOptions.td | 7
-rw-r--r-- include/clang/Driver/Options.td | 4
-rw-r--r-- include/clang/Lex/HeaderSearch.h | 7
-rw-r--r-- lib/AST/ODRHash.cpp | 28
-rw-r--r-- lib/Basic/Targets.cpp | 16
-rw-r--r-- lib/CodeGen/ABIInfo.h | 1
-rw-r--r-- lib/CodeGen/CGCall.cpp | 18
-rw-r--r-- lib/CodeGen/CGCleanup.cpp | 7
-rw-r--r-- lib/CodeGen/CGExpr.cpp | 60
-rw-r--r-- lib/CodeGen/CGExprScalar.cpp | 210
-rw-r--r-- lib/CodeGen/CGObjCRuntime.cpp | 6
-rw-r--r-- lib/CodeGen/CGVTables.cpp | 2
-rw-r--r-- lib/CodeGen/CodeGenFunction.h | 8
-rw-r--r-- lib/CodeGen/CodeGenModule.cpp | 31
-rw-r--r-- lib/CodeGen/CodeGenModule.h | 11
-rw-r--r-- lib/CodeGen/ItaniumCXXABI.cpp | 47
-rw-r--r-- lib/CodeGen/MicrosoftCXXABI.cpp | 3
-rw-r--r-- lib/CodeGen/TargetInfo.cpp | 22
-rw-r--r-- lib/Driver/ToolChains/Clang.cpp | 63
-rw-r--r-- lib/Driver/ToolChains/Gnu.cpp | 12
-rw-r--r-- lib/Format/UnwrappedLineParser.cpp | 25
-rw-r--r-- lib/Frontend/CompilerInvocation.cpp | 4
-rw-r--r-- lib/Frontend/FrontendAction.cpp | 20
-rw-r--r-- lib/Lex/HeaderSearch.cpp | 17
-rw-r--r-- lib/Sema/SemaCoroutine.cpp | 8
-rw-r--r-- lib/Sema/SemaDecl.cpp | 11
-rw-r--r-- lib/Sema/SemaDeclObjC.cpp | 4
-rw-r--r-- lib/Sema/SemaExprCXX.cpp | 33
-rw-r--r-- lib/Sema/SemaOverload.cpp | 298
-rw-r--r-- lib/Sema/SemaType.cpp | 6
-rw-r--r-- lib/Serialization/ASTWriter.cpp | 9
-rw-r--r-- test/CodeGen/arm_neon_intrinsics.c | 1238
-rw-r--r-- test/CodeGen/ubsan-pointer-overflow.m | 171
-rw-r--r-- test/CodeGenCXX/stmtexpr.cpp | 30
-rw-r--r-- test/CodeGenCXX/strict-vtable-pointers.cpp | 115
-rw-r--r-- test/CodeGenCXX/vtable-available-externally.cpp | 32
-rw-r--r-- test/CodeGenCXX/vtable-linkage.cpp | 6
-rw-r--r-- test/CodeGenCoroutines/coro-await-domination.cpp | 38
-rw-r--r-- test/CodeGenObjC/parameterized_classes.m | 28
-rw-r--r-- test/CodeGenOpenCL/bool_cast.cl | 2
-rw-r--r-- test/CodeGenOpenCL/kernel-attributes.cl | 6
-rw-r--r-- test/CodeGenOpenCL/kernel-metadata.cl | 2
-rw-r--r-- test/CodeGenOpenCL/kernels-have-spir-cc-by-default.cl | 65
-rw-r--r-- test/CodeGenOpenCL/pipe_types.cl | 2
-rw-r--r-- test/CodeGenOpenCL/ptx-calls.cl | 2
-rw-r--r-- test/CodeGenOpenCL/ptx-kernels.cl | 2
-rw-r--r-- test/Driver/arm-cortex-cpus.c | 12
-rw-r--r-- test/Driver/cl-cc-flags.c | 33
-rw-r--r-- test/Driver/cl-diagnostics.c | 28
-rw-r--r-- test/Driver/cl-include.c | 14
-rw-r--r-- test/Driver/cl-zc.cpp | 5
-rw-r--r-- test/Driver/fsanitize.c | 18
-rw-r--r-- test/Driver/gold-lto.c | 2
-rw-r--r-- test/Driver/nacl-direct.c | 2
-rw-r--r-- test/Driver/openmp-offload.c | 8
-rw-r--r-- test/Misc/diag-mapping2.c | 1
-rw-r--r-- test/Modules/odr_hash.cpp | 34
-rw-r--r-- test/Modules/preprocess-module.cpp | 10
-rw-r--r-- test/Modules/preprocess-nested.cpp | 2
-rw-r--r-- test/Modules/preprocess-unavailable.cpp | 2
-rw-r--r-- test/SemaCXX/attr-require-constant-initialization.cpp | 72
-rw-r--r-- test/SemaCXX/coreturn.cpp | 2
-rw-r--r-- test/SemaCXX/coroutine-uninitialized-warning-crash.cpp | 44
-rw-r--r-- test/SemaCXX/coroutines.cpp | 22
-rw-r--r-- test/SemaCXX/type-traits.cpp | 96
-rw-r--r-- test/SemaObjC/attr-deprecated.m | 12
-rw-r--r-- test/SemaObjC/class-unavail-warning.m | 4
-rw-r--r-- test/SemaObjC/warn-deprecated-implementations.m | 5
-rw-r--r-- unittests/Format/FormatTestJS.cpp | 9
-rw-r--r-- utils/TableGen/ClangAttrEmitter.cpp | 50
-rw-r--r-- utils/TableGen/ClangDiagnosticsEmitter.cpp | 4
-rw-r--r-- utils/TableGen/ClangOptionDocEmitter.cpp | 13
-rw-r--r-- utils/TableGen/ClangSACheckersEmitter.cpp | 3
84 files changed, 2274 insertions, 1019 deletions
diff --git a/docs/Modules.rst b/docs/Modules.rst
index b8841c0a5cec..7bd2adf8af52 100644
--- a/docs/Modules.rst
+++ b/docs/Modules.rst
@@ -403,7 +403,7 @@ A *requires-declaration* specifies the requirements that an importing translatio
*feature*:
``!``:sub:`opt` *identifier*
-The requirements clause allows specific modules or submodules to specify that they are only accessible with certain language dialects or on certain platforms. The feature list is a set of identifiers, defined below. If any of the features is not available in a given translation unit, that translation unit shall not import the module. The optional ``!`` indicates that a feature is incompatible with the module.
+The requirements clause allows specific modules or submodules to specify that they are only accessible with certain language dialects or on certain platforms. The feature list is a set of identifiers, defined below. If any of the features is not available in a given translation unit, that translation unit shall not import the module. When building a module for use by a compilation, submodules requiring unavailable features are ignored. The optional ``!`` indicates that a feature is incompatible with the module.
The following features are defined:
diff --git a/docs/ThinLTO.rst b/docs/ThinLTO.rst
index 7c7d9513025a..d417febda502 100644
--- a/docs/ThinLTO.rst
+++ b/docs/ThinLTO.rst
@@ -123,6 +123,8 @@ which currently must be enabled through a linker option.
``-Wl,-plugin-opt,cache-dir=/path/to/cache``
- ld64 (support in clang 3.9 and Xcode 8):
``-Wl,-cache_path_lto,/path/to/cache``
+- lld (as of LLVM r296702):
+ ``-Wl,--thinlto-cache-dir=/path/to/cache``
Clang Bootstrap
---------------
diff --git a/docs/UndefinedBehaviorSanitizer.rst b/docs/UndefinedBehaviorSanitizer.rst
index d6fdad2a0c01..ea776a770470 100644
--- a/docs/UndefinedBehaviorSanitizer.rst
+++ b/docs/UndefinedBehaviorSanitizer.rst
@@ -106,6 +106,8 @@ Available checks are:
invalid pointers. These checks are made in terms of
``__builtin_object_size``, and consequently may be able to detect more
problems at higher optimization levels.
+ - ``-fsanitize=pointer-overflow``: Performing pointer arithmetic which
+ overflows.
- ``-fsanitize=return``: In C++, reaching the end of a
value-returning function without returning a value.
- ``-fsanitize=returns-nonnull-attribute``: Returning null pointer
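For illustration only, not part of this commit: a minimal program the new check flags, assuming a 64-bit target. The byte offset n * sizeof(int) overflows the pointer-width signed integer, which is exactly the condition tested by the instrumentation added later in this commit.

    // Hypothetical build: clang++ -fsanitize=pointer-overflow overflow.cpp
    #include <cstddef>
    #include <cstdint>

    int main() {
      int arr[4];
      int *p = arr;
      std::size_t n = SIZE_MAX / 4;  // byte offset n * 4 wraps intptr_t
      p += n;                        // sanitizer reports pointer overflow here
      return p == arr;
    }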
diff --git a/include/clang/AST/VTableBuilder.h b/include/clang/AST/VTableBuilder.h
index 5cbcf51dd69b..b0b71e473516 100644
--- a/include/clang/AST/VTableBuilder.h
+++ b/include/clang/AST/VTableBuilder.h
@@ -154,6 +154,28 @@ public:
bool isRTTIKind() const { return isRTTIKind(getKind()); }
+ GlobalDecl getGlobalDecl() const {
+ assert(isUsedFunctionPointerKind() &&
+ "GlobalDecl can be created only from virtual function");
+
+ auto *DtorDecl = dyn_cast<CXXDestructorDecl>(getFunctionDecl());
+ switch (getKind()) {
+ case CK_FunctionPointer:
+ return GlobalDecl(getFunctionDecl());
+ case CK_CompleteDtorPointer:
+ return GlobalDecl(DtorDecl, CXXDtorType::Dtor_Complete);
+ case CK_DeletingDtorPointer:
+ return GlobalDecl(DtorDecl, CXXDtorType::Dtor_Deleting);
+ case CK_VCallOffset:
+ case CK_VBaseOffset:
+ case CK_OffsetToTop:
+ case CK_RTTI:
+ case CK_UnusedFunctionPointer:
+ llvm_unreachable("Only function pointers kinds");
+ }
+ llvm_unreachable("Should already return");
+ }
+
private:
static bool isFunctionPointerKind(Kind ComponentKind) {
return isUsedFunctionPointerKind(ComponentKind) ||
diff --git a/include/clang/Basic/DiagnosticGroups.td b/include/clang/Basic/DiagnosticGroups.td
index e1a41584023c..cf404768472f 100644
--- a/include/clang/Basic/DiagnosticGroups.td
+++ b/include/clang/Basic/DiagnosticGroups.td
@@ -733,6 +733,7 @@ def Pedantic : DiagGroup<"pedantic">;
// Aliases.
def : DiagGroup<"", [Extra]>; // -W = -Wextra
def : DiagGroup<"endif-labels", [ExtraTokens]>; // -Wendif-labels=-Wextra-tokens
+def : DiagGroup<"cpp", [PoundWarning]>; // -Wcpp = -W#warnings
def : DiagGroup<"comments", [Comment]>; // -Wcomments = -Wcomment
def : DiagGroup<"conversion-null",
[NullConversion]>; // -Wconversion-null = -Wnull-conversion
diff --git a/include/clang/Basic/DiagnosticIDs.h b/include/clang/Basic/DiagnosticIDs.h
index 7646e33d2366..479d1978c62d 100644
--- a/include/clang/Basic/DiagnosticIDs.h
+++ b/include/clang/Basic/DiagnosticIDs.h
@@ -32,7 +32,7 @@ namespace clang {
DIAG_START_FRONTEND = DIAG_START_DRIVER + 200,
DIAG_START_SERIALIZATION = DIAG_START_FRONTEND + 100,
DIAG_START_LEX = DIAG_START_SERIALIZATION + 120,
- DIAG_START_PARSE = DIAG_START_LEX + 300,
+ DIAG_START_PARSE = DIAG_START_LEX + 400,
DIAG_START_AST = DIAG_START_PARSE + 500,
DIAG_START_COMMENT = DIAG_START_AST + 110,
DIAG_START_SEMA = DIAG_START_COMMENT + 100,
diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td
index 629e8b837f59..4de4f47b8a5a 100644
--- a/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/include/clang/Basic/DiagnosticSemaKinds.td
@@ -8979,10 +8979,10 @@ def err_coroutine_promise_new_requires_nothrow : Error<
def note_coroutine_promise_call_implicitly_required : Note<
"call to %0 implicitly required by coroutine function here">;
def err_await_suspend_invalid_return_type : Error<
- "the return type of 'await_suspend' is required to be 'void' or 'bool' (have %0)"
+ "return type of 'await_suspend' is required to be 'void' or 'bool' (have %0)"
>;
def note_await_ready_no_bool_conversion : Note<
- "the return type of 'await_ready' is required to be contextually convertible to 'bool'"
+ "return type of 'await_ready' is required to be contextually convertible to 'bool'"
>;
}
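As a hedged sketch of what these reworded diagnostics require, an awaiter satisfying both constraints, using the <experimental/coroutine> header from the coroutines TS of this era:

    #include <experimental/coroutine>

    struct Awaiter {
      // Must be contextually convertible to 'bool'.
      bool await_ready() const noexcept { return false; }
      // Return type must be 'void' or 'bool'.
      void await_suspend(std::experimental::coroutine_handle<>) noexcept {}
      void await_resume() const noexcept {}
    };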
diff --git a/include/clang/Basic/Module.h b/include/clang/Basic/Module.h
index 28aa7db52992..326d84eeb6c2 100644
--- a/include/clang/Basic/Module.h
+++ b/include/clang/Basic/Module.h
@@ -83,6 +83,10 @@ public:
/// are found.
const DirectoryEntry *Directory;
+ /// \brief The presumed file name for the module map defining this module.
+ /// Only non-empty when building from preprocessed source.
+ std::string PresumedModuleMapFile;
+
/// \brief The umbrella header or directory.
llvm::PointerUnion<const DirectoryEntry *, const FileEntry *> Umbrella;
diff --git a/include/clang/Basic/Sanitizers.def b/include/clang/Basic/Sanitizers.def
index f20d326e08f8..71b11974dbfd 100644
--- a/include/clang/Basic/Sanitizers.def
+++ b/include/clang/Basic/Sanitizers.def
@@ -73,6 +73,7 @@ SANITIZER("nullability-return", NullabilityReturn)
SANITIZER_GROUP("nullability", Nullability,
NullabilityArg | NullabilityAssign | NullabilityReturn)
SANITIZER("object-size", ObjectSize)
+SANITIZER("pointer-overflow", PointerOverflow)
SANITIZER("return", Return)
SANITIZER("returns-nonnull-attribute", ReturnsNonnullAttribute)
SANITIZER("shift-base", ShiftBase)
@@ -108,9 +109,9 @@ SANITIZER("safe-stack", SafeStack)
SANITIZER_GROUP("undefined", Undefined,
Alignment | Bool | ArrayBounds | Enum | FloatCastOverflow |
FloatDivideByZero | IntegerDivideByZero | NonnullAttribute |
- Null | ObjectSize | Return | ReturnsNonnullAttribute |
- Shift | SignedIntegerOverflow | Unreachable | VLABound |
- Function | Vptr)
+ Null | ObjectSize | PointerOverflow | Return |
+ ReturnsNonnullAttribute | Shift | SignedIntegerOverflow |
+ Unreachable | VLABound | Function | Vptr)
// -fsanitize=undefined-trap is an alias for -fsanitize=undefined.
SANITIZER_GROUP("undefined-trap", UndefinedTrap, Undefined)
diff --git a/include/clang/Basic/TokenKinds.def b/include/clang/Basic/TokenKinds.def
index 968b203a3827..be67663a1015 100644
--- a/include/clang/Basic/TokenKinds.def
+++ b/include/clang/Basic/TokenKinds.def
@@ -411,6 +411,7 @@ TYPE_TRAIT_1(__is_sealed, IsSealed, KEYMS)
// MSVC12.0 / VS2013 Type Traits
TYPE_TRAIT_1(__is_destructible, IsDestructible, KEYMS)
+TYPE_TRAIT_1(__is_trivially_destructible, IsTriviallyDestructible, KEYCXX)
TYPE_TRAIT_1(__is_nothrow_destructible, IsNothrowDestructible, KEYMS)
TYPE_TRAIT_2(__is_nothrow_assignable, IsNothrowAssignable, KEYCXX)
TYPE_TRAIT_N(__is_constructible, IsConstructible, KEYCXX)
@@ -439,7 +440,6 @@ TYPE_TRAIT_2(__is_convertible_to, IsConvertibleTo, KEYCXX)
TYPE_TRAIT_1(__is_empty, IsEmpty, KEYCXX)
TYPE_TRAIT_1(__is_enum, IsEnum, KEYCXX)
TYPE_TRAIT_1(__is_final, IsFinal, KEYCXX)
-// Tentative name - there's no implementation of std::is_literal_type yet.
TYPE_TRAIT_1(__is_literal, IsLiteral, KEYCXX)
// Name for GCC 4.6 compatibility - people have already written libraries using
// this name unfortunately.
diff --git a/include/clang/Basic/TypeTraits.h b/include/clang/Basic/TypeTraits.h
index ffe62559002e..6aadf795d82e 100644
--- a/include/clang/Basic/TypeTraits.h
+++ b/include/clang/Basic/TypeTraits.h
@@ -65,6 +65,7 @@ namespace clang {
UTT_IsStandardLayout,
UTT_IsTrivial,
UTT_IsTriviallyCopyable,
+ UTT_IsTriviallyDestructible,
UTT_IsUnion,
UTT_IsUnsigned,
UTT_IsVoid,
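A sketch of the new builtin's semantics, mirroring the coverage added in test/SemaCXX/type-traits.cpp:

    struct Trivial { int x; };
    struct NonTrivial { ~NonTrivial() {} };

    static_assert(__is_trivially_destructible(Trivial),
                  "implicitly declared destructor is trivial");
    static_assert(!__is_trivially_destructible(NonTrivial),
                  "a user-provided destructor is never trivial");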
diff --git a/include/clang/Driver/CLCompatOptions.td b/include/clang/Driver/CLCompatOptions.td
index d0d9c679de7a..aebb36ed0e2b 100644
--- a/include/clang/Driver/CLCompatOptions.td
+++ b/include/clang/Driver/CLCompatOptions.td
@@ -63,6 +63,12 @@ def _SLASH_C : CLFlag<"C">,
def _SLASH_c : CLFlag<"c">, HelpText<"Compile only">, Alias<c>;
def _SLASH_d1reportAllClassLayout : CLFlag<"d1reportAllClassLayout">,
HelpText<"Dump record layout information">, Alias<fdump_record_layouts>;
+def _SLASH_diagnostics_caret : CLFlag<"diagnostics:caret">,
+ HelpText<"Enable caret and column diagnostics (on by default)">;
+def _SLASH_diagnostics_column : CLFlag<"diagnostics:column">,
+ HelpText<"Disable caret diagnostics but keep column info">;
+def _SLASH_diagnostics_classic : CLFlag<"diagnostics:classic">,
+ HelpText<"Disable column and caret diagnostics">;
def _SLASH_D : CLJoinedOrSeparate<"D">, HelpText<"Define macro">,
MetaVarName<"<macro[=value]>">, Alias<D>;
def _SLASH_E : CLFlag<"E">, HelpText<"Preprocess to stdout">, Alias<E>;
@@ -324,6 +330,7 @@ def _SLASH_Zc_forScope : CLIgnoredFlag<"Zc:forScope">;
def _SLASH_Zc_inline : CLIgnoredFlag<"Zc:inline">;
def _SLASH_Zc_rvalueCast : CLIgnoredFlag<"Zc:rvalueCast">;
def _SLASH_Zc_wchar_t : CLIgnoredFlag<"Zc:wchar_t">;
+def _SLASH_Zc_ternary : CLIgnoredFlag<"Zc:ternary">;
def _SLASH_Zm : CLIgnoredJoined<"Zm">;
def _SLASH_Zo : CLIgnoredFlag<"Zo">;
def _SLASH_Zo_ : CLIgnoredFlag<"Zo-">;
diff --git a/include/clang/Driver/Options.td b/include/clang/Driver/Options.td
index 2de4a2d1b419..12e36cc52b0b 100644
--- a/include/clang/Driver/Options.td
+++ b/include/clang/Driver/Options.td
@@ -2084,7 +2084,7 @@ def no_cpp_precomp : Flag<["-"], "no-cpp-precomp">, Group<clang_ignored_f_Group>
def no_integrated_cpp : Flag<["-", "--"], "no-integrated-cpp">, Flags<[DriverOption]>;
def no_pedantic : Flag<["-", "--"], "no-pedantic">, Group<pedantic_Group>;
def no__dead__strip__inits__and__terms : Flag<["-"], "no_dead_strip_inits_and_terms">;
-def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option]>,
+def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option, CoreOption]>,
HelpText<"Disable builtin #include directories">;
def nocudainc : Flag<["-"], "nocudainc">;
def nocudalib : Flag<["-"], "nocudalib">;
@@ -2096,7 +2096,7 @@ def nopie : Flag<["-"], "nopie">;
def noprebind : Flag<["-"], "noprebind">;
def noseglinkedit : Flag<["-"], "noseglinkedit">;
def nostartfiles : Flag<["-"], "nostartfiles">;
-def nostdinc : Flag<["-"], "nostdinc">;
+def nostdinc : Flag<["-"], "nostdinc">, Flags<[CoreOption]>;
def nostdlibinc : Flag<["-"], "nostdlibinc">;
def nostdincxx : Flag<["-"], "nostdinc++">, Flags<[CC1Option]>,
HelpText<"Disable standard #include directories for the C++ standard library">;
diff --git a/include/clang/Lex/HeaderSearch.h b/include/clang/Lex/HeaderSearch.h
index 6e24e1893ab6..ee17dcbb8b5a 100644
--- a/include/clang/Lex/HeaderSearch.h
+++ b/include/clang/Lex/HeaderSearch.h
@@ -543,10 +543,13 @@ public:
/// \param Offset [inout] An offset within ID to start parsing. On exit,
/// filled by the end of the parsed contents (either EOF or the
/// location of an end-of-module-map pragma).
- ///
+ /// \param OriginalModuleMapFile The original path to the module map file,
+ /// used to resolve paths within the module (this is required when
+ /// building the module from preprocessed source).
/// \returns true if an error occurred, false otherwise.
bool loadModuleMapFile(const FileEntry *File, bool IsSystem,
- FileID ID = FileID(), unsigned *Offset = nullptr);
+ FileID ID = FileID(), unsigned *Offset = nullptr,
+ StringRef OriginalModuleMapFile = StringRef());
/// \brief Collect the set of all known, top-level modules.
///
diff --git a/lib/AST/ODRHash.cpp b/lib/AST/ODRHash.cpp
index 24371db64d07..0e822ce35b8c 100644
--- a/lib/AST/ODRHash.cpp
+++ b/lib/AST/ODRHash.cpp
@@ -110,7 +110,24 @@ void ODRHash::AddNestedNameSpecifier(const NestedNameSpecifier *NNS) {
}
}
-void ODRHash::AddTemplateName(TemplateName Name) {}
+void ODRHash::AddTemplateName(TemplateName Name) {
+ auto Kind = Name.getKind();
+ ID.AddInteger(Kind);
+
+ switch (Kind) {
+ case TemplateName::Template:
+ AddDecl(Name.getAsTemplateDecl());
+ break;
+ // TODO: Support these cases.
+ case TemplateName::OverloadedTemplate:
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::DependentTemplate:
+ case TemplateName::SubstTemplateTemplateParm:
+ case TemplateName::SubstTemplateTemplateParmPack:
+ break;
+ }
+}
+
void ODRHash::AddTemplateArgument(TemplateArgument TA) {}
void ODRHash::AddTemplateParameterList(const TemplateParameterList *TPL) {}
@@ -492,6 +509,15 @@ public:
AddQualType(T->getNamedType());
VisitTypeWithKeyword(T);
}
+
+ void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
+ ID.AddInteger(T->getNumArgs());
+ for (const auto &TA : T->template_arguments()) {
+ Hash.AddTemplateArgument(TA);
+ }
+ Hash.AddTemplateName(T->getTemplateName());
+ VisitType(T);
+ }
};
void ODRHash::AddType(const Type *T) {
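Why hashing the template name matters, as a sketch: two modules declaring the "same" record with members that name different templates now get different ODR hashes, in the spirit of the new cases in test/Modules/odr_hash.cpp. AddTemplateArgument is still a stub at this point, so the template name and argument count are what differentiate.

    template <typename T> struct W1 {};
    template <typename T> struct W2 {};

    // Module A would contain: struct S { W1<int> field; };
    // Module B would contain: struct S { W2<int> field; };
    // VisitTemplateSpecializationType feeds W1 vs. W2 into AddTemplateName,
    // so the two definitions of S no longer hash identically.
    struct S { W1<int> field; };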
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 6be83d22a256..605f73802afb 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -3123,6 +3123,7 @@ public:
case CC_Swift:
case CC_X86Pascal:
case CC_IntelOclBicc:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -4834,6 +4835,7 @@ public:
case CC_PreserveMost:
case CC_PreserveAll:
case CC_X86RegCall:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -4907,6 +4909,7 @@ public:
case CC_X86_64SysV:
case CC_Swift:
case CC_X86RegCall:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -5860,6 +5863,7 @@ public:
case CC_AAPCS:
case CC_AAPCS_VFP:
case CC_Swift:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -6019,6 +6023,7 @@ public:
case CC_X86VectorCall:
return CCCR_Ignore;
case CC_C:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -6329,6 +6334,7 @@ public:
case CC_Swift:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -7380,6 +7386,7 @@ public:
switch (CC) {
case CC_C:
case CC_Swift:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -7663,6 +7670,15 @@ public:
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
return None;
}
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ case CC_OpenCLKernel:
+ return CCCR_OK;
+ }
+ }
};
class MipsTargetInfo : public TargetInfo {
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index c0be60ef53bc..e4dce2f2a004 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -149,7 +149,6 @@ namespace swiftcall {
return info->supportsSwift();
}
};
-
} // end namespace CodeGen
} // end namespace clang
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index c677d9887acc..8f405eee6e52 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -707,6 +707,12 @@ CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
signature.getRequiredArgs());
}
+namespace clang {
+namespace CodeGen {
+void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
+}
+}
+
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
@@ -741,12 +747,16 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
bool inserted = FunctionsBeingProcessed.insert(FI).second;
(void)inserted;
assert(inserted && "Recursively being processed?");
-
+
// Compute ABI information.
- if (info.getCC() != CC_Swift) {
- getABIInfo().computeInfo(*FI);
- } else {
+ if (CC == llvm::CallingConv::SPIR_KERNEL) {
+ // Force target-independent argument handling for the host-visible
+ // kernel functions.
+ computeSPIRKernelABIInfo(CGM, *FI);
+ } else if (info.getCC() == CC_Swift) {
swiftcall::computeABIInfo(CGM, *FI);
+ } else {
+ getABIInfo().computeInfo(*FI);
}
// Loop over all of the computed argument and return value info. If any of
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index e8bcf0a3ac56..b5453bc11e30 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -448,6 +448,13 @@ void CodeGenFunction::PopCleanupBlocks(
auto *Inst = dyn_cast_or_null<llvm::Instruction>(*ReloadedValue);
if (!Inst)
continue;
+
+ // Don't spill static allocas; they dominate all cleanups. These are created
+ // by binding a reference to a local variable or temporary.
+ auto *AI = dyn_cast<llvm::AllocaInst>(Inst);
+ if (AI && AI->isStaticAlloca())
+ continue;
+
Address Tmp =
CreateDefaultAlignTempAlloca(Inst->getType(), "tmp.exprcleanup");
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index b918a663ce5c..84ce896506d5 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -3002,9 +3002,10 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
llvm::Value *ptr,
ArrayRef<llvm::Value*> indices,
bool inbounds,
+ SourceLocation loc,
const llvm::Twine &name = "arrayidx") {
if (inbounds) {
- return CGF.Builder.CreateInBoundsGEP(ptr, indices, name);
+ return CGF.EmitCheckedInBoundsGEP(ptr, indices, loc, name);
} else {
return CGF.Builder.CreateGEP(ptr, indices, name);
}
@@ -3035,8 +3036,9 @@ static QualType getFixedSizeElementType(const ASTContext &ctx,
}
static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
- ArrayRef<llvm::Value*> indices,
+ ArrayRef<llvm::Value *> indices,
QualType eltType, bool inbounds,
+ SourceLocation loc,
const llvm::Twine &name = "arrayidx") {
// All the indices except that last must be zero.
#ifndef NDEBUG
@@ -3057,7 +3059,7 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
llvm::Value *eltPtr =
- emitArraySubscriptGEP(CGF, addr.getPointer(), indices, inbounds, name);
+ emitArraySubscriptGEP(CGF, addr.getPointer(), indices, inbounds, loc, name);
return Address(eltPtr, eltAlign);
}
@@ -3110,7 +3112,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
Address Addr = EmitExtVectorElementLValue(LV);
QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
- Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true);
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
+ E->getExprLoc());
return MakeAddrLValue(Addr, EltType, LV.getBaseInfo());
}
@@ -3138,7 +3141,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
}
Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
@@ -3163,8 +3167,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// Do the GEP.
CharUnits EltAlign =
getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
- llvm::Value *EltPtr =
- emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false);
+ llvm::Value *EltPtr = emitArraySubscriptGEP(
+ *this, Addr.getPointer(), ScaledIdx, false, E->getExprLoc());
Addr = Address(EltPtr, EltAlign);
// Cast back.
@@ -3189,14 +3193,16 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
Addr = emitArraySubscriptGEP(*this, ArrayLV.getAddress(),
{CGM.getSize(CharUnits::Zero()), Idx},
E->getType(),
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
BaseInfo = ArrayLV.getBaseInfo();
} else {
// The base must be a pointer; emit it with an estimate of its alignment.
Addr = EmitPointerWithAlignment(E->getBase(), &BaseInfo);
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
}
LValue LV = MakeAddrLValue(Addr, E->getType(), BaseInfo);
@@ -3368,7 +3374,8 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
else
Idx = Builder.CreateNSWMul(Idx, NumElements);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be a ArrayToPointerDecay implicit cast. While correct, it is
@@ -3387,13 +3394,15 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
// Propagate the alignment from the array itself to the result.
EltPtr = emitArraySubscriptGEP(
*this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
- ResultExprTy, !getLangOpts().isSignedOverflowDefined());
+ ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
BaseInfo = ArrayLV.getBaseInfo();
} else {
Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
BaseTy, ResultExprTy, IsLowerBound);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
}
return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo);
@@ -3530,6 +3539,25 @@ static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
return CGF.Builder.CreateStructGEP(base, idx, offset, field->getName());
}
+static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
+ const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+
+ if (RD->isDynamicClass())
+ return true;
+
+ for (const auto &Base : RD->bases())
+ if (hasAnyVptr(Base.getType(), Context))
+ return true;
+
+ for (const FieldDecl *Field : RD->fields())
+ if (hasAnyVptr(Field->getType(), Context))
+ return true;
+
+ return false;
+}
+
LValue CodeGenFunction::EmitLValueForField(LValue base,
const FieldDecl *field) {
LValueBaseInfo BaseInfo = base.getBaseInfo();
@@ -3572,6 +3600,14 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
assert(!type->isReferenceType() && "union has reference member");
// TODO: handle path-aware TBAA for union.
TBAAPath = false;
+
+ const auto FieldType = field->getType();
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ hasAnyVptr(FieldType, getContext()))
+ // Because unions can easily skip invariant.group barriers, we need to add
+ // a barrier every time a CXXRecord field with a vptr is referenced.
+ addr = Address(Builder.CreateInvariantGroupBarrier(addr.getPointer()),
+ addr.getAlignment());
} else {
// For structs, we GEP to the field that the record layout suggests.
addr = emitAddrOfFieldStorage(*this, addr, field);
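A hedged sketch of the pattern the new barrier guards under -fstrict-vtable-pointers: the active member of a union can change, so a vptr loaded through a union member must not be treated as invariant across that change.

    struct Dynamic { virtual void f() {} };

    union U {
      Dynamic d;   // member with a vptr: accesses via u.d now get a barrier
      int i;
      U() : i(0) {}
      ~U() {}
    };

    // Without the invariant.group barrier, the vptr load in this call could
    // be wrongly reused after the active member (and its vptr) is replaced.
    void touch(U &u) { u.d.f(); }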
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 048b50d8261d..d604b4130a23 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -30,6 +30,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
@@ -44,6 +45,43 @@ using llvm::Value;
//===----------------------------------------------------------------------===//
namespace {
+
+/// Determine whether the given binary operation may overflow.
+/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
+/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
+/// the returned overflow check is precise. The returned value is 'true' for
+/// all other opcodes, to be conservative.
+bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
+ BinaryOperator::Opcode Opcode, bool Signed,
+ llvm::APInt &Result) {
+ // Assume overflow is possible, unless we can prove otherwise.
+ bool Overflow = true;
+ const auto &LHSAP = LHS->getValue();
+ const auto &RHSAP = RHS->getValue();
+ if (Opcode == BO_Add) {
+ if (Signed)
+ Result = LHSAP.sadd_ov(RHSAP, Overflow);
+ else
+ Result = LHSAP.uadd_ov(RHSAP, Overflow);
+ } else if (Opcode == BO_Sub) {
+ if (Signed)
+ Result = LHSAP.ssub_ov(RHSAP, Overflow);
+ else
+ Result = LHSAP.usub_ov(RHSAP, Overflow);
+ } else if (Opcode == BO_Mul) {
+ if (Signed)
+ Result = LHSAP.smul_ov(RHSAP, Overflow);
+ else
+ Result = LHSAP.umul_ov(RHSAP, Overflow);
+ } else if (Opcode == BO_Div || Opcode == BO_Rem) {
+ if (Signed && !RHS->isZero())
+ Result = LHSAP.sdiv_ov(RHSAP, Overflow);
+ else
+ return false;
+ }
+ return Overflow;
+}
+
struct BinOpInfo {
Value *LHS;
Value *RHS;
@@ -55,37 +93,14 @@ struct BinOpInfo {
/// Check if the binop can result in integer overflow.
bool mayHaveIntegerOverflow() const {
// Without constant input, we can't rule out overflow.
- const auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
- const auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
+ auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
+ auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
if (!LHSCI || !RHSCI)
return true;
- // Assume overflow is possible, unless we can prove otherwise.
- bool Overflow = true;
- const auto &LHSAP = LHSCI->getValue();
- const auto &RHSAP = RHSCI->getValue();
- if (Opcode == BO_Add) {
- if (Ty->hasSignedIntegerRepresentation())
- (void)LHSAP.sadd_ov(RHSAP, Overflow);
- else
- (void)LHSAP.uadd_ov(RHSAP, Overflow);
- } else if (Opcode == BO_Sub) {
- if (Ty->hasSignedIntegerRepresentation())
- (void)LHSAP.ssub_ov(RHSAP, Overflow);
- else
- (void)LHSAP.usub_ov(RHSAP, Overflow);
- } else if (Opcode == BO_Mul) {
- if (Ty->hasSignedIntegerRepresentation())
- (void)LHSAP.smul_ov(RHSAP, Overflow);
- else
- (void)LHSAP.umul_ov(RHSAP, Overflow);
- } else if (Opcode == BO_Div || Opcode == BO_Rem) {
- if (Ty->hasSignedIntegerRepresentation() && !RHSCI->isZero())
- (void)LHSAP.sdiv_ov(RHSAP, Overflow);
- else
- return false;
- }
- return Overflow;
+ llvm::APInt Result;
+ return ::mayHaveIntegerOverflow(
+ LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
}
/// Check if the binop computes a division or a remainder.
@@ -1925,7 +1940,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, numElts, "vla.inc");
else
- value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
+ value = CGF.EmitCheckedInBoundsGEP(value, numElts, E->getExprLoc(),
+ "vla.inc");
// Arithmetic on function pointers (!) is just +-1.
} else if (type->isFunctionType()) {
@@ -1935,7 +1951,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.funcptr");
else
- value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
+ value = CGF.EmitCheckedInBoundsGEP(value, amt, E->getExprLoc(),
+ "incdec.funcptr");
value = Builder.CreateBitCast(value, input->getType());
// For everything else, we can just do a simple increment.
@@ -1944,7 +1961,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.ptr");
else
- value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
+ value = CGF.EmitCheckedInBoundsGEP(value, amt, E->getExprLoc(),
+ "incdec.ptr");
}
// Vector increment/decrement.
@@ -2025,7 +2043,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
else
- value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
+ value = CGF.EmitCheckedInBoundsGEP(value, sizeValue, E->getExprLoc(),
+ "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
@@ -2692,7 +2711,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
} else {
index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
- pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ pointer = CGF.EmitCheckedInBoundsGEP(pointer, index, op.E->getExprLoc(),
+ "add.ptr");
}
return pointer;
}
@@ -2709,7 +2729,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
if (CGF.getLangOpts().isSignedOverflowDefined())
return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
- return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ return CGF.EmitCheckedInBoundsGEP(pointer, index, op.E->getExprLoc(),
+ "add.ptr");
}
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
@@ -3824,3 +3845,124 @@ LValue CodeGenFunction::EmitCompoundAssignmentLValue(
llvm_unreachable("Unhandled compound assignment operator");
}
+
+Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr,
+ ArrayRef<Value *> IdxList,
+ SourceLocation Loc,
+ const Twine &Name) {
+ Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
+
+ // If the pointer overflow sanitizer isn't enabled, do nothing.
+ if (!SanOpts.has(SanitizerKind::PointerOverflow))
+ return GEPVal;
+
+ // If the GEP has already been reduced to a constant, leave it be.
+ if (isa<llvm::Constant>(GEPVal))
+ return GEPVal;
+
+ // Only check for overflows in the default address space.
+ if (GEPVal->getType()->getPointerAddressSpace())
+ return GEPVal;
+
+ auto *GEP = cast<llvm::GEPOperator>(GEPVal);
+ assert(GEP->isInBounds() && "Expected inbounds GEP");
+
+ SanitizerScope SanScope(this);
+ auto &VMContext = getLLVMContext();
+ const auto &DL = CGM.getDataLayout();
+ auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
+
+ // Grab references to the signed add/mul overflow intrinsics for intptr_t.
+ auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
+ auto *SAddIntrinsic =
+ CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
+ auto *SMulIntrinsic =
+ CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
+
+ // The total (signed) byte offset for the GEP.
+ llvm::Value *TotalOffset = nullptr;
+ // The offset overflow flag - true if the total offset overflows.
+ llvm::Value *OffsetOverflows = Builder.getFalse();
+
+ /// Return the result of the given binary operation.
+ auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
+ llvm::Value *RHS) -> llvm::Value * {
+ assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
+
+ // If the operands are constants, return a constant result.
+ if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
+ if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
+ llvm::APInt N;
+ bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
+ /*Signed=*/true, N);
+ if (HasOverflow)
+ OffsetOverflows = Builder.getTrue();
+ return llvm::ConstantInt::get(VMContext, N);
+ }
+ }
+
+ // Otherwise, compute the result with checked arithmetic.
+ auto *ResultAndOverflow = Builder.CreateCall(
+ (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
+ OffsetOverflows = Builder.CreateOr(
+ OffsetOverflows, Builder.CreateExtractValue(ResultAndOverflow, 1));
+ return Builder.CreateExtractValue(ResultAndOverflow, 0);
+ };
+
+ // Determine the total byte offset by looking at each GEP operand.
+ for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
+ GTI != GTE; ++GTI) {
+ llvm::Value *LocalOffset;
+ auto *Index = GTI.getOperand();
+ // Compute the local offset contributed by this indexing step:
+ if (auto *STy = GTI.getStructTypeOrNull()) {
+ // For struct indexing, the local offset is the byte position of the
+ // specified field.
+ unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
+ LocalOffset = llvm::ConstantInt::get(
+ IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
+ } else {
+ // Otherwise this is array-like indexing. The local offset is the index
+ // multiplied by the element size.
+ auto *ElementSize = llvm::ConstantInt::get(
+ IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
+ auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
+ LocalOffset = eval(BO_Mul, ElementSize, IndexS);
+ }
+
+ // If this is the first offset, set it as the total offset. Otherwise, add
+ // the local offset into the running total.
+ if (!TotalOffset || TotalOffset == Zero)
+ TotalOffset = LocalOffset;
+ else
+ TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
+ }
+
+ // Common case: if the total offset is zero, don't emit a check.
+ if (TotalOffset == Zero)
+ return GEPVal;
+
+ // Now that we've computed the total offset, add it to the base pointer (with
+ // wrapping semantics).
+ auto *IntPtr = Builder.CreatePtrToInt(GEP->getPointerOperand(), IntPtrTy);
+ auto *ComputedGEP = Builder.CreateAdd(IntPtr, TotalOffset);
+
+ // The GEP is valid if:
+ // 1) The total offset doesn't overflow, and
+ // 2) The sign of the difference between the computed address and the base
+ // pointer matches the sign of the total offset.
+ llvm::Value *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
+ llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
+ auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero);
+ llvm::Value *ValidGEP = Builder.CreateAnd(
+ Builder.CreateNot(OffsetOverflows),
+ Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid));
+
+ llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
+ // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
+ llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
+ EmitCheck(std::make_pair(ValidGEP, SanitizerKind::PointerOverflow),
+ SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
+
+ return GEPVal;
+}
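Both the hoisted helper and the new GEP check lean on APInt's overflow-reporting arithmetic; a standalone sketch of that API, assuming LLVM's headers are on the include path:

    #include "llvm/ADT/APInt.h"
    #include <cstdio>

    int main() {
      bool Overflow = false;
      llvm::APInt LHS(/*numBits=*/32, 0x7fffffffu);
      llvm::APInt RHS(/*numBits=*/32, 1);
      // sadd_ov returns the wrapped result and sets Overflow on signed
      // overflow -- the same primitive mayHaveIntegerOverflow uses above.
      llvm::APInt Sum = LHS.sadd_ov(RHS, Overflow);
      std::printf("sum=0x%llx overflow=%d\n",
                  (unsigned long long)Sum.getZExtValue(), Overflow ? 1 : 0);
      return 0;
    }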
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 3e3d04672357..b5599dad3096 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -90,7 +90,11 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- QualType IvarTy = Ivar->getType().withCVRQualifiers(CVRQualifiers);
+ QualType InterfaceTy{OID->getTypeForDecl(), 0};
+ QualType ObjectPtrTy =
+ CGF.CGM.getContext().getObjCObjectPointerType(InterfaceTy);
+ QualType IvarTy =
+ Ivar->getUsageType(ObjectPtrTy).withCVRQualifiers(CVRQualifiers);
llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 1869c0e809df..64b6d0d3fe9f 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -901,6 +901,8 @@ void CodeGenModule::EmitDeferredVTables() {
for (const CXXRecordDecl *RD : DeferredVTables)
if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
VTables.GenerateClassData(RD);
+ else if (shouldOpportunisticallyEmitVTables())
+ OpportunisticVTables.push_back(RD);
assert(savedSize == DeferredVTables.size() &&
"deferred extra vtables during vtable emission?");
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 526ef9a1e579..42ffd0d3efcc 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -120,6 +120,7 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
SANITIZER_CHECK(NonnullReturn, nonnull_return, 0) \
SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
+ SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
@@ -3551,6 +3552,13 @@ public:
/// nonnull, if \p LHS is marked _Nonnull.
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
+ /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
+ /// detect undefined behavior when the pointer overflow sanitizer is enabled.
+ llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
+ ArrayRef<llvm::Value *> IdxList,
+ SourceLocation Loc,
+ const Twine &Name = "");
+
/// \brief Emit a description of a type in a format suitable for passing to
/// a runtime sanitizer handler.
llvm::Constant *EmitCheckTypeDescriptor(QualType T);
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index e4e5fce02279..c61a5f6ffa71 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -382,6 +382,7 @@ void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
void CodeGenModule::Release() {
EmitDeferred();
+ EmitVTablesOpportunistically();
applyGlobalValReplacements();
applyReplacements();
checkAliases();
@@ -472,10 +473,10 @@ void CodeGenModule::Release() {
// Width of wchar_t in bytes
uint64_t WCharWidth =
Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
- assert(LangOpts.ShortWChar ||
- llvm::TargetLibraryInfoImpl::getTargetWCharSize(Target.getTriple()) ==
- Target.getWCharWidth() / 8 &&
- "LLVM wchar_t size out of sync");
+ assert((LangOpts.ShortWChar ||
+ llvm::TargetLibraryInfoImpl::getTargetWCharSize(Target.getTriple()) ==
+ Target.getWCharWidth() / 8) &&
+ "LLVM wchar_t size out of sync");
// We need to record the widths of enums and wchar_t, so that we can generate
// the correct build attributes in the ARM backend. wchar_size is also used by
@@ -1386,6 +1387,24 @@ void CodeGenModule::EmitDeferred() {
}
}
+void CodeGenModule::EmitVTablesOpportunistically() {
+ // Try to emit external vtables as available_externally if all of their
+ // inline virtual functions have been emitted. This runs after EmitDeferred()
+ // and therefore is not allowed to create new references to things that need
+ // to be emitted lazily. Note that it also relies on the fact that we emit
+ // RTTI eagerly.
+
+ assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
+ && "Only emit opportunistic vtables with optimizations");
+
+ for (const CXXRecordDecl *RD : OpportunisticVTables) {
+ assert(getVTables().isVTableExternal(RD) &&
+ "This queue should only contain external vtables");
+ if (getCXXABI().canSpeculativelyEmitVTable(RD))
+ VTables.GenerateClassData(RD);
+ }
+ OpportunisticVTables.clear();
+}
+
void CodeGenModule::EmitGlobalAnnotations() {
if (Annotations.empty())
return;
@@ -1906,6 +1925,10 @@ bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
return !isTriviallyRecursive(F);
}
+bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
+ return CodeGenOpts.OptimizationLevel > 0;
+}
+
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
const auto *D = cast<ValueDecl>(GD.getDecl());
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index e38337814ebc..0a71c635e8f0 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -341,6 +341,9 @@ private:
/// A queue of (optional) vtables to consider emitting.
std::vector<const CXXRecordDecl*> DeferredVTables;
+ /// A queue of (optional) vtables that may be emitted opportunistically.
+ std::vector<const CXXRecordDecl *> OpportunisticVTables;
+
/// List of global values which are required to be present in the object file;
/// bitcast to i8*. This is used for forcing visibility of symbols which may
/// otherwise be optimized out.
@@ -450,7 +453,7 @@ private:
bool isTriviallyRecursive(const FunctionDecl *F);
bool shouldEmitFunction(GlobalDecl GD);
-
+ bool shouldOpportunisticallyEmitVTables();
/// Map used to be sure we don't emit the same CompoundLiteral twice.
llvm::DenseMap<const CompoundLiteralExpr *, llvm::GlobalVariable *>
EmittedCompoundLiterals;
@@ -1278,6 +1281,12 @@ private:
/// Emit any needed decls for which code generation was deferred.
void EmitDeferred();
+ /// Try to emit external vtables as available_externally if all of their
+ /// inline virtual functions have been emitted. This runs after EmitDeferred()
+ /// and therefore is not allowed to create new references to things that
+ /// need to be emitted lazily.
+ void EmitVTablesOpportunistically();
+
/// Call replaceAllUsesWith on all pairs in Replacements.
void applyReplacements();
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 66f51305430a..62b0e6155f99 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -366,20 +366,30 @@ public:
void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
private:
- bool hasAnyVirtualInlineFunction(const CXXRecordDecl *RD) const {
- const auto &VtableLayout =
- CGM.getItaniumVTableContext().getVTableLayout(RD);
-
- for (const auto &VtableComponent : VtableLayout.vtable_components()) {
- // Skip empty slot.
- if (!VtableComponent.isUsedFunctionPointerKind())
- continue;
-
- const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
- if (Method->getCanonicalDecl()->isInlined())
- return true;
- }
- return false;
+ bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
+ const auto &VtableLayout =
+ CGM.getItaniumVTableContext().getVTableLayout(RD);
+
+ for (const auto &VtableComponent : VtableLayout.vtable_components()) {
+ // Skip empty slot.
+ if (!VtableComponent.isUsedFunctionPointerKind())
+ continue;
+
+ const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
+ if (!Method->getCanonicalDecl()->isInlined())
+ continue;
+
+ StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
+ auto *Entry = CGM.GetGlobalValue(Name);
+ // This checks whether the virtual inline function has already been emitted.
+ // Note that it is possible for this inline function to be emitted only
+ // after we have tried to emit the vtable speculatively. Because of this we
+ // do an extra pass after emitting all deferred vtables, to find and emit
+ // these vtables opportunistically.
+ if (!Entry || Entry->isDeclaration())
+ return true;
+ }
+ return false;
}
bool isVTableHidden(const CXXRecordDecl *RD) const {
@@ -1687,11 +1697,11 @@ bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
if (CGM.getLangOpts().AppleKext)
return false;
- // If we don't have any inline virtual functions, and if vtable is not hidden,
- // then we are safe to emit available_externally copy of vtable.
+ // If all inline virtual functions have been emitted and the vtable is not
+ // hidden, then we are safe to emit an available_externally copy of the
+ // vtable.
// FIXME we can still emit a copy of the vtable if we
// can emit definition of the inline functions.
- return !hasAnyVirtualInlineFunction(RD) && !isVTableHidden(RD);
+ return !hasAnyUnusedVirtualInlineFunction(RD) && !isVTableHidden(RD);
}
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
Address InitialPtr,
@@ -2576,6 +2586,9 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
if (!GV) {
// Create a new global variable.
+ // Note for the future: If we ever want to do deferred emission of RTTI,
+ // check whether emitting vtables opportunistically needs any adjustment.
+
GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
/*Constant=*/true,
llvm::GlobalValue::ExternalLinkage, nullptr,
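A hedged sketch of the shape this optimization targets: the vtable of S is external because its key function is defined in another TU, but once this TU emits the inline virtual function, an available_externally copy of the vtable can be emitted after EmitDeferred(), enabling devirtualization at -O1 and above.

    struct S {
      virtual void key();        // defined elsewhere: the vtable is external here
      virtual void inlined() {}  // inline virtual; may be emitted in this TU
    };

    void use(S &s) { s.inlined(); }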
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 4cacf494e694..ff5aca88131e 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -3756,6 +3756,9 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+ // Note for the future: If we ever want to do deferred emission of RTTI,
+ // check whether emitting vtables opportunistically needs any adjustment.
+
// Compute the fields for the TypeDescriptor.
SmallString<256> TypeInfoString;
{
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index d0ba74119b7d..427ec06a2fff 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -398,7 +398,17 @@ TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
}
unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
- return llvm::CallingConv::C;
+ // OpenCL kernels are called via an explicit runtime API with arguments
+ // set with clSetKernelArg(), not as normal sub-functions.
+ // Return SPIR_KERNEL by default as the kernel calling convention to
+ // ensure the fingerprint is fixed in such a way that each OpenCL argument
+ // gets one matching argument in the produced kernel function's argument
+ // list. This enables a feasible implementation of clSetKernelArg() with
+ // aggregates etc. If we used the default C calling convention here,
+ // clSetKernelArg() might break depending on target-specific conventions;
+ // different targets might split structs passed by value into multiple
+ // function arguments etc.
+ return llvm::CallingConv::SPIR_KERNEL;
}
llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
@@ -8068,8 +8078,18 @@ public:
CodeGen::CodeGenModule &M) const override;
unsigned getOpenCLKernelCallingConv() const override;
};
+
} // End anonymous namespace.
+namespace clang {
+namespace CodeGen {
+void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
+ DefaultABIInfo SPIRABI(CGM.getTypes());
+ SPIRABI.computeInfo(FI);
+}
+}
+}
+
/// Emit SPIR specific metadata: OpenCL and SPIR version.
void SPIRTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const {
diff --git a/lib/Driver/ToolChains/Clang.cpp b/lib/Driver/ToolChains/Clang.cpp
index 555847aeeb23..698c3aa326cb 100644
--- a/lib/Driver/ToolChains/Clang.cpp
+++ b/lib/Driver/ToolChains/Clang.cpp
@@ -3985,9 +3985,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< value;
}
+ bool CaretDefault = true;
+ bool ColumnDefault = true;
+ if (Arg *DiagArg = Args.getLastArg(options::OPT__SLASH_diagnostics_classic,
+ options::OPT__SLASH_diagnostics_column,
+ options::OPT__SLASH_diagnostics_caret)) {
+ switch (DiagArg->getOption().getID()) {
+ case options::OPT__SLASH_diagnostics_caret:
+ CaretDefault = true;
+ ColumnDefault = true;
+ break;
+ case options::OPT__SLASH_diagnostics_column:
+ CaretDefault = false;
+ ColumnDefault = true;
+ break;
+ case options::OPT__SLASH_diagnostics_classic:
+ CaretDefault = false;
+ ColumnDefault = false;
+ break;
+ }
+ }
+
// -fcaret-diagnostics is default.
if (!Args.hasFlag(options::OPT_fcaret_diagnostics,
- options::OPT_fno_caret_diagnostics, true))
+ options::OPT_fno_caret_diagnostics, CaretDefault))
CmdArgs.push_back("-fno-caret-diagnostics");
// -fdiagnostics-fixit-info is default, only pass non-default.
@@ -4059,7 +4080,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fdiagnostics-absolute-paths");
if (!Args.hasFlag(options::OPT_fshow_column, options::OPT_fno_show_column,
- true))
+ ColumnDefault))
CmdArgs.push_back("-fno-show-column");
if (!Args.hasFlag(options::OPT_fspell_checking,
@@ -4781,14 +4802,36 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("-fms-memptr-rep=virtual");
}
- if (Args.getLastArg(options::OPT__SLASH_Gd))
- CmdArgs.push_back("-fdefault-calling-conv=cdecl");
- else if (Args.getLastArg(options::OPT__SLASH_Gr))
- CmdArgs.push_back("-fdefault-calling-conv=fastcall");
- else if (Args.getLastArg(options::OPT__SLASH_Gz))
- CmdArgs.push_back("-fdefault-calling-conv=stdcall");
- else if (Args.getLastArg(options::OPT__SLASH_Gv))
- CmdArgs.push_back("-fdefault-calling-conv=vectorcall");
+ // Parse the default calling convention options.
+ if (Arg *CCArg =
+ Args.getLastArg(options::OPT__SLASH_Gd, options::OPT__SLASH_Gr,
+ options::OPT__SLASH_Gz, options::OPT__SLASH_Gv)) {
+ unsigned DCCOptId = CCArg->getOption().getID();
+ const char *DCCFlag = nullptr;
+ bool ArchSupported = true;
+ llvm::Triple::ArchType Arch = getToolChain().getArch();
+ switch (DCCOptId) {
+ case options::OPT__SLASH_Gd:
+ DCCFlag = "-fdefault-calling-conv=cdecl";
+ break;
+ case options::OPT__SLASH_Gr:
+ ArchSupported = Arch == llvm::Triple::x86;
+ DCCFlag = "-fdefault-calling-conv=fastcall";
+ break;
+ case options::OPT__SLASH_Gz:
+ ArchSupported = Arch == llvm::Triple::x86;
+ DCCFlag = "-fdefault-calling-conv=stdcall";
+ break;
+ case options::OPT__SLASH_Gv:
+ ArchSupported = Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64;
+ DCCFlag = "-fdefault-calling-conv=vectorcall";
+ break;
+ }
+
+ // MSVC doesn't warn if /Gr or /Gz is used on x64, so we don't either.
+ if (ArchSupported && DCCFlag)
+ CmdArgs.push_back(DCCFlag);
+ }
if (Arg *A = Args.getLastArg(options::OPT_vtordisp_mode_EQ))
A->render(Args, CmdArgs);
diff --git a/lib/Driver/ToolChains/Gnu.cpp b/lib/Driver/ToolChains/Gnu.cpp
index 1a398fd8a773..d50f8e21f62f 100644
--- a/lib/Driver/ToolChains/Gnu.cpp
+++ b/lib/Driver/ToolChains/Gnu.cpp
@@ -278,20 +278,20 @@ static void AddOpenMPLinkerScript(const ToolChain &TC, Compilation &C,
LksStream << "SECTIONS\n";
LksStream << "{\n";
- LksStream << " .omp_offloading :\n";
- LksStream << " ALIGN(0x10)\n";
- LksStream << " {\n";
- for (auto &BI : InputBinaryInfo) {
- LksStream << " . = ALIGN(0x10);\n";
+ // Put each target binary into a separate section.
+ for (const auto &BI : InputBinaryInfo) {
+ LksStream << " .omp_offloading." << BI.first << " :\n";
+ LksStream << " ALIGN(0x10)\n";
+ LksStream << " {\n";
LksStream << " PROVIDE_HIDDEN(.omp_offloading.img_start." << BI.first
<< " = .);\n";
LksStream << " " << BI.second << "\n";
LksStream << " PROVIDE_HIDDEN(.omp_offloading.img_end." << BI.first
<< " = .);\n";
+ LksStream << " }\n";
}
- LksStream << " }\n";
// Add commands to define host entries begin and end. We use 1-byte subalign
// so that the linker does not add any padding and the elements in this
// section form an array.
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index ae79ea5d8a66..eda7ef36434d 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -360,16 +360,21 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
switch (Tok->Tok.getKind()) {
case tok::l_brace:
- if (Style.Language == FormatStyle::LK_JavaScript && PrevTok &&
- PrevTok->is(tok::colon))
- // A colon indicates this code is in a type, or a braced list following
- // a label in an object literal ({a: {b: 1}}).
- // The code below could be confused by semicolons between the individual
- // members in a type member list, which would normally trigger BK_Block.
- // In both cases, this must be parsed as an inline braced init.
- Tok->BlockKind = BK_BracedInit;
- else
+ if (Style.Language == FormatStyle::LK_JavaScript && PrevTok) {
+ if (PrevTok->is(tok::colon))
+ // A colon indicates this code is in a type, or a braced list
+ // following a label in an object literal ({a: {b: 1}}). The code
+ // below could be confused by semicolons between the individual
+ // members in a type member list, which would normally trigger
+ // BK_Block. In both cases, this must be parsed as an inline braced
+ // init.
+ Tok->BlockKind = BK_BracedInit;
+ else if (PrevTok->is(tok::r_paren))
+ // `) { }` can only occur in function or method declarations in JS.
+ Tok->BlockKind = BK_Block;
+ } else {
Tok->BlockKind = BK_Unknown;
+ }
LBraceStack.push_back(Tok);
break;
case tok::r_brace:
@@ -391,6 +396,8 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// BlockKind later if we parse a braced list (where all blocks
// inside are by default braced lists), or when we explicitly detect
// blocks (for example while parsing lambdas).
+ // FIXME: Some of these do not apply to JS, e.g. "} {" can never be a
+ // braced list in JS.
ProbablyBracedList =
(Style.Language == FormatStyle::LK_JavaScript &&
NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
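
Two hypothetical clang-format inputs illustrating the JavaScript cases the
classifier now distinguishes:

  const x = {a: {b: 1}};      // `{b: 1}` follows a colon -> BK_BracedInit
  function f() { return 1; }  // `{` follows `)` -> BK_Block
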
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 7d7e7d49e9f0..47c763d29357 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -2214,8 +2214,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
llvm::Triple T(TargetOpts.Triple);
llvm::Triple::ArchType Arch = T.getArch();
bool emitError = (DefaultCC == LangOptions::DCC_FastCall ||
- DefaultCC == LangOptions::DCC_StdCall) &&
- Arch != llvm::Triple::x86;
+ DefaultCC == LangOptions::DCC_StdCall) &&
+ Arch != llvm::Triple::x86;
emitError |= DefaultCC == LangOptions::DCC_VectorCall &&
!(Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64);
if (emitError)
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index 874c1b6be41e..cd67e469ddad 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -373,10 +373,11 @@ collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
return std::error_code();
}
-static bool
-loadModuleMapForModuleBuild(CompilerInstance &CI, StringRef Filename,
- bool IsSystem, bool IsPreprocessed,
- unsigned &Offset) {
+static bool loadModuleMapForModuleBuild(CompilerInstance &CI,
+ StringRef Filename, bool IsSystem,
+ bool IsPreprocessed,
+ std::string &PresumedModuleMapFile,
+ unsigned &Offset) {
auto &SrcMgr = CI.getSourceManager();
HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
@@ -388,16 +389,15 @@ loadModuleMapForModuleBuild(CompilerInstance &CI, StringRef Filename,
// line directives are not part of the module map syntax in general.
Offset = 0;
if (IsPreprocessed) {
- std::string PresumedModuleMapFile;
SourceLocation EndOfLineMarker =
ReadOriginalFileName(CI, PresumedModuleMapFile, /*AddLineNote*/true);
if (EndOfLineMarker.isValid())
Offset = CI.getSourceManager().getDecomposedLoc(EndOfLineMarker).second;
- // FIXME: Use PresumedModuleMapFile as the MODULE_MAP_FILE in the PCM.
}
// Load the module map file.
- if (HS.loadModuleMapFile(ModuleMap, IsSystem, ModuleMapID, &Offset))
+ if (HS.loadModuleMapFile(ModuleMap, IsSystem, ModuleMapID, &Offset,
+ PresumedModuleMapFile))
return true;
if (SrcMgr.getBuffer(ModuleMapID)->getBufferSize() == Offset)
@@ -664,15 +664,19 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (Input.getKind().getFormat() == InputKind::ModuleMap) {
CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleMap);
+ std::string PresumedModuleMapFile;
unsigned OffsetToContents;
if (loadModuleMapForModuleBuild(CI, Input.getFile(), Input.isSystem(),
- Input.isPreprocessed(), OffsetToContents))
+ Input.isPreprocessed(),
+ PresumedModuleMapFile, OffsetToContents))
goto failure;
auto *CurrentModule = prepareToBuildModule(CI, Input.getFile());
if (!CurrentModule)
goto failure;
+ CurrentModule->PresumedModuleMapFile = PresumedModuleMapFile;
+
if (OffsetToContents)
// If the module contents are in the same file, skip to them.
CI.getPreprocessor().setSkipMainFilePreamble(OffsetToContents, true);
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index f5b7c59e446f..9084bc352f76 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -1326,14 +1326,27 @@ static const FileEntry *getPrivateModuleMap(const FileEntry *File,
}
bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
- FileID ID, unsigned *Offset) {
+ FileID ID, unsigned *Offset,
+ StringRef OriginalModuleMapFile) {
// Find the directory for the module. For frameworks, that may require going
// up from the 'Modules' directory.
const DirectoryEntry *Dir = nullptr;
if (getHeaderSearchOpts().ModuleMapFileHomeIsCwd)
Dir = FileMgr.getDirectory(".");
else {
- Dir = File->getDir();
+ if (!OriginalModuleMapFile.empty()) {
+ // We're building a preprocessed module map. Find or invent the directory
+ // that it originally occupied.
+ Dir = FileMgr.getDirectory(
+ llvm::sys::path::parent_path(OriginalModuleMapFile));
+ if (!Dir) {
+ auto *FakeFile = FileMgr.getVirtualFile(OriginalModuleMapFile, 0, 0);
+ Dir = FakeFile->getDir();
+ }
+ } else {
+ Dir = File->getDir();
+ }
+
StringRef DirName(Dir->getName());
if (llvm::sys::path::filename(DirName) == "Modules") {
DirName = llvm::sys::path::parent_path(DirName);
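
In effect: when building from a preprocessed module map whose presumed path
is, say, /src/M/module.modulemap (name hypothetical), the lookup first tries
the real directory /src/M; if it does not exist in this filesystem, a
zero-size virtual file is registered solely so that a DirectoryEntry for
/src/M can be recovered from it.
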
diff --git a/lib/Sema/SemaCoroutine.cpp b/lib/Sema/SemaCoroutine.cpp
index ae6c35f22065..8a548c0ab861 100644
--- a/lib/Sema/SemaCoroutine.cpp
+++ b/lib/Sema/SemaCoroutine.cpp
@@ -391,8 +391,11 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
// [expr.await]p3 [...]
// - await-suspend is the expression e.await_suspend(h), which shall be
// a prvalue of type void or bool.
- QualType RetType = AwaitSuspend->getType();
- if (RetType != S.Context.BoolTy && RetType != S.Context.VoidTy) {
+ QualType RetType = AwaitSuspend->getCallReturnType(S.Context);
+ // Non-class prvalues always have cv-unqualified types.
+ QualType AdjRetType = RetType.getUnqualifiedType();
+ if (RetType->isReferenceType() ||
+ (AdjRetType != S.Context.BoolTy && AdjRetType != S.Context.VoidTy)) {
S.Diag(AwaitSuspend->getCalleeDecl()->getLocation(),
diag::err_await_suspend_invalid_return_type)
<< RetType;
@@ -437,6 +440,7 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
if (VD->isInvalidDecl())
return nullptr;
ActOnUninitializedDecl(VD);
+ FD->addDecl(VD);
assert(!VD->isInvalidDecl());
return VD;
}
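
A sketch of what the tightened await_suspend check accepts and rejects
(hypothetical awaiter, written against the Coroutines TS headers of this era):

  struct awaiter {
    bool await_ready() { return false; }
    // OK: a prvalue of type bool.
    bool await_suspend(std::experimental::coroutine_handle<>) { return true; }
    // int   await_suspend(...);  // rejected: neither void nor bool
    // bool &await_suspend(...);  // rejected: reference return type
    void await_resume() {}
  };
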
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index a9adbec4f842..ea1f7526a832 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -11116,6 +11116,17 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
<< Init->getSourceRange();
Diag(attr->getLocation(), diag::note_declared_required_constant_init_here)
<< attr->getRange();
+ if (getLangOpts().CPlusPlus11) {
+ APValue Value;
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ Init->EvaluateAsInitializer(Value, getASTContext(), var, Notes);
+ for (auto &it : Notes)
+ Diag(it.first, it.second);
+ } else {
+ Diag(CacheCulprit->getExprLoc(),
+ diag::note_invalid_subexpr_in_const_expr)
+ << CacheCulprit->getSourceRange();
+ }
}
}
else if (!var->isConstexpr() && IsGlobal &&
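
Roughly, the C++11 path re-runs constant evaluation purely to harvest its
notes, so the diagnostic now points into the initializer (hypothetical input;
diagnostic text approximate):

  int f();
  __attribute__((require_constant_initialization)) int x = f();
  // error: variable does not have a constant initializer
  // note: required by 'require_constant_initialization' attribute here
  // note: non-constexpr function 'f' cannot be used in a constant expression
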
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index 370461c4a24e..2c8080dbf02b 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -1851,10 +1851,6 @@ Decl *Sema::ActOnStartCategoryImplementation(
// FIXME: PushOnScopeChains?
CurContext->addDecl(CDecl);
- // If the interface is deprecated/unavailable, warn/error about it.
- if (IDecl)
- DiagnoseUseOfDecl(IDecl, ClassLoc);
-
// If the interface has the objc_runtime_visible attribute, we
// cannot implement a category for it.
if (IDecl && IDecl->hasAttr<ObjCRuntimeVisibleAttr>()) {
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 8500b748a3ec..a05f7a7e406b 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -4080,24 +4080,23 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
return true;
- // C++0x [meta.unary.prop] Table 49 requires the following traits to be
- // applied to a complete type.
+ // C++1z [meta.unary.prop]:
+ // remove_all_extents_t<T> shall be a complete type or cv void.
case UTT_IsAggregate:
case UTT_IsTrivial:
case UTT_IsTriviallyCopyable:
case UTT_IsStandardLayout:
case UTT_IsPOD:
case UTT_IsLiteral:
+ ArgTy = QualType(ArgTy->getBaseElementTypeUnsafe(), 0);
+ LLVM_FALLTHROUGH;
+ // C++1z [meta.unary.prop]:
+ // T shall be a complete type, cv void, or an array of unknown bound.
case UTT_IsDestructible:
case UTT_IsNothrowDestructible:
- // Fall-through
-
- // These trait expressions are designed to help implement predicates in
- // [meta.unary.prop] despite not being named the same. They are specified
- // by both GCC and the Embarcadero C++ compiler, and require the complete
- // type due to the overarching C++0x type predicates being implemented
- // requiring the complete type.
+ case UTT_IsTriviallyDestructible:
+ // Per the GCC type traits documentation, the same constraints apply to
+ // these.
case UTT_HasNothrowAssign:
case UTT_HasNothrowMoveAssign:
case UTT_HasNothrowConstructor:
@@ -4109,17 +4108,11 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_HasTrivialCopy:
case UTT_HasTrivialDestructor:
case UTT_HasVirtualDestructor:
- // Arrays of unknown bound are expressly allowed.
- QualType ElTy = ArgTy;
- if (ArgTy->isIncompleteArrayType())
- ElTy = S.Context.getAsArrayType(ArgTy)->getElementType();
-
- // The void type is expressly allowed.
- if (ElTy->isVoidType())
+ if (ArgTy->isIncompleteArrayType() || ArgTy->isVoidType())
return true;
return !S.RequireCompleteType(
- Loc, ElTy, diag::err_incomplete_type_used_in_type_trait_expr);
+ Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
}
}
@@ -4356,6 +4349,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
!RD->hasNonTrivialCopyAssignment();
return false;
case UTT_IsDestructible:
+ case UTT_IsTriviallyDestructible:
case UTT_IsNothrowDestructible:
// C++14 [meta.unary.prop]:
// For reference types, is_destructible<T>::value is true.
@@ -4373,6 +4367,11 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
if (T->isIncompleteType() || T->isFunctionType())
return false;
+ // A type that requires destruction (via a non-trivial destructor or ARC
+ // lifetime semantics) is not trivially-destructible.
+ if (UTT == UTT_IsTriviallyDestructible && T.isDestructedType())
+ return false;
+
// C++14 [meta.unary.prop]:
// For object types and given U equal to remove_all_extents_t<T>, if the
// expression std::declval<U&>().~U() is well-formed when treated as an
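
A rough illustration of the new trait, assuming it is surfaced as
__is_trivially_destructible:

  struct Trivial { int i; };
  struct NonTrivial { ~NonTrivial(); };
  static_assert(__is_trivially_destructible(Trivial), "");
  static_assert(__is_trivially_destructible(Trivial[2]), "");  // arrays check the element type
  static_assert(!__is_trivially_destructible(NonTrivial), "");
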
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 1ba84034fa47..7bdd8872456a 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -49,12 +49,12 @@ static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
static ExprResult
CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
bool HadMultipleCandidates,
- SourceLocation Loc = SourceLocation(),
+ SourceLocation Loc = SourceLocation(),
const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){
if (S.DiagnoseUseOfDecl(FoundDecl, Loc))
- return ExprError();
+ return ExprError();
// If FoundDecl is different from Fn (such as if one is a template
- // and the other a specialization), make sure DiagnoseUseOfDecl is
+ // and the other a specialization), make sure DiagnoseUseOfDecl is
// called on both.
// FIXME: This would be more comprehensively addressed by modifying
// DiagnoseUseOfDecl to accept both the FoundDecl and the decl
@@ -79,7 +79,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
bool CStyle,
bool AllowObjCWritebackConversion);
-static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From,
+static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From,
QualType &ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
@@ -330,13 +330,13 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
} else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) {
llvm::APSInt IntConstantValue;
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ assert(Initializer && "Unknown conversion expression");
// If it's value-dependent, we can't tell whether it's narrowing.
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
- if (Initializer &&
- Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
+ if (Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
// Convert the integer to the floating type.
llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType));
Result.convertFromAPInt(IntConstantValue, IntConstantValue.isSigned(),
@@ -852,7 +852,7 @@ namespace {
Expr *Saved;
};
SmallVector<Entry, 2> Entries;
-
+
public:
void save(Sema &S, Expr *&E) {
assert(E->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
@@ -863,7 +863,7 @@ namespace {
void restore() {
for (SmallVectorImpl<Entry>::iterator
- i = Entries.begin(), e = Entries.end(); i != e; ++i)
+ i = Entries.begin(), e = Entries.end(); i != e; ++i)
*i->Addr = i->Saved;
}
};
@@ -1368,9 +1368,9 @@ Sema::TryImplicitConversion(Expr *From, QualType ToType,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion) {
- return ::TryImplicitConversion(*this, From, ToType,
+ return ::TryImplicitConversion(*this, From, ToType,
SuppressUserConversions, AllowExplicit,
- InOverloadResolution, CStyle,
+ InOverloadResolution, CStyle,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
}
@@ -1396,7 +1396,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Objective-C ARC: Determine whether we will allow the writeback conversion.
bool AllowObjCWritebackConversion
- = getLangOpts().ObjCAutoRefCount &&
+ = getLangOpts().ObjCAutoRefCount &&
(Action == AA_Passing || Action == AA_Sending);
if (getLangOpts().ObjC1)
CheckObjCBridgeRelatedConversions(From->getLocStart(),
@@ -1592,15 +1592,15 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// if the function type matches except for [[noreturn]], it's ok
if (!S.IsFunctionConversion(FromType,
S.ExtractUnqualifiedFunctionType(ToType), resultTy))
- // otherwise, only a boolean conversion is standard
- if (!ToType->isBooleanType())
- return false;
+ // otherwise, only a boolean conversion is standard
+ if (!ToType->isBooleanType())
+ return false;
}
// Check if the "from" expression is taking the address of an overloaded
// function and recompute the FromType accordingly. Take advantage of the
// fact that non-static member functions *must* have such an address-of
- // expression.
+ // expression.
CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn);
if (Method && !Method->isStatic()) {
assert(isa<UnaryOperator>(From->IgnoreParens()) &&
@@ -1638,7 +1638,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
SCS.First = ICK_Lvalue_To_Rvalue;
// C11 6.3.2.1p2:
- // ... if the lvalue has atomic type, the value has the non-atomic version
+ // ... if the lvalue has atomic type, the value has the non-atomic version
// of the type of the lvalue ...
if (const AtomicType *Atomic = FromType->getAs<AtomicType>())
FromType = Atomic->getValueType();
@@ -1890,12 +1890,12 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
}
static bool
-IsTransparentUnionStandardConversion(Sema &S, Expr* From,
+IsTransparentUnionStandardConversion(Sema &S, Expr* From,
QualType &ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
bool CStyle) {
-
+
const RecordType *UT = ToType->getAsUnionType();
if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
return false;
@@ -2129,7 +2129,7 @@ BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
"Invalid similarly-qualified pointer type");
/// Conversions to 'id' subsume cv-qualifier conversions.
- if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
+ if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
return ToType.getUnqualifiedType();
QualType CanonFromPointee
@@ -2139,7 +2139,7 @@ BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
if (StripObjCLifetime)
Quals.removeObjCLifetime();
-
+
// Exact qualifier match -> return the pointer type we're converting to.
if (CanonToPointee.getLocalQualifiers() == Quals) {
// ToType is exactly what we need. Return it.
@@ -2323,21 +2323,21 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
ToType, Context);
return true;
}
-
+
return false;
}
-
+
/// \brief Adopt the given qualifiers for the given type.
static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){
Qualifiers TQs = T.getQualifiers();
-
+
// Check whether qualifiers already match.
if (TQs == Qs)
return T;
-
+
if (Qs.compatiblyIncludes(TQs))
return Context.getQualifiedType(T, Qs);
-
+
return Context.getQualifiedType(T.getUnqualifiedType(), Qs);
}
@@ -2352,7 +2352,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
// The set of qualifiers on the type we're converting from.
Qualifiers FromQualifiers = FromType.getQualifiers();
-
+
// First, we handle all conversions on ObjC object pointer types.
const ObjCObjectPointerType* ToObjCPtr =
ToType->getAs<ObjCObjectPointerType>();
@@ -2443,7 +2443,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
ToPointeeType->getAs<ObjCObjectPointerType>() &&
isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
IncompatibleObjC)) {
-
+
ConvertedType = Context.getPointerType(ConvertedType);
ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
return true;
@@ -2526,46 +2526,46 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
/// this conversion.
bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType) {
- if (!getLangOpts().ObjCAutoRefCount ||
+ if (!getLangOpts().ObjCAutoRefCount ||
Context.hasSameUnqualifiedType(FromType, ToType))
return false;
-
+
// Parameter must be a pointer to __autoreleasing (with no other qualifiers).
QualType ToPointee;
if (const PointerType *ToPointer = ToType->getAs<PointerType>())
ToPointee = ToPointer->getPointeeType();
else
return false;
-
+
Qualifiers ToQuals = ToPointee.getQualifiers();
- if (!ToPointee->isObjCLifetimeType() ||
+ if (!ToPointee->isObjCLifetimeType() ||
ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing ||
!ToQuals.withoutObjCLifetime().empty())
return false;
-
+
// Argument must be a pointer to __strong to __weak.
QualType FromPointee;
if (const PointerType *FromPointer = FromType->getAs<PointerType>())
FromPointee = FromPointer->getPointeeType();
else
return false;
-
+
Qualifiers FromQuals = FromPointee.getQualifiers();
if (!FromPointee->isObjCLifetimeType() ||
(FromQuals.getObjCLifetime() != Qualifiers::OCL_Strong &&
FromQuals.getObjCLifetime() != Qualifiers::OCL_Weak))
return false;
-
+
// Make sure that we have compatible qualifiers.
FromQuals.setObjCLifetime(Qualifiers::OCL_Autoreleasing);
if (!ToQuals.compatiblyIncludes(FromQuals))
return false;
-
+
// Remove qualifiers from the pointee type we're converting from; they
  // aren't used in the compatibility check below, and we'll be adding back
// qualifiers (with __autoreleasing) if the compatibility check succeeds.
FromPointee = FromPointee.getUnqualifiedType();
-
+
// The unqualified form of the pointee types must be compatible.
ToPointee = ToPointee.getUnqualifiedType();
bool IncompatibleObjC;
@@ -2574,7 +2574,7 @@ bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
else if (!isObjCPointerConversion(FromPointee, ToPointee, FromPointee,
IncompatibleObjC))
return false;
-
+
/// \brief Construct the type we're converting to, which is a pointer to
/// __autoreleasing pointee.
FromPointee = Context.getQualifiedType(FromPointee, FromQuals);
@@ -2590,7 +2590,7 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
ToPointeeType = ToBlockPtr->getPointeeType();
else
return false;
-
+
QualType FromPointeeType;
if (const BlockPointerType *FromBlockPtr =
FromType->getAs<BlockPointerType>())
@@ -2600,24 +2600,24 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
// We have pointer to blocks, check whether the only
// differences in the argument and result types are in Objective-C
// pointer conversions. If so, we permit the conversion.
-
+
const FunctionProtoType *FromFunctionType
= FromPointeeType->getAs<FunctionProtoType>();
const FunctionProtoType *ToFunctionType
= ToPointeeType->getAs<FunctionProtoType>();
-
+
if (!FromFunctionType || !ToFunctionType)
return false;
if (Context.hasSameType(FromPointeeType, ToPointeeType))
return true;
-
+
// Perform the quick checks that will tell us whether these
// function types are obviously different.
if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() ||
FromFunctionType->isVariadic() != ToFunctionType->isVariadic())
return false;
-
+
FunctionType::ExtInfo FromEInfo = FromFunctionType->getExtInfo();
FunctionType::ExtInfo ToEInfo = ToFunctionType->getExtInfo();
if (FromEInfo != ToEInfo)
@@ -2645,7 +2645,7 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
else
return false;
}
-
+
// Check argument types.
for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams();
ArgIdx != NumArgs; ++ArgIdx) {
@@ -2666,7 +2666,7 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
if (!Context.doFunctionTypesMatchOnExtParameterInfos(FromFunctionType,
ToFunctionType))
return false;
-
+
ConvertedType = ToType;
return true;
}
@@ -3012,7 +3012,7 @@ bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
static bool isNonTrivialObjCLifetimeConversion(Qualifiers FromQuals,
Qualifiers ToQuals) {
// Converting anything to const __unsafe_unretained is trivial.
- if (ToQuals.hasConst() &&
+ if (ToQuals.hasConst() &&
ToQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone)
return false;
@@ -3032,7 +3032,7 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
FromType = Context.getCanonicalType(FromType);
ToType = Context.getCanonicalType(ToType);
ObjCLifetimeConversion = false;
-
+
// If FromType and ToType are the same type, this is not a
// qualification conversion.
if (FromType.getUnqualifiedType() == ToType.getUnqualifiedType())
@@ -3058,7 +3058,7 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
// Ignore __unaligned qualifier if this type is void.
if (ToType.getUnqualifiedType()->isVoidType())
FromQuals.removeUnaligned();
-
+
// Objective-C ARC:
// Check Objective-C lifetime conversions.
if (FromQuals.getObjCLifetime() != ToQuals.getObjCLifetime() &&
@@ -3074,14 +3074,14 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
return false;
}
}
-
+
// Allow addition/removal of GC attributes but not changing GC attributes.
if (FromQuals.getObjCGCAttr() != ToQuals.getObjCGCAttr() &&
(!FromQuals.hasObjCGCAttr() || !ToQuals.hasObjCGCAttr())) {
FromQuals.removeObjCGCAttr();
ToQuals.removeObjCGCAttr();
}
-
+
// -- for every j > 0, if const is in cv 1,j then const is in cv
// 2,j, and similarly for volatile.
if (!CStyle && !ToQuals.compatiblyIncludes(FromQuals))
@@ -3119,13 +3119,13 @@ static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
const AtomicType *ToAtomic = ToType->getAs<AtomicType>();
if (!ToAtomic)
return false;
-
+
StandardConversionSequence InnerSCS;
- if (!IsStandardConversion(S, From, ToAtomic->getValueType(),
+ if (!IsStandardConversion(S, From, ToAtomic->getValueType(),
InOverloadResolution, InnerSCS,
CStyle, /*AllowObjCWritebackConversion=*/false))
return false;
-
+
SCS.Second = InnerSCS.Second;
SCS.setToType(1, InnerSCS.getToType(1));
SCS.Third = InnerSCS.Third;
@@ -3180,8 +3180,8 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
bool HadMultipleCandidates = (CandidateSet.size() > 1);
OverloadCandidateSet::iterator Best;
- switch (auto Result =
- CandidateSet.BestViableFunction(S, From->getLocStart(),
+ switch (auto Result =
+ CandidateSet.BestViableFunction(S, From->getLocStart(),
Best, true)) {
case OR_Deleted:
case OR_Success: {
@@ -3552,7 +3552,7 @@ CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
// Two implicit conversion sequences of the same form are
// indistinguishable conversion sequences unless one of the
// following rules apply: (C++ 13.3.3.2p3):
-
+
// List-initialization sequence L1 is a better conversion sequence than
// list-initialization sequence L2 if:
// - L1 converts to std::initializer_list<X> for some X and L2 does not, or,
@@ -3587,7 +3587,7 @@ CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
ICS1.UserDefined.After,
ICS2.UserDefined.After);
else
- Result = compareConversionFunctions(S,
+ Result = compareConversionFunctions(S,
ICS1.UserDefined.ConversionFunction,
ICS2.UserDefined.ConversionFunction);
}
@@ -3769,9 +3769,9 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
const ObjCObjectPointerType* FromObjCPtr2
= FromType2->getAs<ObjCObjectPointerType>();
if (FromObjCPtr1 && FromObjCPtr2) {
- bool AssignLeft = S.Context.canAssignObjCInterfaces(FromObjCPtr1,
+ bool AssignLeft = S.Context.canAssignObjCInterfaces(FromObjCPtr1,
FromObjCPtr2);
- bool AssignRight = S.Context.canAssignObjCInterfaces(FromObjCPtr2,
+ bool AssignRight = S.Context.canAssignObjCInterfaces(FromObjCPtr2,
FromObjCPtr1);
if (AssignLeft != AssignRight) {
return AssignLeft? ImplicitConversionSequence::Better
@@ -3809,13 +3809,13 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
if (UnqualT1 == UnqualT2) {
// Objective-C++ ARC: If the references refer to objects with different
// lifetimes, prefer bindings that don't change lifetime.
- if (SCS1.ObjCLifetimeConversionBinding !=
+ if (SCS1.ObjCLifetimeConversionBinding !=
SCS2.ObjCLifetimeConversionBinding) {
return SCS1.ObjCLifetimeConversionBinding
? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
}
-
+
// If the type is an array type, promote the element qualifiers to the
// type for comparison.
if (isa<ArrayType>(T1) && T1Quals)
@@ -3825,7 +3825,7 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
if (T2.isMoreQualifiedThan(T1))
return ImplicitConversionSequence::Better;
else if (T1.isMoreQualifiedThan(T2))
- return ImplicitConversionSequence::Worse;
+ return ImplicitConversionSequence::Worse;
}
}
@@ -3891,17 +3891,17 @@ CompareQualificationConversions(Sema &S,
ImplicitConversionSequence::CompareKind Result
= ImplicitConversionSequence::Indistinguishable;
-
+
// Objective-C++ ARC:
// Prefer qualification conversions not involving a change in lifetime
// to qualification conversions that do not change lifetime.
- if (SCS1.QualificationIncludesObjCLifetime !=
+ if (SCS1.QualificationIncludesObjCLifetime !=
SCS2.QualificationIncludesObjCLifetime) {
Result = SCS1.QualificationIncludesObjCLifetime
? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
}
-
+
while (S.Context.UnwrapSimilarPointerTypes(T1, T2)) {
// Within each iteration of the loop, we check the qualifiers to
// determine if this still looks like a qualification
@@ -4033,7 +4033,7 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
= ToType1->getAs<ObjCObjectPointerType>();
const ObjCObjectPointerType *ToPtr2
= ToType2->getAs<ObjCObjectPointerType>();
-
+
if (FromPtr1 && FromPtr2 && ToPtr1 && ToPtr2) {
// Apply the same conversion ranking rules for Objective-C pointer types
// that we do for C++ pointers to class types. However, we employ the
@@ -4048,7 +4048,7 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
bool ToAssignRight
= S.Context.canAssignObjCInterfaces(ToPtr2, ToPtr1);
- // A conversion to an a non-id object pointer type or qualified 'id'
+ // A conversion to a non-id object pointer type or qualified 'id'
// type is better than a conversion to 'id'.
if (ToPtr1->isObjCIdType() &&
(ToPtr2->isObjCQualifiedIdType() || ToPtr2->getInterfaceDecl()))
@@ -4056,15 +4056,15 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
if (ToPtr2->isObjCIdType() &&
(ToPtr1->isObjCQualifiedIdType() || ToPtr1->getInterfaceDecl()))
return ImplicitConversionSequence::Better;
-
- // A conversion to a non-id object pointer type is better than a
- // conversion to a qualified 'id' type
+
+ // A conversion to a non-id object pointer type is better than a
+ // conversion to a qualified 'id' type
if (ToPtr1->isObjCQualifiedIdType() && ToPtr2->getInterfaceDecl())
return ImplicitConversionSequence::Worse;
if (ToPtr2->isObjCQualifiedIdType() && ToPtr1->getInterfaceDecl())
return ImplicitConversionSequence::Better;
-
- // A conversion to an a non-Class object pointer type or qualified 'Class'
+
+ // A conversion to a non-Class object pointer type or qualified 'Class'
// type is better than a conversion to 'Class'.
if (ToPtr1->isObjCClassType() &&
(ToPtr2->isObjCQualifiedClassType() || ToPtr2->getInterfaceDecl()))
@@ -4072,8 +4072,8 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
if (ToPtr2->isObjCClassType() &&
(ToPtr1->isObjCQualifiedClassType() || ToPtr1->getInterfaceDecl()))
return ImplicitConversionSequence::Better;
-
- // A conversion to a non-Class object pointer type is better than a
+
+ // A conversion to a non-Class object pointer type is better than a
// conversion to a qualified 'Class' type.
if (ToPtr1->isObjCQualifiedClassType() && ToPtr2->getInterfaceDecl())
return ImplicitConversionSequence::Worse;
@@ -4108,7 +4108,7 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
: ImplicitConversionSequence::Worse;
}
}
-
+
// Ranking of member-pointer types.
if (SCS1.Second == ICK_Pointer_Member && SCS2.Second == ICK_Pointer_Member &&
FromType1->isMemberPointerType() && FromType2->isMemberPointerType() &&
@@ -4264,9 +4264,9 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
ObjCLifetimeConversion = true;
T1Quals.removeObjCLifetime();
- T2Quals.removeObjCLifetime();
+ T2Quals.removeObjCLifetime();
}
-
+
// MS compiler ignores __unaligned qualifier for references; do the same.
T1Quals.removeUnaligned();
T2Quals.removeUnaligned();
@@ -4313,7 +4313,7 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
bool DerivedToBase = false;
bool ObjCConversion = false;
bool ObjCLifetimeConversion = false;
-
+
// If we are initializing an rvalue reference, don't permit conversion
// functions that return lvalues.
if (!ConvTemplate && DeclType->isRValueReferenceType()) {
@@ -4322,7 +4322,7 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
if (RefType && !RefType->getPointeeType()->isFunctionType())
continue;
}
-
+
if (!ConvTemplate &&
S.CompareReferenceRelationship(
DeclLoc,
@@ -6051,24 +6051,24 @@ Sema::SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance,
NumNamedArgs = Method->param_size();
if (Args.size() < NumNamedArgs)
continue;
-
+
for (unsigned i = 0; i < NumNamedArgs; i++) {
// We can't do any type-checking on a type-dependent argument.
if (Args[i]->isTypeDependent()) {
Match = false;
break;
}
-
+
ParmVarDecl *param = Method->parameters()[i];
Expr *argExpr = Args[i];
assert(argExpr && "SelectBestMethod(): missing expression");
-
+
// Strip the unbridged-cast placeholder expression off unless it's
// a consumed argument.
if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
!param->hasAttr<CFConsumedAttr>())
argExpr = stripARCUnbridgedCast(argExpr);
-
+
// If the parameter is __unknown_anytype, move on to the next method.
if (param->getType() == Context.UnknownAnyTy) {
Match = false;
@@ -6754,7 +6754,7 @@ static bool isAllowableExplicitConversion(Sema &S,
return S.isObjCPointerConversion(ConvType, ToNonRefType, ConvertedType,
IncompatibleObjC);
}
-
+
/// AddConversionCandidate - Add a C++ conversion function as a
/// candidate in the candidate set (C++ [over.match.conv],
/// C++ [over.match.copy]). From is the expression we're converting from,
@@ -6785,8 +6785,8 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
// Per C++ [over.match.conv]p1, [over.match.ref]p1, an explicit conversion
// operator is only a candidate if its return type is the target type or
// can be converted to the target type with a qualification conversion.
- if (Conversion->isExplicit() &&
- !isAllowableExplicitConversion(*this, ConvType, ToType,
+ if (Conversion->isExplicit() &&
+ !isAllowableExplicitConversion(*this, ConvType, ToType,
AllowObjCConversionOnExplicit))
return;
@@ -7230,7 +7230,7 @@ class BuiltinCandidateTypeSet {
/// \brief A flag indicating whether the nullptr type was present in the
/// candidate set.
bool HasNullPtrType;
-
+
/// Sema - The semantic analysis instance where we are building the
/// candidate type set.
Sema &SemaRef;
@@ -7314,14 +7314,14 @@ BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
} else {
PointeeTy = PointerTy->getPointeeType();
}
-
+
// Don't add qualified variants of arrays. For one, they're not allowed
// (the qualifier would sink to the element type), and for another, the
// only overload situation where it matters is subscript or pointer +- int,
// and those shouldn't have qualifier variants anyway.
if (PointeeTy->isArrayType())
return true;
-
+
unsigned BaseCVR = PointeeTy.getCVRQualifiers();
bool hasVolatile = VisibleQuals.hasVolatile();
bool hasRestrict = VisibleQuals.hasRestrict();
@@ -7331,24 +7331,24 @@ BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
if ((CVR | BaseCVR) != CVR) continue;
// Skip over volatile if no volatile found anywhere in the types.
if ((CVR & Qualifiers::Volatile) && !hasVolatile) continue;
-
+
// Skip over restrict if no restrict found anywhere in the types, or if
// the type cannot be restrict-qualified.
if ((CVR & Qualifiers::Restrict) &&
(!hasRestrict ||
(!(PointeeTy->isAnyPointerType() || PointeeTy->isReferenceType()))))
continue;
-
+
// Build qualified pointee type.
QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
-
+
// Build qualified pointer type.
QualType QPointerTy;
if (!buildObjCPtr)
QPointerTy = Context.getPointerType(QPointeeTy);
else
QPointerTy = Context.getObjCObjectPointerType(QPointeeTy);
-
+
// Insert qualified pointer type.
PointerTypes.insert(QPointerTy);
}
@@ -7705,7 +7705,7 @@ class BuiltinOperatorOverloadBuilder {
else
S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet);
}
-
+
// Add restrict version only if there are conversions to a restrict type
// and our candidate type is a non-restrict-qualified pointer.
if (HasRestrict && CandidateTy->isAnyPointerType() &&
@@ -7717,7 +7717,7 @@ class BuiltinOperatorOverloadBuilder {
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);
else
S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet);
-
+
if (HasVolatile) {
ParamTypes[0]
= S.Context.getLValueReferenceType(
@@ -8274,7 +8274,7 @@ public:
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
/*IsAssigmentOperator=*/isEqualOp);
}
-
+
if (!(*Ptr).isRestrictQualified() &&
VisibleTypeConversionsQuals.hasRestrict()) {
// restrict version
@@ -8282,7 +8282,7 @@ public:
= S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
/*IsAssigmentOperator=*/isEqualOp);
-
+
if (NeedVolatile) {
// volatile restrict version
ParamTypes[0]
@@ -8323,7 +8323,7 @@ public:
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
/*IsAssigmentOperator=*/true);
}
-
+
if (!(*Ptr).isRestrictQualified() &&
VisibleTypeConversionsQuals.hasRestrict()) {
// restrict version
@@ -8331,7 +8331,7 @@ public:
= S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
/*IsAssigmentOperator=*/true);
-
+
if (NeedVolatile) {
// volatile restrict version
ParamTypes[0]
@@ -9424,13 +9424,13 @@ void Sema::NoteAllOverloadCandidates(Expr *OverloadedExpr, QualType DestType,
OverloadExpr *OvlExpr = Ovl.Expression;
for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
- IEnd = OvlExpr->decls_end();
+ IEnd = OvlExpr->decls_end();
I != IEnd; ++I) {
- if (FunctionTemplateDecl *FunTmpl =
+ if (FunctionTemplateDecl *FunTmpl =
dyn_cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()) ) {
NoteOverloadCandidate(*I, FunTmpl->getTemplatedDecl(), DestType,
TakingAddress);
- } else if (FunctionDecl *Fun
+ } else if (FunctionDecl *Fun
= dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()) ) {
NoteOverloadCandidate(*I, Fun, DestType, TakingAddress);
}
@@ -9608,7 +9608,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange())
<< FromTy << ToTy << (unsigned) isObjectArgument << I+1
<< (unsigned) (Cand->Fix.Kind);
-
+
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -9711,7 +9711,7 @@ static bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
// right number of arguments, because only overloaded operators have
// the weird behavior of overloading member and non-member functions.
// Just don't report anything.
- if (Fn->isInvalidDecl() &&
+ if (Fn->isInvalidDecl() &&
Fn->getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
return true;
@@ -9735,9 +9735,9 @@ static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
"The templated declaration should at least be a function"
" when diagnosing bad template argument deduction due to too many"
" or too few arguments");
-
+
FunctionDecl *Fn = cast<FunctionDecl>(D);
-
+
// TODO: treat calls to a missing default constructor as a special case
const FunctionProtoType *FnTy = Fn->getType()->getAs<FunctionProtoType>();
unsigned MinParams = Fn->getMinRequiredArguments();
@@ -9993,8 +9993,8 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
return;
// FIXME: For generic lambda parameters, check if the function is a lambda
- // call operator, and if so, emit a prettier and more informative
- // diagnostic that mentions 'auto' and lambda in addition to
+ // call operator, and if so, emit a prettier and more informative
+ // diagnostic that mentions 'auto' and lambda in addition to
// (or instead of?) the canonical template type parameters.
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_non_deduced_mismatch)
@@ -10692,16 +10692,16 @@ void TemplateSpecCandidateSet::NoteCandidates(Sema &S, SourceLocation Loc) {
// R (S::*)(A) --> R (A)
QualType Sema::ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType) {
QualType Ret = PossiblyAFunctionType;
- if (const PointerType *ToTypePtr =
+ if (const PointerType *ToTypePtr =
PossiblyAFunctionType->getAs<PointerType>())
Ret = ToTypePtr->getPointeeType();
- else if (const ReferenceType *ToTypeRef =
+ else if (const ReferenceType *ToTypeRef =
PossiblyAFunctionType->getAs<ReferenceType>())
Ret = ToTypeRef->getPointeeType();
else if (const MemberPointerType *MemTypePtr =
- PossiblyAFunctionType->getAs<MemberPointerType>())
- Ret = MemTypePtr->getPointeeType();
- Ret =
+ PossiblyAFunctionType->getAs<MemberPointerType>())
+ Ret = MemTypePtr->getPointeeType();
+ Ret =
Context.getCanonicalType(Ret).getUnqualifiedType();
return Ret;
}
@@ -10727,9 +10727,9 @@ namespace {
class AddressOfFunctionResolver {
Sema& S;
Expr* SourceExpr;
- const QualType& TargetType;
- QualType TargetFunctionType; // Extracted function type from target type
-
+ const QualType& TargetType;
+ QualType TargetFunctionType; // Extracted function type from target type
+
bool Complain;
//DeclAccessPair& ResultFunctionAccessPair;
ASTContext& Context;
@@ -10739,7 +10739,7 @@ class AddressOfFunctionResolver {
bool StaticMemberFunctionFromBoundPointer;
bool HasComplained;
- OverloadExpr::FindResult OvlExprInfo;
+ OverloadExpr::FindResult OvlExprInfo;
OverloadExpr *OvlExpr;
TemplateArgumentListInfo OvlExplicitTemplateArgs;
SmallVector<std::pair<DeclAccessPair, FunctionDecl*>, 4> Matches;
@@ -10786,7 +10786,7 @@ public:
}
return;
}
-
+
if (OvlExpr->hasExplicitTemplateArgs())
OvlExpr->copyTemplateArgumentsInto(OvlExplicitTemplateArgs);
@@ -10864,7 +10864,7 @@ private:
}
// return true if any matching specializations were found
- bool AddMatchingTemplateFunction(FunctionTemplateDecl* FunctionTemplate,
+ bool AddMatchingTemplateFunction(FunctionTemplateDecl* FunctionTemplate,
const DeclAccessPair& CurAccessFunPair) {
if (CXXMethodDecl *Method
= dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl())) {
@@ -10872,7 +10872,7 @@ private:
// static when converting to member pointer.
if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
return false;
- }
+ }
else if (TargetTypeIsNonStaticMemberFunction)
return false;
@@ -10885,17 +10885,17 @@ private:
FunctionDecl *Specialization = nullptr;
TemplateDeductionInfo Info(FailedCandidates.getLocation());
if (Sema::TemplateDeductionResult Result
- = S.DeduceTemplateArguments(FunctionTemplate,
+ = S.DeduceTemplateArguments(FunctionTemplate,
&OvlExplicitTemplateArgs,
- TargetFunctionType, Specialization,
+ TargetFunctionType, Specialization,
Info, /*IsAddressOfFunction*/true)) {
// Make a note of the failed deduction for diagnostics.
FailedCandidates.addCandidate()
.set(CurAccessFunPair, FunctionTemplate->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, Result, Info));
return false;
- }
-
+ }
+
// Template argument deduction ensures that we have an exact match or
// compatible pointer-to-function arguments that would be adjusted by ICS.
+ // This function template specialization works.
@@ -10909,15 +10909,15 @@ private:
Matches.push_back(std::make_pair(CurAccessFunPair, Specialization));
return true;
}
-
- bool AddMatchingNonTemplateFunction(NamedDecl* Fn,
+
+ bool AddMatchingNonTemplateFunction(NamedDecl* Fn,
const DeclAccessPair& CurAccessFunPair) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
// Skip non-static functions when converting to pointer, and static
// when converting to member pointer.
if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
return false;
- }
+ }
else if (TargetTypeIsNonStaticMemberFunction)
return false;
@@ -10947,20 +10947,20 @@ private:
return true;
}
}
-
+
return false;
}
-
+
bool FindAllFunctionsThatMatchTargetTypeExactly() {
bool Ret = false;
-
+
// If the overload expression doesn't have the form of a pointer to
// member, don't try to convert it to a pointer-to-member type.
if (IsInvalidFormOfPointerToMemberFunction())
return false;
for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
- E = OvlExpr->decls_end();
+ E = OvlExpr->decls_end();
I != E; ++I) {
// Look through any using declarations to find the underlying function.
NamedDecl *Fn = (*I)->getUnderlyingDecl();
@@ -11103,12 +11103,12 @@ public:
bool hadMultipleCandidates() const { return (OvlExpr->getNumDecls() > 1); }
int getNumMatches() const { return Matches.size(); }
-
+
FunctionDecl* getMatchingFunctionDecl() const {
if (Matches.size() != 1) return nullptr;
return Matches[0].second;
}
-
+
const DeclAccessPair* getMatchingFunctionAccessPair() const {
if (Matches.size() != 1) return nullptr;
return &Matches[0].first;
@@ -11248,7 +11248,7 @@ bool Sema::resolveAndFixAddressOfOnlyViableOverloadCandidate(
/// If no template-ids are found, no diagnostics are emitted and NULL is
/// returned.
FunctionDecl *
-Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
+Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain,
DeclAccessPair *FoundResult) {
// C++ [over.over]p1:
@@ -11311,9 +11311,9 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
}
return nullptr;
}
-
+
Matched = Specialization;
- if (FoundResult) *FoundResult = I.getPair();
+ if (FoundResult) *FoundResult = I.getPair();
}
if (Matched &&
@@ -11336,8 +11336,8 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
// returns true if 'complain' is set.
bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr, bool doFunctionPointerConverion,
- bool complain, SourceRange OpRangeForComplaining,
- QualType DestTypeForComplaining,
+ bool complain, SourceRange OpRangeForComplaining,
+ QualType DestTypeForComplaining,
unsigned DiagIDForComplaining) {
assert(SrcExpr.get()->getType() == Context.OverloadTy);
@@ -11394,7 +11394,7 @@ bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
Diag(OpRangeForComplaining.getBegin(), DiagIDForComplaining)
<< ovl.Expression->getName()
<< DestTypeForComplaining
- << OpRangeForComplaining
+ << OpRangeForComplaining
<< ovl.Expression->getQualifierLoc().getSourceRange();
NoteAllOverloadCandidates(SrcExpr.get());
@@ -12183,8 +12183,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// TODO: provide better source location info in DNLoc component.
DeclarationNameInfo OpNameInfo(OpName, OpLoc);
UnresolvedLookupExpr *Fn
- = UnresolvedLookupExpr::Create(Context, NamingClass,
- NestedNameSpecifierLoc(), OpNameInfo,
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
+ NestedNameSpecifierLoc(), OpNameInfo,
/*ADL*/ true, IsOverloaded(Fns),
Fns.begin(), Fns.end());
return new (Context)
@@ -12747,12 +12747,12 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
if (DiagnoseUseOfDecl(Best->FoundDecl, UnresExpr->getNameLoc()))
return ExprError();
// If FoundDecl is different from Method (such as if one is a template
- // and the other a specialization), make sure DiagnoseUseOfDecl is
+ // and the other a specialization), make sure DiagnoseUseOfDecl is
// called on both.
// FIXME: This would be more comprehensively addressed by modifying
// DiagnoseUseOfDecl to accept both the FoundDecl and the decl
// being used.
- if (Method != FoundDecl.getDecl() &&
+ if (Method != FoundDecl.getDecl() &&
DiagnoseUseOfDecl(Method, UnresExpr->getNameLoc()))
return ExprError();
break;
@@ -12775,7 +12775,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
case OR_Deleted:
Diag(UnresExpr->getMemberLoc(), diag::err_ovl_deleted_member_call)
<< Best->Function->isDeleted()
- << DeclName
+ << DeclName
<< getDeletedOrUnavailableSuffix(Best->Function)
<< MemExprE->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
@@ -12848,8 +12848,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
}
- if ((isa<CXXConstructorDecl>(CurContext) ||
- isa<CXXDestructorDecl>(CurContext)) &&
+ if ((isa<CXXConstructorDecl>(CurContext) ||
+ isa<CXXDestructorDecl>(CurContext)) &&
TheCall->getMethodDecl()->isPure()) {
const CXXMethodDecl *MD = TheCall->getMethodDecl();
@@ -12929,7 +12929,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
}
// C++ [over.call.object]p2:
- // In addition, for each (non-explicit in C++0x) conversion function
+ // In addition, for each (non-explicit in C++0x) conversion function
// declared in T of the form
//
// operator conversion-type-id () cv-qualifier;
@@ -13008,7 +13008,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
Diag(Object.get()->getLocStart(),
diag::err_ovl_deleted_object_call)
<< Best->Function->isDeleted()
- << Object.get()->getType()
+ << Object.get()->getType()
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Object.get()->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
@@ -13031,7 +13031,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
Best->FoundDecl);
if (DiagnoseUseOfDecl(Best->FoundDecl, LParenLoc))
return ExprError();
- assert(Conv == Best->FoundDecl.getDecl() &&
+ assert(Conv == Best->FoundDecl.getDecl() &&
"Found Decl & conversion-to-functionptr should be same, right?!");
// We selected one of the surrogate functions that converts the
// object parameter to a function pointer. Perform the conversion
@@ -13242,7 +13242,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
case OR_Deleted:
Diag(OpLoc, diag::err_ovl_deleted_oper)
<< Best->Function->isDeleted()
- << "->"
+ << "->"
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Base->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
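
For reference, the integer-to-float classification in getNarrowingKind
(touched above to assert a non-null initializer and to bail out early on
value-dependent ones) behaves like this (hypothetical snippets):

  float ok{65536};        // exactly representable in float: not narrowing
  // float bad{16777217}; // 2^24 + 1 has no exact float representation:
  //                      // narrowing, ill-formed in list-initialization
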
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index f8970744389b..e7315934b515 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -3175,11 +3175,7 @@ getCCForDeclaratorChunk(Sema &S, Declarator &D,
for (const AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
Attr; Attr = Attr->getNext()) {
if (Attr->getKind() == AttributeList::AT_OpenCLKernel) {
- llvm::Triple::ArchType arch = S.Context.getTargetInfo().getTriple().getArch();
- if (arch == llvm::Triple::spir || arch == llvm::Triple::spir64 ||
- arch == llvm::Triple::amdgcn || arch == llvm::Triple::r600) {
- CC = CC_OpenCLKernel;
- }
+ CC = CC_OpenCLKernel;
break;
}
}
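
With the architecture check gone, the OpenCL kernel calling convention is
applied for every target. A hypothetical kernel, with the kind of IR expected
on a SPIR triple (exact output may differ):

  kernel void k(global int *out) { out[0] = 1; }
  // define spir_kernel void @k(i32 addrspace(1)* %out)
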
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index c931b13f65f3..95cb54f944e4 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -1422,8 +1422,8 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
getClangFullRepositoryVersion());
}
- if (WritingModule) {
+ if (WritingModule) {
// Module name
auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(MODULE_NAME));
@@ -1466,9 +1466,10 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.clear();
auto &Map = PP.getHeaderSearchInfo().getModuleMap();
-
- // Primary module map file.
- AddPath(Map.getModuleMapFileForUniquing(WritingModule)->getName(), Record);
+ AddPath(WritingModule->PresumedModuleMapFile.empty()
+ ? Map.getModuleMapFileForUniquing(WritingModule)->getName()
+ : StringRef(WritingModule->PresumedModuleMapFile),
+ Record);
// Additional module map files.
if (auto *AdditionalModMaps =
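
Tying this to the frontend changes above: for a module built from a
preprocessed module map, the presumed name recovered from the leading line
marker is what now lands in the PCM, e.g. (path hypothetical):

  # 1 "/src/libfoo/module.modulemap"
  module foo { header "foo.h" }
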
diff --git a/test/CodeGen/arm_neon_intrinsics.c b/test/CodeGen/arm_neon_intrinsics.c
index ae7c78e08f86..62888dd73339 100644
--- a/test/CodeGen/arm_neon_intrinsics.c
+++ b/test/CodeGen/arm_neon_intrinsics.c
@@ -3,8 +3,6 @@
// RUN: -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -mem2reg | FileCheck %s
-// REQUIRES: long-tests
-
#include <arm_neon.h>
// CHECK-LABEL: @test_vaba_s8(
@@ -3481,11 +3479,11 @@ float32_t test_vgetq_lane_f32(float32x4_t a) {
}
// CHECK-LABEL: @test_vgetq_lane_f16(
-// CHECK: [[__REINT_244:%.*]] = alloca <8 x half>, align 8
+// CHECK: [[__REINT_244:%.*]] = alloca <8 x half>, align 16
// CHECK: [[__REINT1_244:%.*]] = alloca i16, align 2
-// CHECK: store <8 x half> %a, <8 x half>* [[__REINT_244]], align 8
+// CHECK: store <8 x half> %a, <8 x half>* [[__REINT_244]], align 16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_244]] to <8 x i16>*
-// CHECK: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 8
+// CHECK: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16>
// CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i16> [[TMP3]], i32 3
@@ -4543,7 +4541,7 @@ poly16x4_t test_vld1_lane_p16(poly16_t const * a, poly16x4_t b) {
}
// CHECK-LABEL: @test_vld2q_u8(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint8x16x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.uint8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[__RET]] to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <16 x i8>, <16 x i8>
uint8x16x2_t test_vld2q_u8(uint8_t const * a) {
@@ -4551,7 +4549,7 @@ uint8x16x2_t test_vld2q_u8(uint8_t const * a) {
}
// CHECK-LABEL: @test_vld2q_u16(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x8x2_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <8 x i16>, <8 x i16>
@@ -4560,7 +4558,7 @@ uint16x8x2_t test_vld2q_u16(uint16_t const * a) {
}
// CHECK-LABEL: @test_vld2q_u32(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x4x2_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <4 x i32>, <4 x i32>
@@ -4569,7 +4567,7 @@ uint32x4x2_t test_vld2q_u32(uint32_t const * a) {
}
// CHECK-LABEL: @test_vld2q_s8(
-// CHECK: [[__RET:%.*]] = alloca %struct.int8x16x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.int8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[__RET]] to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <16 x i8>, <16 x i8>
int8x16x2_t test_vld2q_s8(int8_t const * a) {
@@ -4577,7 +4575,7 @@ int8x16x2_t test_vld2q_s8(int8_t const * a) {
}
// CHECK-LABEL: @test_vld2q_s16(
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x8x2_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <8 x i16>, <8 x i16>
@@ -4586,7 +4584,7 @@ int16x8x2_t test_vld2q_s16(int16_t const * a) {
}
// CHECK-LABEL: @test_vld2q_s32(
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x4x2_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <4 x i32>, <4 x i32>
@@ -4595,7 +4593,7 @@ int32x4x2_t test_vld2q_s32(int32_t const * a) {
}
// CHECK-LABEL: @test_vld2q_f16(
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <8 x i16>, <8 x i16>
@@ -4604,7 +4602,7 @@ float16x8x2_t test_vld2q_f16(float16_t const * a) {
}
// CHECK-LABEL: @test_vld2q_f32(
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x4x2_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast float* %a to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <4 x float>, <4 x float>
@@ -4613,7 +4611,7 @@ float32x4x2_t test_vld2q_f32(float32_t const * a) {
}
// CHECK-LABEL: @test_vld2q_p8(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly8x16x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.poly8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[__RET]] to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <16 x i8>, <16 x i8>
poly8x16x2_t test_vld2q_p8(poly8_t const * a) {
@@ -4621,7 +4619,7 @@ poly8x16x2_t test_vld2q_p8(poly8_t const * a) {
}
// CHECK-LABEL: @test_vld2q_p16(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x2_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x8x2_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VLD2Q_V:%.*]] = call { <8 x i16>, <8 x i16>
@@ -4840,24 +4838,24 @@ poly16x4x2_t test_vld2_dup_p16(poly16_t const * a) {
}
// CHECK-LABEL: @test_vld2q_lane_u16(
-// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x i16>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.uint16x8x2_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -4867,24 +4865,24 @@ uint16x8x2_t test_vld2q_lane_u16(uint16_t const * a, uint16x8x2_t b) {
}

// CHECK-LABEL: @test_vld2q_lane_u32(
-// CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <4 x i32>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint32x4x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint32x4x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.uint32x4x2_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32>
@@ -4894,24 +4892,24 @@ uint32x4x2_t test_vld2q_lane_u32(uint32_t const * a, uint32x4x2_t b) {
}

// CHECK-LABEL: @test_vld2q_lane_s16(
-// CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x i16>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.int16x8x2_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -4921,24 +4919,24 @@ int16x8x2_t test_vld2q_lane_s16(int16_t const * a, int16x8x2_t b) {
}

// CHECK-LABEL: @test_vld2q_lane_s32(
-// CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <4 x i32>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int32x4x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int32x4x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.int32x4x2_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32>
@@ -4948,24 +4946,24 @@ int32x4x2_t test_vld2q_lane_s32(int32_t const * a, int32x4x2_t b) {
}

// CHECK-LABEL: @test_vld2q_lane_f16(
-// CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x half>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast half* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x half> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -4975,24 +4973,24 @@ float16x8x2_t test_vld2q_lane_f16(float16_t const * a, float16x8x2_t b) {
}

// CHECK-LABEL: @test_vld2q_lane_f32(
-// CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <4 x float>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float32x4x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float32x4x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.float32x4x2_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast float* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x float>], [2 x <4 x float>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <4 x float> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x float>], [2 x <4 x float>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <4 x float> [[TMP7]] to <16 x i8>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x float>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x float>
@@ -5002,24 +5000,24 @@ float32x4x2_t test_vld2q_lane_f32(float32_t const * a, float32x4x2_t b) {
}

// CHECK-LABEL: @test_vld2q_lane_p16(
-// CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x i16>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.poly16x8x2_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -5284,7 +5282,7 @@ poly16x4x2_t test_vld2_lane_p16(poly16_t const * a, poly16x4x2_t b) {
}

// CHECK-LABEL: @test_vld3q_u8(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint8x16x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.uint8x16x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x3_t* [[__RET]] to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>
uint8x16x3_t test_vld3q_u8(uint8_t const * a) {
@@ -5292,7 +5290,7 @@ uint8x16x3_t test_vld3q_u8(uint8_t const * a) {
}

// CHECK-LABEL: @test_vld3q_u16(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x8x3_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>
@@ -5301,7 +5299,7 @@ uint16x8x3_t test_vld3q_u16(uint16_t const * a) {
}

// CHECK-LABEL: @test_vld3q_u32(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x4x3_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>
@@ -5310,7 +5308,7 @@ uint32x4x3_t test_vld3q_u32(uint32_t const * a) {
}

// CHECK-LABEL: @test_vld3q_s8(
-// CHECK: [[__RET:%.*]] = alloca %struct.int8x16x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.int8x16x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x3_t* [[__RET]] to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>
int8x16x3_t test_vld3q_s8(int8_t const * a) {
@@ -5318,7 +5316,7 @@ int8x16x3_t test_vld3q_s8(int8_t const * a) {
}

// CHECK-LABEL: @test_vld3q_s16(
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x8x3_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>
@@ -5327,7 +5325,7 @@ int16x8x3_t test_vld3q_s16(int16_t const * a) {
}

// CHECK-LABEL: @test_vld3q_s32(
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x4x3_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>
@@ -5336,7 +5334,7 @@ int32x4x3_t test_vld3q_s32(int32_t const * a) {
}

// CHECK-LABEL: @test_vld3q_f16(
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>
@@ -5345,7 +5343,7 @@ float16x8x3_t test_vld3q_f16(float16_t const * a) {
}

// CHECK-LABEL: @test_vld3q_f32(
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x4x3_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast float* %a to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <4 x float>, <4 x float>, <4 x float>
@@ -5354,7 +5352,7 @@ float32x4x3_t test_vld3q_f32(float32_t const * a) {
}

// CHECK-LABEL: @test_vld3q_p8(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly8x16x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.poly8x16x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x3_t* [[__RET]] to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>
poly8x16x3_t test_vld3q_p8(poly8_t const * a) {
@@ -5362,7 +5360,7 @@ poly8x16x3_t test_vld3q_p8(poly8_t const * a) {
}

// CHECK-LABEL: @test_vld3q_p16(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x3_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x8x3_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VLD3Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>
@@ -5581,28 +5579,28 @@ poly16x4x3_t test_vld3_dup_p16(poly16_t const * a) {
}

// CHECK-LABEL: @test_vld3q_lane_u16(
-// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x i16>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.uint16x8x3_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP9]] to <16 x i8>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -5613,28 +5611,28 @@ uint16x8x3_t test_vld3q_lane_u16(uint16_t const * a, uint16x8x3_t b) {
}

// CHECK-LABEL: @test_vld3q_lane_u32(
-// CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <4 x i32>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint32x4x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint32x4x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.uint32x4x3_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP9]] to <16 x i8>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32>
@@ -5645,28 +5643,28 @@ uint32x4x3_t test_vld3q_lane_u32(uint32_t const * a, uint32x4x3_t b) {
}

// CHECK-LABEL: @test_vld3q_lane_s16(
-// CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x i16>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.int16x8x3_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP9]] to <16 x i8>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -5677,28 +5675,28 @@ int16x8x3_t test_vld3q_lane_s16(int16_t const * a, int16x8x3_t b) {
}

// CHECK-LABEL: @test_vld3q_lane_s32(
-// CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <4 x i32>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int32x4x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int32x4x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.int32x4x3_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP9]] to <16 x i8>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32>
@@ -5709,28 +5707,28 @@ int32x4x3_t test_vld3q_lane_s32(int32_t const * a, int32x4x3_t b) {
}

// CHECK-LABEL: @test_vld3q_lane_f16(
-// CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x half>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast half* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x half> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <8 x half> [[TMP9]] to <16 x i8>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -5741,28 +5739,28 @@ float16x8x3_t test_vld3q_lane_f16(float16_t const * a, float16x8x3_t b) {
}

// CHECK-LABEL: @test_vld3q_lane_f32(
-// CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <4 x float>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float32x4x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float32x4x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.float32x4x3_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast float* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x float>], [3 x <4 x float>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <4 x float> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x float>], [3 x <4 x float>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <4 x float> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x float>], [3 x <4 x float>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <4 x float> [[TMP9]] to <16 x i8>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x float>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x float>
@@ -5773,28 +5771,28 @@ float32x4x3_t test_vld3q_lane_f32(float32_t const * a, float32x4x3_t b) {
}

// CHECK-LABEL: @test_vld3q_lane_p16(
-// CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x i16>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.poly16x8x3_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP9]] to <16 x i8>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -6104,7 +6102,7 @@ poly16x4x3_t test_vld3_lane_p16(poly16_t const * a, poly16x4x3_t b) {
}

// CHECK-LABEL: @test_vld4q_u8(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint8x16x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.uint8x16x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x4_t* [[__RET]] to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>
uint8x16x4_t test_vld4q_u8(uint8_t const * a) {
@@ -6112,7 +6110,7 @@ uint8x16x4_t test_vld4q_u8(uint8_t const * a) {
}

// CHECK-LABEL: @test_vld4q_u16(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x8x4_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>
@@ -6121,7 +6119,7 @@ uint16x8x4_t test_vld4q_u16(uint16_t const * a) {
}

// CHECK-LABEL: @test_vld4q_u32(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x4x4_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>
@@ -6130,7 +6128,7 @@ uint32x4x4_t test_vld4q_u32(uint32_t const * a) {
}

// CHECK-LABEL: @test_vld4q_s8(
-// CHECK: [[__RET:%.*]] = alloca %struct.int8x16x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.int8x16x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x4_t* [[__RET]] to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>
int8x16x4_t test_vld4q_s8(int8_t const * a) {
@@ -6138,7 +6136,7 @@ int8x16x4_t test_vld4q_s8(int8_t const * a) {
}

// CHECK-LABEL: @test_vld4q_s16(
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x8x4_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>
@@ -6147,7 +6145,7 @@ int16x8x4_t test_vld4q_s16(int16_t const * a) {
}

// CHECK-LABEL: @test_vld4q_s32(
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x4x4_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>
@@ -6156,7 +6154,7 @@ int32x4x4_t test_vld4q_s32(int32_t const * a) {
}

// CHECK-LABEL: @test_vld4q_f16(
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>
@@ -6165,7 +6163,7 @@ float16x8x4_t test_vld4q_f16(float16_t const * a) {
}

// CHECK-LABEL: @test_vld4q_f32(
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x4x4_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast float* %a to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <4 x float>, <4 x float>, <4 x float>, <4 x float>
@@ -6174,7 +6172,7 @@ float32x4x4_t test_vld4q_f32(float32_t const * a) {
}

// CHECK-LABEL: @test_vld4q_p8(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly8x16x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.poly8x16x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x4_t* [[__RET]] to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>
poly8x16x4_t test_vld4q_p8(poly8_t const * a) {
@@ -6182,7 +6180,7 @@ poly8x16x4_t test_vld4q_p8(poly8_t const * a) {
}

// CHECK-LABEL: @test_vld4q_p16(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x4_t, align 8
+// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x8x4_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VLD4Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>
@@ -6401,32 +6399,32 @@ poly16x4x4_t test_vld4_dup_p16(poly16_t const * a) {
}

// CHECK-LABEL: @test_vld4q_lane_u16(
-// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x i16>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.uint16x8x4_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP9]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP11:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP11:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP12:%.*]] = bitcast <8 x i16> [[TMP11]] to <16 x i8>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -6438,32 +6436,32 @@ uint16x8x4_t test_vld4q_lane_u16(uint16_t const * a, uint16x8x4_t b) {
}

// CHECK-LABEL: @test_vld4q_lane_u32(
-// CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <4 x i32>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint32x4x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint32x4x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.uint32x4x4_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP9]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP11:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP11:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP12:%.*]] = bitcast <4 x i32> [[TMP11]] to <16 x i8>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32>
@@ -6475,32 +6473,32 @@ uint32x4x4_t test_vld4q_lane_u32(uint32_t const * a, uint32x4x4_t b) {
}

// CHECK-LABEL: @test_vld4q_lane_s16(
-// CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.int16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x i16>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.int16x8x4_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP9]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP11:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP11:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP12:%.*]] = bitcast <8 x i16> [[TMP11]] to <16 x i8>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -6512,32 +6510,32 @@ int16x8x4_t test_vld4q_lane_s16(int16_t const * a, int16x8x4_t b) {
}

// CHECK-LABEL: @test_vld4q_lane_s32(
-// CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.int32x4x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <4 x i32>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int32x4x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int32x4x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.int32x4x4_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP9]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP11:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP11:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP12:%.*]] = bitcast <4 x i32> [[TMP11]] to <16 x i8>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32>
@@ -6549,32 +6547,32 @@ int32x4x4_t test_vld4q_lane_s32(int32_t const * a, int32x4x4_t b) {
}

// CHECK-LABEL: @test_vld4q_lane_f16(
-// CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x half>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast half* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x half> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <8 x half> [[TMP9]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP11:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP11:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP12:%.*]] = bitcast <8 x half> [[TMP11]] to <16 x i8>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -6586,32 +6584,32 @@ float16x8x4_t test_vld4q_lane_f16(float16_t const * a, float16x8x4_t b) {
}
// CHECK-LABEL: @test_vld4q_lane_f32(
-// CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.float32x4x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <4 x float>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float32x4x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float32x4x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.float32x4x4_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast float* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <4 x float> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <4 x float> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <4 x float> [[TMP9]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP11:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP11:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP12:%.*]] = bitcast <4 x float> [[TMP11]] to <16 x i8>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x float>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x float>
@@ -6623,32 +6621,32 @@ float32x4x4_t test_vld4q_lane_f32(float32_t const * a, float32x4x4_t b) {
}
// CHECK-LABEL: @test_vld4q_lane_p16(
-// CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 8
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16
+// CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x i16>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast %struct.poly16x8x4_t* [[__RET]] to i8*
// CHECK: [[TMP4:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP9]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP11:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP11:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP12:%.*]] = bitcast <8 x i16> [[TMP11]] to <16 x i8>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
@@ -14549,21 +14547,21 @@ float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) {
// CHECK-LABEL: @test_vsetq_lane_f16(
// CHECK: [[__REINT_248:%.*]] = alloca half, align 2
-// CHECK: [[__REINT1_248:%.*]] = alloca <8 x half>, align 8
-// CHECK: [[__REINT2_248:%.*]] = alloca <8 x i16>, align 8
+// CHECK: [[__REINT1_248:%.*]] = alloca <8 x half>, align 16
+// CHECK: [[__REINT2_248:%.*]] = alloca <8 x i16>, align 16
// CHECK: [[TMP0:%.*]] = load half, half* %a, align 2
// CHECK: store half [[TMP0]], half* [[__REINT_248]], align 2
-// CHECK: store <8 x half> %b, <8 x half>* [[__REINT1_248]], align 8
+// CHECK: store <8 x half> %b, <8 x half>* [[__REINT1_248]], align 16
// CHECK: [[TMP1:%.*]] = bitcast half* [[__REINT_248]] to i16*
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK: [[TMP3:%.*]] = bitcast <8 x half>* [[__REINT1_248]] to <8 x i16>*
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[TMP3]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[TMP3]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[TMP2]], i32 3
-// CHECK: store <8 x i16> [[VSET_LANE]], <8 x i16>* [[__REINT2_248]], align 8
+// CHECK: store <8 x i16> [[VSET_LANE]], <8 x i16>* [[__REINT2_248]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16>* [[__REINT2_248]] to <8 x half>*
-// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[TMP7]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[TMP7]], align 16
// CHECK: ret <8 x half> [[TMP8]]
float16x8_t test_vsetq_lane_f16(float16_t *a, float16x8_t b) {
return vsetq_lane_f16(*a, b, 3);
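The __reint temporaries checked above come from the arm_neon.h implementation of vsetq_lane_f16: with fp16 as a storage-only format here, the header round-trips the <8 x half> value through <8 x i16>, performs the insertelement at lane 3 on the integer vector, and reinterprets the result back. A sketch of the call site (hypothetical wrapper name; the lowering itself is what the TMP1..TMP8 sequence above checks):

    #include <arm_neon.h>

    float16x8_t set_lane3(float16_t *a, float16x8_t b) {
      /* Spills *a and b, bitcasts half -> i16 and
       * <8 x half> -> <8 x i16>, inserts at lane 3, then loads the
       * result back as <8 x half>, now with 16-byte-aligned spills. */
      return vsetq_lane_f16(*a, b, 3);
    }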
@@ -16194,20 +16192,20 @@ void test_vst1_lane_p16(poly16_t * a, poly16x4_t b) {
}
// CHECK-LABEL: @test_vst2q_u8(
-// CHECK: [[B:%.*]] = alloca %struct.uint8x16x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint8x16x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <16 x i8>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint8x16x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint8x16x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], [2 x <16 x i8>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 16
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], [2 x <16 x i8>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 16
// CHECK: call void @llvm.arm.neon.vst2.p0i8.v16i8(i8* %a, <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], i32 1)
// CHECK: ret void
void test_vst2q_u8(uint8_t * a, uint8x16x2_t b) {
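Each vst2q test has the same shape: the 32-byte two-vector struct arrives coerced as [4 x i64], is stored into the alloca for b, copied into the __s1 shadow by an @llvm.memcpy whose alignment operand (still a plain i32 argument in this IR dialect, not a per-operand attribute) is likewise bumped from 8 to 16, and the two <16 x i8> elements are then loaded and passed to @llvm.arm.neon.vst2. In source terms the whole ceremony is a single intrinsic call, sketched here (hypothetical wrapper name):

    #include <arm_neon.h>

    void store_interleaved2(uint8_t *a, uint8x16x2_t b) {
      /* Stores b.val[0] and b.val[1] interleaved at a; lowers to the
       * @llvm.arm.neon.vst2.p0i8.v16i8 call checked above. */
      vst2q_u8(a, b);
    }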
@@ -16215,22 +16213,22 @@ void test_vst2q_u8(uint8_t * a, uint8x16x2_t b) {
}
// CHECK-LABEL: @test_vst2q_u16(
-// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x i16>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -16241,22 +16239,22 @@ void test_vst2q_u16(uint16_t * a, uint16x8x2_t b) {
}
// CHECK-LABEL: @test_vst2q_u32(
-// CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <4 x i32>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint32x4x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint32x4x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -16267,20 +16265,20 @@ void test_vst2q_u32(uint32_t * a, uint32x4x2_t b) {
}
// CHECK-LABEL: @test_vst2q_s8(
-// CHECK: [[B:%.*]] = alloca %struct.int8x16x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int8x16x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <16 x i8>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int8x16x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int8x16x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], [2 x <16 x i8>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 16
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], [2 x <16 x i8>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 16
// CHECK: call void @llvm.arm.neon.vst2.p0i8.v16i8(i8* %a, <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], i32 1)
// CHECK: ret void
void test_vst2q_s8(int8_t * a, int8x16x2_t b) {
@@ -16288,22 +16286,22 @@ void test_vst2q_s8(int8_t * a, int8x16x2_t b) {
}
// CHECK-LABEL: @test_vst2q_s16(
-// CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x i16>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -16314,22 +16312,22 @@ void test_vst2q_s16(int16_t * a, int16x8x2_t b) {
}
// CHECK-LABEL: @test_vst2q_s32(
-// CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <4 x i32>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int32x4x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int32x4x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -16340,22 +16338,22 @@ void test_vst2q_s32(int32_t * a, int32x4x2_t b) {
}
// CHECK-LABEL: @test_vst2q_f16(
-// CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x half>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast half* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x half> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -16366,22 +16364,22 @@ void test_vst2q_f16(float16_t * a, float16x8x2_t b) {
}
// CHECK-LABEL: @test_vst2q_f32(
-// CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <4 x float>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float32x4x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float32x4x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast float* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x float>], [2 x <4 x float>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x float> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x float>], [2 x <4 x float>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x float> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x float>
@@ -16392,20 +16390,20 @@ void test_vst2q_f32(float32_t * a, float32x4x2_t b) {
}
// CHECK-LABEL: @test_vst2q_p8(
-// CHECK: [[B:%.*]] = alloca %struct.poly8x16x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly8x16x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <16 x i8>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly8x16x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly8x16x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], [2 x <16 x i8>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 16
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], [2 x <16 x i8>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 16
// CHECK: call void @llvm.arm.neon.vst2.p0i8.v16i8(i8* %a, <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], i32 1)
// CHECK: ret void
void test_vst2q_p8(poly8_t * a, poly8x16x2_t b) {
@@ -16413,22 +16411,22 @@ void test_vst2q_p8(poly8_t * a, poly8x16x2_t b) {
}
// CHECK-LABEL: @test_vst2q_p16(
-// CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x i16>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -16736,22 +16734,22 @@ void test_vst2_p16(poly16_t * a, poly16x4x2_t b) {
}
// CHECK-LABEL: @test_vst2q_lane_u16(
-// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x i16>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -16762,22 +16760,22 @@ void test_vst2q_lane_u16(uint16_t * a, uint16x8x2_t b) {
}
// CHECK-LABEL: @test_vst2q_lane_u32(
-// CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <4 x i32>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint32x4x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint32x4x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -16788,22 +16786,22 @@ void test_vst2q_lane_u32(uint32_t * a, uint32x4x2_t b) {
}
// CHECK-LABEL: @test_vst2q_lane_s16(
-// CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x i16>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -16814,22 +16812,22 @@ void test_vst2q_lane_s16(int16_t * a, int16x8x2_t b) {
}
// CHECK-LABEL: @test_vst2q_lane_s32(
-// CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <4 x i32>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int32x4x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int32x4x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -16840,22 +16838,22 @@ void test_vst2q_lane_s32(int32_t * a, int32x4x2_t b) {
}
// CHECK-LABEL: @test_vst2q_lane_f16(
-// CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x half>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast half* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x half> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -16866,22 +16864,22 @@ void test_vst2q_lane_f16(float16_t * a, float16x8x2_t b) {
}
// CHECK-LABEL: @test_vst2q_lane_f32(
-// CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <4 x float>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float32x4x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float32x4x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast float* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x float>], [2 x <4 x float>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x float> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x float>], [2 x <4 x float>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x float> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x float>
@@ -16892,22 +16890,22 @@ void test_vst2q_lane_f32(float32_t * a, float32x4x2_t b) {
}
// CHECK-LABEL: @test_vst2q_lane_p16(
-// CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [2 x <8 x i16>]* [[COERCE_DIVE]] to [4 x i64]*
-// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 8
+// CHECK: store [4 x i64] [[B]].coerce, [4 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly16x8x2_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly16x8x2_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 32, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], [2 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -17163,23 +17161,23 @@ void test_vst2_lane_p16(poly16_t * a, poly16x4x2_t b) {
}
// CHECK-LABEL: @test_vst3q_u8(
-// CHECK: [[B:%.*]] = alloca %struct.uint8x16x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint8x16x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <16 x i8>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint8x16x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint8x16x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], [3 x <16 x i8>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 16
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], [3 x <16 x i8>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 16
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], [3 x <16 x i8>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 16
// CHECK: call void @llvm.arm.neon.vst3.p0i8.v16i8(i8* %a, <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], i32 1)
// CHECK: ret void
void test_vst3q_u8(uint8_t * a, uint8x16x3_t b) {
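The vst3q variants scale the same pattern up: sizeof(uint8x16x3_t) is 48 bytes, so the coerced ABI type becomes [6 x i64] and the copy into __s1 is a 48-byte memcpy, again now expected at alignment 16. The matching one-liner (hypothetical wrapper name):

    #include <arm_neon.h>

    void store_interleaved3(uint8_t *a, uint8x16x3_t b) {
      /* Three-way interleaved store; lowers to
       * @llvm.arm.neon.vst3.p0i8.v16i8 as checked above. */
      vst3q_u8(a, b);
    }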
@@ -17187,26 +17185,26 @@ void test_vst3q_u8(uint8_t * a, uint8x16x3_t b) {
}
// CHECK-LABEL: @test_vst3q_u16(
-// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x i16>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -17218,26 +17216,26 @@ void test_vst3q_u16(uint16_t * a, uint16x8x3_t b) {
}
// CHECK-LABEL: @test_vst3q_u32(
-// CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <4 x i32>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint32x4x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint32x4x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -17249,23 +17247,23 @@ void test_vst3q_u32(uint32_t * a, uint32x4x3_t b) {
}
// CHECK-LABEL: @test_vst3q_s8(
-// CHECK: [[B:%.*]] = alloca %struct.int8x16x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int8x16x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <16 x i8>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int8x16x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int8x16x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], [3 x <16 x i8>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 16
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], [3 x <16 x i8>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 16
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], [3 x <16 x i8>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 16
// CHECK: call void @llvm.arm.neon.vst3.p0i8.v16i8(i8* %a, <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], i32 1)
// CHECK: ret void
void test_vst3q_s8(int8_t * a, int8x16x3_t b) {
@@ -17273,26 +17271,26 @@ void test_vst3q_s8(int8_t * a, int8x16x3_t b) {
}
// CHECK-LABEL: @test_vst3q_s16(
-// CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x i16>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -17304,26 +17302,26 @@ void test_vst3q_s16(int16_t * a, int16x8x3_t b) {
}
// CHECK-LABEL: @test_vst3q_s32(
-// CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <4 x i32>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int32x4x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int32x4x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -17335,26 +17333,26 @@ void test_vst3q_s32(int32_t * a, int32x4x3_t b) {
}
// CHECK-LABEL: @test_vst3q_f16(
-// CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x half>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast half* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x half> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x half> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -17366,26 +17364,26 @@ void test_vst3q_f16(float16_t * a, float16x8x3_t b) {
}
// CHECK-LABEL: @test_vst3q_f32(
-// CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <4 x float>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float32x4x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float32x4x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast float* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x float>], [3 x <4 x float>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x float> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x float>], [3 x <4 x float>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x float> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x float>], [3 x <4 x float>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x float> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x float>
@@ -17397,23 +17395,23 @@ void test_vst3q_f32(float32_t * a, float32x4x3_t b) {
}
// CHECK-LABEL: @test_vst3q_p8(
-// CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <16 x i8>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly8x16x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly8x16x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], [3 x <16 x i8>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 16
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], [3 x <16 x i8>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 16
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], [3 x <16 x i8>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 16
// CHECK: call void @llvm.arm.neon.vst3.p0i8.v16i8(i8* %a, <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], i32 1)
// CHECK: ret void
void test_vst3q_p8(poly8_t * a, poly8x16x3_t b) {
@@ -17421,26 +17419,26 @@ void test_vst3q_p8(poly8_t * a, poly8x16x3_t b) {
}
// CHECK-LABEL: @test_vst3q_p16(
-// CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x i16>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -17803,26 +17801,26 @@ void test_vst3_p16(poly16_t * a, poly16x4x3_t b) {
}
// CHECK-LABEL: @test_vst3q_lane_u16(
-// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x i16>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -17834,26 +17832,26 @@ void test_vst3q_lane_u16(uint16_t * a, uint16x8x3_t b) {
}
// CHECK-LABEL: @test_vst3q_lane_u32(
-// CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <4 x i32>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint32x4x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint32x4x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -17865,26 +17863,26 @@ void test_vst3q_lane_u32(uint32_t * a, uint32x4x3_t b) {
}
// CHECK-LABEL: @test_vst3q_lane_s16(
-// CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x i16>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -17896,26 +17894,26 @@ void test_vst3q_lane_s16(int16_t * a, int16x8x3_t b) {
}
// CHECK-LABEL: @test_vst3q_lane_s32(
-// CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <4 x i32>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int32x4x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int32x4x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], [3 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -17927,26 +17925,26 @@ void test_vst3q_lane_s32(int32_t * a, int32x4x3_t b) {
}
// CHECK-LABEL: @test_vst3q_lane_f16(
-// CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x half>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast half* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x half> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x half> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -17958,26 +17956,26 @@ void test_vst3q_lane_f16(float16_t * a, float16x8x3_t b) {
}
// CHECK-LABEL: @test_vst3q_lane_f32(
-// CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <4 x float>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float32x4x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float32x4x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast float* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x float>], [3 x <4 x float>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x float> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x float>], [3 x <4 x float>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x float> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x float>], [3 x <4 x float>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x float> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x float>
@@ -17989,26 +17987,26 @@ void test_vst3q_lane_f32(float32_t * a, float32x4x3_t b) {
}
// CHECK-LABEL: @test_vst3q_lane_p16(
-// CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [3 x <8 x i16>]* [[COERCE_DIVE]] to [6 x i64]*
-// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 8
+// CHECK: store [6 x i64] [[B]].coerce, [6 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly16x8x3_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly16x8x3_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 48, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], [3 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -18309,26 +18307,26 @@ void test_vst3_lane_p16(poly16_t * a, poly16x4x3_t b) {
}
// CHECK-LABEL: @test_vst4q_u8(
-// CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <16 x i8>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint8x16x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint8x16x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 16
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 16
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 16
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX6]], align 16
// CHECK: call void @llvm.arm.neon.vst4.p0i8.v16i8(i8* %a, <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], <16 x i8> [[TMP6]], i32 1)
// CHECK: ret void
void test_vst4q_u8(uint8_t * a, uint8x16x4_t b) {
@@ -18336,30 +18334,30 @@ void test_vst4q_u8(uint8_t * a, uint8x16x4_t b) {
}
// CHECK-LABEL: @test_vst4q_u16(
-// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x i16>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -18372,30 +18370,30 @@ void test_vst4q_u16(uint16_t * a, uint16x8x4_t b) {
}
// CHECK-LABEL: @test_vst4q_u32(
-// CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <4 x i32>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint32x4x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint32x4x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -18408,26 +18406,26 @@ void test_vst4q_u32(uint32_t * a, uint32x4x4_t b) {
}
// CHECK-LABEL: @test_vst4q_s8(
-// CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <16 x i8>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int8x16x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int8x16x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 16
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 16
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 16
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX6]], align 16
// CHECK: call void @llvm.arm.neon.vst4.p0i8.v16i8(i8* %a, <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], <16 x i8> [[TMP6]], i32 1)
// CHECK: ret void
void test_vst4q_s8(int8_t * a, int8x16x4_t b) {
@@ -18435,30 +18433,30 @@ void test_vst4q_s8(int8_t * a, int8x16x4_t b) {
}
// CHECK-LABEL: @test_vst4q_s16(
-// CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x i16>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -18471,30 +18469,30 @@ void test_vst4q_s16(int16_t * a, int16x8x4_t b) {
}
// CHECK-LABEL: @test_vst4q_s32(
-// CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <4 x i32>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int32x4x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int32x4x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -18507,30 +18505,30 @@ void test_vst4q_s32(int32_t * a, int32x4x4_t b) {
}
// CHECK-LABEL: @test_vst4q_f16(
-// CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x half>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast half* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x half> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x half> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -18543,30 +18541,30 @@ void test_vst4q_f16(float16_t * a, float16x8x4_t b) {
}
// CHECK-LABEL: @test_vst4q_f32(
-// CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <4 x float>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float32x4x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float32x4x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast float* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x float> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x float> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x float> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <4 x float> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x float>
@@ -18579,26 +18577,26 @@ void test_vst4q_f32(float32_t * a, float32x4x4_t b) {
}
// CHECK-LABEL: @test_vst4q_p8(
-// CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <16 x i8>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly8x16x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly8x16x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX]], align 16
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX2]], align 16
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX4]], align 16
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], [4 x <16 x i8>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* [[ARRAYIDX6]], align 16
// CHECK: call void @llvm.arm.neon.vst4.p0i8.v16i8(i8* %a, <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], <16 x i8> [[TMP6]], i32 1)
// CHECK: ret void
void test_vst4q_p8(poly8_t * a, poly8x16x4_t b) {
@@ -18606,30 +18604,30 @@ void test_vst4q_p8(poly8_t * a, poly8x16x4_t b) {
}
// CHECK-LABEL: @test_vst4q_p16(
-// CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x i16>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -19047,30 +19045,30 @@ void test_vst4_p16(poly16_t * a, poly16x4x4_t b) {
}
// CHECK-LABEL: @test_vst4q_lane_u16(
-// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x i16>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -19083,30 +19081,30 @@ void test_vst4q_lane_u16(uint16_t * a, uint16x8x4_t b) {
}
// CHECK-LABEL: @test_vst4q_lane_u32(
-// CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <4 x i32>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.uint32x4x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.uint32x4x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -19119,30 +19117,30 @@ void test_vst4q_lane_u32(uint32_t * a, uint32x4x4_t b) {
}
// CHECK-LABEL: @test_vst4q_lane_s16(
-// CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x i16>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -19155,30 +19153,30 @@ void test_vst4q_lane_s16(int16_t * a, int16x8x4_t b) {
}
// CHECK-LABEL: @test_vst4q_lane_s32(
-// CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <4 x i32>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.int32x4x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.int32x4x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i32* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <4 x i32>, <4 x i32>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x i32>
@@ -19191,30 +19189,30 @@ void test_vst4q_lane_s32(int32_t * a, int32x4x4_t b) {
}
// CHECK-LABEL: @test_vst4q_lane_f16(
-// CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x half>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast half* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x half> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x half> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -19227,30 +19225,30 @@ void test_vst4q_lane_f16(float16_t * a, float16x8x4_t b) {
}
// CHECK-LABEL: @test_vst4q_lane_f32(
-// CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <4 x float>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.float32x4x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.float32x4x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast float* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <4 x float> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <4 x float> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <4 x float> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x float>], [4 x <4 x float>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <4 x float>, <4 x float>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <4 x float> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x float>
@@ -19263,30 +19261,30 @@ void test_vst4q_lane_f32(float32_t * a, float32x4x4_t b) {
}
// CHECK-LABEL: @test_vst4q_lane_p16(
-// CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 8
-// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 8
+// CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16
+// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[B]], i32 0, i32 0
// CHECK: [[TMP0:%.*]] = bitcast [4 x <8 x i16>]* [[COERCE_DIVE]] to [8 x i64]*
-// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 8
+// CHECK: store [8 x i64] [[B]].coerce, [8 x i64]* [[TMP0]], align 16
// CHECK: [[TMP1:%.*]] = bitcast %struct.poly16x8x4_t* [[__S1]] to i8*
// CHECK: [[TMP2:%.*]] = bitcast %struct.poly16x8x4_t* [[B]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP1]], i8* [[TMP2]], i32 64, i32 16, i1 false)
// CHECK: [[TMP3:%.*]] = bitcast i16* %a to i8*
// CHECK: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL]], i32 0, i32 0
-// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 8
+// CHECK: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX]], align 16
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP4]] to <16 x i8>
// CHECK: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL1]], i32 0, i32 1
-// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 8
+// CHECK: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP6]] to <16 x i8>
// CHECK: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL3]], i32 0, i32 2
-// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 8
+// CHECK: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x i16> [[TMP8]] to <16 x i8>
// CHECK: [[VAL5:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[__S1]], i32 0, i32 0
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], [4 x <8 x i16>]* [[VAL5]], i32 0, i32 3
-// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 8
+// CHECK: [[TMP10:%.*]] = load <8 x i16>, <8 x i16>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
@@ -20631,7 +20629,7 @@ poly16x4x2_t test_vtrn_p16(poly16x4_t a, poly16x4_t b) {
}
// CHECK-LABEL: @test_vtrnq_s8(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.int8x16x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.int8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>*
// CHECK: [[VTRN_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
@@ -20641,14 +20639,14 @@ poly16x4x2_t test_vtrn_p16(poly16x4_t a, poly16x4_t b) {
// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], !noalias !30
// CHECK: [[TMP3:%.*]] = bitcast %struct.int8x16x2_t* %agg.result to i8*
// CHECK: [[TMP4:%.*]] = bitcast %struct.int8x16x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 16, i1 false)
// CHECK: ret void
int8x16x2_t test_vtrnq_s8(int8x16_t a, int8x16_t b) {
return vtrnq_s8(a, b);
}
// CHECK-LABEL: @test_vtrnq_s16(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.int16x8x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.int16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8>
@@ -20660,14 +20658,14 @@ int8x16x2_t test_vtrnq_s8(int8x16_t a, int8x16_t b) {
// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], !noalias !33
// CHECK: [[TMP5:%.*]] = bitcast %struct.int16x8x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.int16x8x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
int16x8x2_t test_vtrnq_s16(int16x8_t a, int16x8_t b) {
return vtrnq_s16(a, b);
}
// CHECK-LABEL: @test_vtrnq_s32(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.int32x4x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.int32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8>
@@ -20679,14 +20677,14 @@ int16x8x2_t test_vtrnq_s16(int16x8_t a, int16x8_t b) {
// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], !noalias !36
// CHECK: [[TMP5:%.*]] = bitcast %struct.int32x4x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.int32x4x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
int32x4x2_t test_vtrnq_s32(int32x4_t a, int32x4_t b) {
return vtrnq_s32(a, b);
}
// CHECK-LABEL: @test_vtrnq_u8(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.uint8x16x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.uint8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>*
// CHECK: [[VTRN_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
@@ -20696,14 +20694,14 @@ int32x4x2_t test_vtrnq_s32(int32x4_t a, int32x4_t b) {
// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], !noalias !39
// CHECK: [[TMP3:%.*]] = bitcast %struct.uint8x16x2_t* %agg.result to i8*
// CHECK: [[TMP4:%.*]] = bitcast %struct.uint8x16x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 16, i1 false)
// CHECK: ret void
uint8x16x2_t test_vtrnq_u8(uint8x16_t a, uint8x16_t b) {
return vtrnq_u8(a, b);
}
// CHECK-LABEL: @test_vtrnq_u16(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.uint16x8x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8>
@@ -20715,14 +20713,14 @@ uint8x16x2_t test_vtrnq_u8(uint8x16_t a, uint8x16_t b) {
// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], !noalias !42
// CHECK: [[TMP5:%.*]] = bitcast %struct.uint16x8x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.uint16x8x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
uint16x8x2_t test_vtrnq_u16(uint16x8_t a, uint16x8_t b) {
return vtrnq_u16(a, b);
}
// CHECK-LABEL: @test_vtrnq_u32(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.uint32x4x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.uint32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8>
@@ -20734,14 +20732,14 @@ uint16x8x2_t test_vtrnq_u16(uint16x8_t a, uint16x8_t b) {
// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], !noalias !45
// CHECK: [[TMP5:%.*]] = bitcast %struct.uint32x4x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.uint32x4x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) {
return vtrnq_u32(a, b);
}
// CHECK-LABEL: @test_vtrnq_f32(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.float32x4x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.float32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x float> %b to <16 x i8>
@@ -20753,14 +20751,14 @@ uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) {
// CHECK: store <4 x float> [[VTRN1_I]], <4 x float>* [[TMP4]], !noalias !48
// CHECK: [[TMP5:%.*]] = bitcast %struct.float32x4x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.float32x4x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
float32x4x2_t test_vtrnq_f32(float32x4_t a, float32x4_t b) {
return vtrnq_f32(a, b);
}
// CHECK-LABEL: @test_vtrnq_p8(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.poly8x16x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.poly8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>*
// CHECK: [[VTRN_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
@@ -20770,14 +20768,14 @@ float32x4x2_t test_vtrnq_f32(float32x4_t a, float32x4_t b) {
// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], !noalias !51
// CHECK: [[TMP3:%.*]] = bitcast %struct.poly8x16x2_t* %agg.result to i8*
// CHECK: [[TMP4:%.*]] = bitcast %struct.poly8x16x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 16, i1 false)
// CHECK: ret void
poly8x16x2_t test_vtrnq_p8(poly8x16_t a, poly8x16_t b) {
return vtrnq_p8(a, b);
}
// CHECK-LABEL: @test_vtrnq_p16(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.poly16x8x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.poly16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8>
@@ -20789,7 +20787,7 @@ poly8x16x2_t test_vtrnq_p8(poly8x16_t a, poly8x16_t b) {
// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], !noalias !54
// CHECK: [[TMP5:%.*]] = bitcast %struct.poly16x8x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.poly16x8x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
poly16x8x2_t test_vtrnq_p16(poly16x8_t a, poly16x8_t b) {
return vtrnq_p16(a, b);
@@ -21125,7 +21123,7 @@ poly16x4x2_t test_vuzp_p16(poly16x4_t a, poly16x4_t b) {
}
// CHECK-LABEL: @test_vuzpq_s8(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.int8x16x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.int8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>*
// CHECK: [[VUZP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -21135,14 +21133,14 @@ poly16x4x2_t test_vuzp_p16(poly16x4_t a, poly16x4_t b) {
// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], !noalias !84
// CHECK: [[TMP3:%.*]] = bitcast %struct.int8x16x2_t* %agg.result to i8*
// CHECK: [[TMP4:%.*]] = bitcast %struct.int8x16x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 16, i1 false)
// CHECK: ret void
int8x16x2_t test_vuzpq_s8(int8x16_t a, int8x16_t b) {
return vuzpq_s8(a, b);
}
// CHECK-LABEL: @test_vuzpq_s16(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.int16x8x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.int16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8>
@@ -21154,14 +21152,14 @@ int8x16x2_t test_vuzpq_s8(int8x16_t a, int8x16_t b) {
// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], !noalias !87
// CHECK: [[TMP5:%.*]] = bitcast %struct.int16x8x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.int16x8x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
int16x8x2_t test_vuzpq_s16(int16x8_t a, int16x8_t b) {
return vuzpq_s16(a, b);
}
// CHECK-LABEL: @test_vuzpq_s32(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.int32x4x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.int32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8>
@@ -21173,14 +21171,14 @@ int16x8x2_t test_vuzpq_s16(int16x8_t a, int16x8_t b) {
// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], !noalias !90
// CHECK: [[TMP5:%.*]] = bitcast %struct.int32x4x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.int32x4x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
int32x4x2_t test_vuzpq_s32(int32x4_t a, int32x4_t b) {
return vuzpq_s32(a, b);
}
// CHECK-LABEL: @test_vuzpq_u8(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.uint8x16x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.uint8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>*
// CHECK: [[VUZP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -21190,14 +21188,14 @@ int32x4x2_t test_vuzpq_s32(int32x4_t a, int32x4_t b) {
// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], !noalias !93
// CHECK: [[TMP3:%.*]] = bitcast %struct.uint8x16x2_t* %agg.result to i8*
// CHECK: [[TMP4:%.*]] = bitcast %struct.uint8x16x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 16, i1 false)
// CHECK: ret void
uint8x16x2_t test_vuzpq_u8(uint8x16_t a, uint8x16_t b) {
return vuzpq_u8(a, b);
}
// CHECK-LABEL: @test_vuzpq_u16(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.uint16x8x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8>
@@ -21209,14 +21207,14 @@ uint8x16x2_t test_vuzpq_u8(uint8x16_t a, uint8x16_t b) {
// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], !noalias !96
// CHECK: [[TMP5:%.*]] = bitcast %struct.uint16x8x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.uint16x8x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
uint16x8x2_t test_vuzpq_u16(uint16x8_t a, uint16x8_t b) {
return vuzpq_u16(a, b);
}
// CHECK-LABEL: @test_vuzpq_u32(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.uint32x4x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.uint32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8>
@@ -21228,14 +21226,14 @@ uint16x8x2_t test_vuzpq_u16(uint16x8_t a, uint16x8_t b) {
// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], !noalias !99
// CHECK: [[TMP5:%.*]] = bitcast %struct.uint32x4x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.uint32x4x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
uint32x4x2_t test_vuzpq_u32(uint32x4_t a, uint32x4_t b) {
return vuzpq_u32(a, b);
}
// CHECK-LABEL: @test_vuzpq_f32(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.float32x4x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.float32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x float> %b to <16 x i8>
@@ -21247,14 +21245,14 @@ uint32x4x2_t test_vuzpq_u32(uint32x4_t a, uint32x4_t b) {
// CHECK: store <4 x float> [[VUZP1_I]], <4 x float>* [[TMP4]], !noalias !102
// CHECK: [[TMP5:%.*]] = bitcast %struct.float32x4x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.float32x4x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
float32x4x2_t test_vuzpq_f32(float32x4_t a, float32x4_t b) {
return vuzpq_f32(a, b);
}
// CHECK-LABEL: @test_vuzpq_p8(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.poly8x16x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.poly8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>*
// CHECK: [[VUZP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -21264,14 +21262,14 @@ float32x4x2_t test_vuzpq_f32(float32x4_t a, float32x4_t b) {
// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], !noalias !105
// CHECK: [[TMP3:%.*]] = bitcast %struct.poly8x16x2_t* %agg.result to i8*
// CHECK: [[TMP4:%.*]] = bitcast %struct.poly8x16x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 16, i1 false)
// CHECK: ret void
poly8x16x2_t test_vuzpq_p8(poly8x16_t a, poly8x16_t b) {
return vuzpq_p8(a, b);
}
// CHECK-LABEL: @test_vuzpq_p16(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.poly16x8x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.poly16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8>
@@ -21283,7 +21281,7 @@ poly8x16x2_t test_vuzpq_p8(poly8x16_t a, poly8x16_t b) {
// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], !noalias !108
// CHECK: [[TMP5:%.*]] = bitcast %struct.poly16x8x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.poly16x8x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
poly16x8x2_t test_vuzpq_p16(poly16x8_t a, poly16x8_t b) {
return vuzpq_p16(a, b);
@@ -21455,7 +21453,7 @@ poly16x4x2_t test_vzip_p16(poly16x4_t a, poly16x4_t b) {
}
// CHECK-LABEL: @test_vzipq_s8(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.int8x16x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.int8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>*
// CHECK: [[VZIP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -21465,14 +21463,14 @@ poly16x4x2_t test_vzip_p16(poly16x4_t a, poly16x4_t b) {
// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], !noalias !138
// CHECK: [[TMP3:%.*]] = bitcast %struct.int8x16x2_t* %agg.result to i8*
// CHECK: [[TMP4:%.*]] = bitcast %struct.int8x16x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 16, i1 false)
// CHECK: ret void
int8x16x2_t test_vzipq_s8(int8x16_t a, int8x16_t b) {
return vzipq_s8(a, b);
}
// CHECK-LABEL: @test_vzipq_s16(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.int16x8x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.int16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8>
@@ -21484,14 +21482,14 @@ int8x16x2_t test_vzipq_s8(int8x16_t a, int8x16_t b) {
// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], !noalias !141
// CHECK: [[TMP5:%.*]] = bitcast %struct.int16x8x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.int16x8x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
int16x8x2_t test_vzipq_s16(int16x8_t a, int16x8_t b) {
return vzipq_s16(a, b);
}
// CHECK-LABEL: @test_vzipq_s32(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.int32x4x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.int32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8>
@@ -21503,14 +21501,14 @@ int16x8x2_t test_vzipq_s16(int16x8_t a, int16x8_t b) {
// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], !noalias !144
// CHECK: [[TMP5:%.*]] = bitcast %struct.int32x4x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.int32x4x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
int32x4x2_t test_vzipq_s32(int32x4_t a, int32x4_t b) {
return vzipq_s32(a, b);
}
// CHECK-LABEL: @test_vzipq_u8(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.uint8x16x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.uint8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>*
// CHECK: [[VZIP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -21520,14 +21518,14 @@ int32x4x2_t test_vzipq_s32(int32x4_t a, int32x4_t b) {
// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], !noalias !147
// CHECK: [[TMP3:%.*]] = bitcast %struct.uint8x16x2_t* %agg.result to i8*
// CHECK: [[TMP4:%.*]] = bitcast %struct.uint8x16x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 16, i1 false)
// CHECK: ret void
uint8x16x2_t test_vzipq_u8(uint8x16_t a, uint8x16_t b) {
return vzipq_u8(a, b);
}
// CHECK-LABEL: @test_vzipq_u16(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.uint16x8x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8>
@@ -21539,14 +21537,14 @@ uint8x16x2_t test_vzipq_u8(uint8x16_t a, uint8x16_t b) {
// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], !noalias !150
// CHECK: [[TMP5:%.*]] = bitcast %struct.uint16x8x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.uint16x8x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
uint16x8x2_t test_vzipq_u16(uint16x8_t a, uint16x8_t b) {
return vzipq_u16(a, b);
}
// CHECK-LABEL: @test_vzipq_u32(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.uint32x4x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.uint32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8>
@@ -21558,14 +21556,14 @@ uint16x8x2_t test_vzipq_u16(uint16x8_t a, uint16x8_t b) {
// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], !noalias !153
// CHECK: [[TMP5:%.*]] = bitcast %struct.uint32x4x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.uint32x4x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
uint32x4x2_t test_vzipq_u32(uint32x4_t a, uint32x4_t b) {
return vzipq_u32(a, b);
}
// CHECK-LABEL: @test_vzipq_f32(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.float32x4x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.float32x4x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x float> %b to <16 x i8>
@@ -21577,14 +21575,14 @@ uint32x4x2_t test_vzipq_u32(uint32x4_t a, uint32x4_t b) {
// CHECK: store <4 x float> [[VZIP1_I]], <4 x float>* [[TMP4]], !noalias !156
// CHECK: [[TMP5:%.*]] = bitcast %struct.float32x4x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.float32x4x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
float32x4x2_t test_vzipq_f32(float32x4_t a, float32x4_t b) {
return vzipq_f32(a, b);
}
// CHECK-LABEL: @test_vzipq_p8(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.poly8x16x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.poly8x16x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>*
// CHECK: [[VZIP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -21594,14 +21592,14 @@ float32x4x2_t test_vzipq_f32(float32x4_t a, float32x4_t b) {
// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], !noalias !159
// CHECK: [[TMP3:%.*]] = bitcast %struct.poly8x16x2_t* %agg.result to i8*
// CHECK: [[TMP4:%.*]] = bitcast %struct.poly8x16x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP3]], i8* [[TMP4]], i32 32, i32 16, i1 false)
// CHECK: ret void
poly8x16x2_t test_vzipq_p8(poly8x16_t a, poly8x16_t b) {
return vzipq_p8(a, b);
}
// CHECK-LABEL: @test_vzipq_p16(
-// CHECK: [[__RET_I:%.*]] = alloca %struct.poly16x8x2_t, align 8
+// CHECK: [[__RET_I:%.*]] = alloca %struct.poly16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8>
@@ -21613,7 +21611,7 @@ poly8x16x2_t test_vzipq_p8(poly8x16_t a, poly8x16_t b) {
// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], !noalias !162
// CHECK: [[TMP5:%.*]] = bitcast %struct.poly16x8x2_t* %agg.result to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.poly16x8x2_t* [[__RET_I]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP5]], i8* [[TMP6]], i32 32, i32 16, i1 false)
// CHECK: ret void
poly16x8x2_t test_vzipq_p16(poly16x8_t a, poly16x8_t b) {
return vzipq_p16(a, b);
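
The hunks above change only the FileCheck alignment expectations, from "align 8" to "align 16": the 128-bit (q-register) NEON container structs such as int8x16x2_t now take their natural 16-byte alignment, so the allocas, loads, stores, and memcpy calls in these tests are emitted with align 16. A minimal sketch of that property in plain C, assuming an ARM target with NEON enabled and arm_neon.h available (the target triple and flags below are illustrative):

/* Compile with, e.g.: clang --target=armv7-linux-gnueabihf -mfpu=neon -c align.c */
#include <arm_neon.h>

/* Each q-register vector type (e.g. int8x16_t) is 16 bytes wide, so the
 * multi-vector container structs inherit 16-byte alignment, matching the
 * "align 16" allocas and memcpys checked above. */
_Static_assert(_Alignof(int8x16x2_t) == 16, "2 x q-register container");
_Static_assert(_Alignof(float32x4x4_t) == 16, "4 x q-register container");
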
diff --git a/test/CodeGen/ubsan-pointer-overflow.m b/test/CodeGen/ubsan-pointer-overflow.m
new file mode 100644
index 000000000000..c83d527e624e
--- /dev/null
+++ b/test/CodeGen/ubsan-pointer-overflow.m
@@ -0,0 +1,171 @@
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -w -emit-llvm -o - %s -fsanitize=pointer-overflow | FileCheck %s
+
+// CHECK-LABEL: define void @unary_arith
+void unary_arith(char *p) {
+ // CHECK: [[BASE:%.*]] = ptrtoint i8* {{.*}} to i64, !nosanitize
+ // CHECK-NEXT: [[COMPGEP:%.*]] = add i64 [[BASE]], 1, !nosanitize
+ // CHECK-NEXT: [[POSVALID:%.*]] = icmp uge i64 [[COMPGEP]], [[BASE]], !nosanitize
+ // CHECK-NEXT: [[NEGVALID:%.*]] = icmp ult i64 [[COMPGEP]], [[BASE]], !nosanitize
+ // CHECK-NEXT: [[DIFFVALID:%.*]] = select i1 true, i1 [[POSVALID]], i1 [[NEGVALID]], !nosanitize
+ // CHECK-NEXT: [[VALID:%.*]] = and i1 true, [[DIFFVALID]], !nosanitize
+ // CHECK-NEXT: br i1 [[VALID]]{{.*}}, !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}, i64 [[BASE]], i64 [[COMPGEP]]){{.*}}, !nosanitize
+ ++p;
+
+ // CHECK: ptrtoint i8* {{.*}} to i64, !nosanitize
+ // CHECK-NEXT: add i64 {{.*}}, -1, !nosanitize
+ // CHECK: select i1 false{{.*}}, !nosanitize
+ // CHECK-NEXT: and i1 true{{.*}}, !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+ --p;
+
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+ p++;
+
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+ p--;
+}
+
+// CHECK-LABEL: define void @binary_arith
+void binary_arith(char *p, int i) {
+ // CHECK: [[SMUL:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %{{.*}}), !nosanitize
+ // CHECK-NEXT: [[SMULOFLOW:%.*]] = extractvalue { i64, i1 } [[SMUL]], 1, !nosanitize
+ // CHECK-NEXT: [[OFFSETOFLOW:%.*]] = or i1 false, [[SMULOFLOW]], !nosanitize
+ // CHECK-NEXT: [[SMULVAL:%.*]] = extractvalue { i64, i1 } [[SMUL]], 0, !nosanitize
+ // CHECK-NEXT: [[BASE:%.*]] = ptrtoint i8* {{.*}} to i64, !nosanitize
+ // CHECK-NEXT: [[COMPGEP:%.*]] = add i64 [[BASE]], [[SMULVAL]], !nosanitize
+ // CHECK-NEXT: [[POSVALID:%.*]] = icmp uge i64 [[COMPGEP]], [[BASE]], !nosanitize
+ // CHECK-NEXT: [[NEGVALID:%.*]] = icmp ult i64 [[COMPGEP]], [[BASE]], !nosanitize
+ // CHECK-NEXT: [[POSOFFSET:%.*]] = icmp sge i64 [[SMULVAL]], 0, !nosanitize
+ // CHECK-NEXT: [[OFFSETVALID:%.*]] = xor i1 [[OFFSETOFLOW]], true, !nosanitize
+ // CHECK-NEXT: [[DIFFVALID:%.*]] = select i1 [[POSOFFSET]], i1 [[POSVALID]], i1 [[NEGVALID]], !nosanitize
+ // CHECK-NEXT: [[VALID:%.*]] = and i1 [[OFFSETVALID]], [[DIFFVALID]], !nosanitize
+ // CHECK-NEXT: br i1 [[VALID]]{{.*}}, !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}, i64 [[BASE]], i64 [[COMPGEP]]){{.*}}, !nosanitize
+ p + i;
+
+ // CHECK: [[OFFSET:%.*]] = sub i64 0, {{.*}}
+ // CHECK-NEXT: getelementptr inbounds {{.*}} [[OFFSET]]
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+ p - i;
+}
+
+// CHECK-LABEL: define void @fixed_len_array
+void fixed_len_array(int k) {
+ // CHECK: getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[ARR:%.*]], i64 0, i64 [[IDXPROM:%.*]]
+ // CHECK-NEXT: [[SMUL:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 40, i64 [[IDXPROM]]), !nosanitize
+ // CHECK-NEXT: [[SMULOFLOW:%.*]] = extractvalue { i64, i1 } [[SMUL]], 1, !nosanitize
+ // CHECK-NEXT: [[OFFSETOFLOW:%.*]] = or i1 false, [[SMULOFLOW]], !nosanitize
+ // CHECK-NEXT: [[SMULVAL:%.*]] = extractvalue { i64, i1 } [[SMUL]], 0, !nosanitize
+ // CHECK-NEXT: [[BASE:%.*]] = ptrtoint [10 x [10 x i32]]* [[ARR]] to i64, !nosanitize
+ // CHECK-NEXT: [[COMPGEP:%.*]] = add i64 [[BASE]], [[SMULVAL]], !nosanitize
+ // CHECK-NEXT: [[POSVALID:%.*]] = icmp uge i64 [[COMPGEP]], [[BASE]], !nosanitize
+ // CHECK-NEXT: [[NEGVALID:%.*]] = icmp ult i64 [[COMPGEP]], [[BASE]], !nosanitize
+ // CHECK-NEXT: [[POSOFFSET:%.*]] = icmp sge i64 [[SMULVAL]], 0, !nosanitize
+ // CHECK-NEXT: [[OFFSETVALID:%.*]] = xor i1 [[OFFSETOFLOW]], true, !nosanitize
+ // CHECK-NEXT: [[DIFFVALID:%.*]] = select i1 [[POSOFFSET]], i1 [[POSVALID]], i1 [[NEGVALID]], !nosanitize
+ // CHECK-NEXT: [[VALID:%.*]] = and i1 [[OFFSETVALID]], [[DIFFVALID]], !nosanitize
+ // CHECK-NEXT: br i1 [[VALID]]{{.*}}, !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}, i64 [[BASE]], i64 [[COMPGEP]]){{.*}}, !nosanitize
+
+ // CHECK: getelementptr inbounds [10 x i32], [10 x i32]* {{.*}}, i64 0, i64 [[IDXPROM1:%.*]]
+ // CHECK-NEXT: @llvm.smul.with.overflow.i64(i64 4, i64 [[IDXPROM1]]), !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+
+ int arr[10][10];
+ arr[k][k];
+}
+
+// CHECK-LABEL: define void @variable_len_array
+void variable_len_array(int n, int k) {
+ // CHECK: getelementptr inbounds i32, i32* {{.*}}, i64 [[IDXPROM:%.*]]
+ // CHECK-NEXT: @llvm.smul.with.overflow.i64(i64 4, i64 [[IDXPROM]]), !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+
+ // CHECK: getelementptr inbounds i32, i32* {{.*}}, i64 [[IDXPROM1:%.*]]
+ // CHECK-NEXT: @llvm.smul.with.overflow.i64(i64 4, i64 [[IDXPROM1]]), !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+
+ int arr[n][n];
+ arr[k][k];
+}
+
+// CHECK-LABEL: define void @pointer_array
+void pointer_array(int **arr, int k) {
+ // CHECK: @llvm.smul.with.overflow.i64(i64 8, i64 {{.*}}), !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+
+ // CHECK: @llvm.smul.with.overflow.i64(i64 4, i64 {{.*}}), !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+
+ arr[k][k];
+}
+
+struct S1 {
+ int pad1;
+ union {
+ char leaf;
+ struct S1 *link;
+ } u;
+ struct S1 *arr;
+};
+
+// TODO: Currently, structure GEPs are not checked, so there are several
+// potentially unsafe GEPs here which we don't instrument.
+//
+// CHECK-LABEL: define void @struct_index
+void struct_index(struct S1 *p) {
+ // CHECK: getelementptr inbounds %struct.S1, %struct.S1* [[P:%.*]], i64 10
+ // CHECK-NEXT: [[BASE:%.*]] = ptrtoint %struct.S1* [[P]] to i64, !nosanitize
+ // CHECK-NEXT: [[COMPGEP:%.*]] = add i64 [[BASE]], 240, !nosanitize
+ // CHECK: @__ubsan_handle_pointer_overflow{{.*}} i64 [[BASE]], i64 [[COMPGEP]]) {{.*}}, !nosanitize
+
+ // CHECK-NOT: @__ubsan_handle_pointer_overflow
+
+ p->arr[10].u.link->u.leaf;
+}
+
+typedef void (*funcptr_t)(void);
+
+// CHECK-LABEL: define void @function_pointer_arith
+void function_pointer_arith(funcptr_t *p, int k) {
+ // CHECK: add i64 {{.*}}, 8, !nosanitize
+ // CHECK: @__ubsan_handle_pointer_overflow{{.*}}
+ ++p;
+
+ // CHECK: @llvm.smul.with.overflow.i64(i64 8, i64 {{.*}}), !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+ p + k;
+}
+
+// CHECK-LABEL: define void @variable_len_array_arith
+void variable_len_array_arith(int n, int k) {
+ int vla[n];
+ int (*p)[n] = &vla;
+
+ // CHECK: getelementptr inbounds i32, i32* {{.*}}, i64 [[INC:%.*]]
+ // CHECK: @llvm.smul.with.overflow.i64(i64 4, i64 [[INC]]), !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+ ++p;
+
+ // CHECK: getelementptr inbounds i32, i32* {{.*}}, i64 [[IDXPROM:%.*]]
+ // CHECK: @llvm.smul.with.overflow.i64(i64 4, i64 [[IDXPROM]]), !nosanitize
+ // CHECK: call void @__ubsan_handle_pointer_overflow{{.*}}
+ p + k;
+}
+
+// CHECK-LABEL: define void @objc_id
+void objc_id(id *p) {
+ // CHECK: add i64 {{.*}}, 8, !nosanitize
+ // CHECK: @__ubsan_handle_pointer_overflow{{.*}}
+ p++;
+}
+
+// CHECK-LABEL: define void @dont_emit_checks_for_no_op_GEPs
+// CHECK-NOT: __ubsan_handle_pointer_overflow
+void dont_emit_checks_for_no_op_GEPs(char *p) {
+ &p[0];
+
+ int arr[10][10];
+ &arr[0][0];
+}
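
A minimal standalone illustration of what the new pointer-overflow check
catches at runtime (a sketch, not part of the patch; assumes a build with
-fsanitize=pointer-overflow):

    // overflow-demo.cpp -- clang++ -fsanitize=pointer-overflow overflow-demo.cpp
    #include <cstdint>
    int main() {
      // Start with a pointer value at the very top of the address space.
      char *p = reinterpret_cast<char *>(UINTPTR_MAX);
      ++p; // base + 1 wraps around zero; UBSan calls
           // __ubsan_handle_pointer_overflow and reports the wrap.
      return 0;
    }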
diff --git a/test/CodeGenCXX/stmtexpr.cpp b/test/CodeGenCXX/stmtexpr.cpp
index 5885a1663e63..5bd9908d6c25 100644
--- a/test/CodeGenCXX/stmtexpr.cpp
+++ b/test/CodeGenCXX/stmtexpr.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -Wno-unused-value -triple %itanium_abi_triple -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -Wno-unused-value -triple i686-linux-gnu -emit-llvm -o - %s | FileCheck %s
// rdar: //8540501
extern "C" int printf(...);
extern "C" void abort();
@@ -139,6 +139,34 @@ extern "C" int cleanup_exit_lvalue(bool cond) {
// CHECK: %[[v:[^ ]*]] = load i32*, i32** %[[tmp]]
// CHECK-NEXT: store i32* %[[v]], i32** %r
+// Bind the reference to a byval argument. It is not an instruction or Constant,
+// so it's a bit of a corner case.
+struct ByVal { int x[3]; };
+extern "C" int cleanup_exit_lvalue_byval(bool cond, ByVal arg) {
+ ByVal &r = (A(1), ({ if (cond) return 0; (void)ByVal(); }), arg);
+ return r.x[0];
+}
+// CHECK-LABEL: define{{.*}} i32 @cleanup_exit_lvalue_byval({{.*}}, %struct.ByVal* byval align 4 %arg)
+// CHECK: call {{.*}} @_ZN1AC1Ei
+// CHECK: call {{.*}} @_ZN1AD1Ev
+// CHECK: switch
+// CHECK: store %struct.ByVal* %arg, %struct.ByVal** %r
+
+// Bind the reference to a local variable. We don't need to spill it. Binding a
+// reference to it doesn't generate any instructions.
+extern "C" int cleanup_exit_lvalue_local(bool cond) {
+ int local = 42;
+ int &r = (A(1), ({ if (cond) return 0; (void)0; }), local);
+ return r;
+}
+// CHECK-LABEL: define{{.*}} i32 @cleanup_exit_lvalue_local({{.*}})
+// CHECK: %local = alloca i32
+// CHECK: store i32 42, i32* %local
+// CHECK: call {{.*}} @_ZN1AC1Ei
+// CHECK-NOT: store i32* %local
+// CHECK: call {{.*}} @_ZN1AD1Ev
+// CHECK: switch
+// CHECK: store i32* %local, i32** %r, align 4
// We handle ExprWithCleanups for complex evaluation type separately, and it had
// the same bug.
diff --git a/test/CodeGenCXX/strict-vtable-pointers.cpp b/test/CodeGenCXX/strict-vtable-pointers.cpp
index 928817bfb1f5..c3798920abdd 100644
--- a/test/CodeGenCXX/strict-vtable-pointers.cpp
+++ b/test/CodeGenCXX/strict-vtable-pointers.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 %s -I%S -triple=x86_64-apple-darwin10 -fstrict-vtable-pointers -disable-llvm-passes -O2 -emit-llvm -o %t.ll
+// RUN: %clang_cc1 %s -I%S -triple=x86_64-apple-darwin10 -fstrict-vtable-pointers -std=c++11 -disable-llvm-passes -O2 -emit-llvm -o %t.ll
// RUN: FileCheck --check-prefix=CHECK-CTORS %s < %t.ll
// RUN: FileCheck --check-prefix=CHECK-NEW %s < %t.ll
// RUN: FileCheck --check-prefix=CHECK-DTORS %s < %t.ll
@@ -180,6 +180,119 @@ struct DynamicFromStatic;
// CHECK-CTORS-NOT: @llvm.invariant.group.barrier(
// CHECK-CTORS-LABEL: {{^}}}
+struct A {
+ virtual void foo();
+};
+struct B : A {
+ virtual void foo();
+};
+
+union U {
+ A a;
+ B b;
+};
+
+void changeToB(U *u);
+void changeToA(U *u);
+
+void g2(A *a) {
+ a->foo();
+}
+// We have to guard accesses to union fields with invariant.group barriers,
+// because it is very easy to skip a barrier with unions. In this example,
+// the inlined g2 would produce loads with the same !invariant.group
+// metadata, and u->a and u->b would use the same pointer.
+// CHECK-NEW-LABEL: define void @_Z14UnionsBarriersP1U
+void UnionsBarriers(U *u) {
+ // CHECK-NEW: call void @_Z9changeToBP1U(
+ changeToB(u);
+ // CHECK-NEW: call i8* @llvm.invariant.group.barrier(i8*
+ // CHECK-NEW: call void @_Z2g2P1A(%struct.A*
+ g2(&u->b);
+ // CHECK-NEW: call void @_Z9changeToAP1U(%union.U*
+ changeToA(u);
+ // CHECK-NEW: call i8* @llvm.invariant.group.barrier(i8*
+ // call void @_Z2g2P1A(%struct.A* %a)
+ g2(&u->a);
+ // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier(i8*
+}
+
+struct HoldingVirtuals {
+ A a;
+};
+
+struct Empty {};
+struct AnotherEmpty {
+ Empty e;
+};
+union NoVptrs {
+ int a;
+ AnotherEmpty empty;
+};
+void take(AnotherEmpty &);
+
+// CHECK-NEW-LABEL: noBarriers
+void noBarriers(NoVptrs &noVptrs) {
+ // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier(i8*
+ // CHECK-NEW: 42
+ noVptrs.a += 42;
+ // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier(i8*
+ // CHECK-NEW: call void @_Z4takeR12AnotherEmpty(
+ take(noVptrs.empty);
+}
+
+union U2 {
+ HoldingVirtuals h;
+ int z;
+};
+void take(HoldingVirtuals &);
+
+// CHECK-NEW-LABEL: define void @_Z15UnionsBarriers2R2U2
+void UnionsBarriers2(U2 &u) {
+ // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier(i8*
+ // CHECK-NEW: 42
+ u.z += 42;
+ // CHECK-NEW: call i8* @llvm.invariant.group.barrier(i8*
+ // CHECK-NEW: call void @_Z4takeR15HoldingVirtuals(
+ take(u.h);
+}
+
+struct VirtualInBase : HoldingVirtuals, Empty {
+};
+
+struct VirtualInVBase : virtual Empty, virtual HoldingVirtuals {
+};
+
+// It has a vtable because of virtual inheritance.
+struct VirtualInheritance : virtual Empty {
+};
+
+union U3 {
+ VirtualInBase v1;
+ VirtualInBase v2;
+ VirtualInheritance v3;
+ int z;
+};
+
+void take(VirtualInBase &);
+void take(VirtualInVBase &);
+void take(VirtualInheritance &);
+
+void UnionsBarrier3(U3 &u) {
+ // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier(i8*
+ // CHECK-NEW: 42
+ u.z += 42;
+ // CHECK-NEW: call i8* @llvm.invariant.group.barrier(i8*
+ // CHECK-NEW: call void @_Z4takeR13VirtualInBase(
+ take(u.v1);
+ // CHECK-NEW: call i8* @llvm.invariant.group.barrier(i8*
+ // CHECK-NEW: call void @_Z4takeR13VirtualInBase(
+ take(u.v2);
+
+ // CHECK-NEW: call i8* @llvm.invariant.group.barrier(i8*
+ // CHECK-NEW: call void @_Z4takeR18VirtualInheritance(
+ take(u.v3);
+}
/** DTORS **/
// CHECK-DTORS-LABEL: define linkonce_odr void @_ZN10StaticBaseD2Ev(
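
A sketch of the miscompile the union barriers above prevent (illustrative
only, reusing the A/B/U/g2/changeToA/changeToB declarations from the test;
assumes -fstrict-vtable-pointers as in the RUN lines):

    void demo(U *u) {
      changeToB(u);
      g2(&u->b); // after inlining, loads the vptr with !invariant.group
      changeToA(u);
      g2(&u->a); // &u->a aliases &u->b; without a barrier between them the
                 // optimizer may reuse the earlier vptr load and call B::foo
    }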
diff --git a/test/CodeGenCXX/vtable-available-externally.cpp b/test/CodeGenCXX/vtable-available-externally.cpp
index ae81959a75e9..db99f73d9e72 100644
--- a/test/CodeGenCXX/vtable-available-externally.cpp
+++ b/test/CodeGenCXX/vtable-available-externally.cpp
@@ -12,6 +12,7 @@
// RUN: FileCheck --check-prefix=CHECK-TEST14 %s < %t.opt
// RUN: FileCheck --check-prefix=CHECK-TEST15 %s < %t.opt
// RUN: FileCheck --check-prefix=CHECK-TEST16 %s < %t.opt
+// RUN: FileCheck --check-prefix=CHECK-TEST17 %s < %t.opt
#include <typeinfo>
@@ -274,8 +275,8 @@ struct C {
virtual D& operator=(const D&);
};
-// Cannot emit B's vtable available_externally, because we cannot create
-// a reference to the inline virtual B::operator= function.
+// Cannot emit D's vtable available_externally, because we cannot create
+// a reference to the inline virtual D::operator= function.
// CHECK-TEST11: @_ZTVN6Test111DE = external unnamed_addr constant
struct D : C {
virtual void key();
@@ -391,3 +392,30 @@ void test() {
}
}
+namespace Test17 {
+// This test checks whether we emit vtables opportunistically.
+// CHECK-TEST17-DAG: @_ZTVN6Test171AE = available_externally
+// CHECK-TEST17-DAG: @_ZTVN6Test171BE = external
+
+struct A {
+ virtual void key();
+ virtual void bar() {}
+};
+
+// We are not going to use the deleting destructor for this type, which
+// disallows emitting the vtable as available_externally.
+struct B {
+ virtual void key();
+ virtual ~B() {}
+};
+
+void testcaseA() {
+ A a;
+  a.bar(); // this forces emission of the definition of bar
+}
+
+void testcaseB() {
+  B b; // This only forces emitting the complete object destructor
+}
+
+} // namespace Test17
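
For context, a hedged note on what available_externally buys the optimizer
(standard LLVM semantics, not something this test asserts): the definition
is visible for analysis but is not emitted into this object file.

    Test17::A a;        // dynamic type of `a` is known here
    Test17::A *p = &a;
    p->bar();           // the vptr load folds through the available_externally
                        // vtable initializer, so this becomes a direct call to
                        // A::bar(), which can then be inlined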
diff --git a/test/CodeGenCXX/vtable-linkage.cpp b/test/CodeGenCXX/vtable-linkage.cpp
index c4ebf75b999a..0c55eb26f7f1 100644
--- a/test/CodeGenCXX/vtable-linkage.cpp
+++ b/test/CodeGenCXX/vtable-linkage.cpp
@@ -145,12 +145,14 @@ void use_F() {
// F<int> is an explicit template instantiation declaration without a
// key function, so its vtable should have external linkage.
// CHECK-DAG: @_ZTV1FIiE = external unnamed_addr constant
-// CHECK-OPT-DAG: @_ZTV1FIiE = external unnamed_addr constant
+// CHECK-OPT-DAG: @_ZTV1FIiE = available_externally unnamed_addr constant
// E<int> is an explicit template instantiation declaration. It has a
// key function that is not instantiated, so we know that the vtable
// definition will be generated in the TU where the key function is defined,
-// so we can mark it as available_externally (only with optimizations)
+// so we can mark it as external (without optimizations) and
+// available_externally (with optimizations) because all of the inline
+// virtual functions have been emitted.
// CHECK-DAG: @_ZTV1EIiE = external unnamed_addr constant
// CHECK-OPT-DAG: @_ZTV1EIiE = available_externally unnamed_addr constant
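
Reduced form of the pattern the two CHECK lines above cover (a sketch; the
real E and F templates are defined earlier in this test file):

    template <typename T> struct F0 { virtual void f() {} }; // no key function
    extern template struct F0<int>; // explicit instantiation declaration: the
                                    // vtable stays external at -O0 but can be
                                    // available_externally with optimizations,
                                    // because every virtual member is inline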
diff --git a/test/CodeGenCoroutines/coro-await-domination.cpp b/test/CodeGenCoroutines/coro-await-domination.cpp
new file mode 100644
index 000000000000..5df22374a6e7
--- /dev/null
+++ b/test/CodeGenCoroutines/coro-await-domination.cpp
@@ -0,0 +1,38 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fcoroutines-ts -std=c++14 -emit-llvm %s -o - | FileCheck %s
+#include "Inputs/coroutine.h"
+
+using namespace std::experimental;
+
+struct coro {
+ struct promise_type {
+ coro get_return_object();
+ suspend_never initial_suspend();
+ suspend_never final_suspend();
+ void return_void();
+ static void unhandled_exception();
+ };
+};
+
+struct A {
+ ~A();
+ bool await_ready();
+ int await_resume() { return 8; }
+ template <typename F> void await_suspend(F);
+};
+
+extern "C" void consume(int);
+
+// Verifies that dominance is properly maintained during cleanup.
+// Without the CGCleanup.cpp fix, the verifier was reporting:
+// Instruction does not dominate all uses!
+// %tmp.exprcleanup = alloca i32*, align 8
+// store i32* %x, i32** %tmp.exprcleanup, align 8
+
+
+// CHECK-LABEL: f(
+extern "C" coro f(int) {
+ int x = 42;
+ x = co_await A{};
+ consume(x);
+}
+
diff --git a/test/CodeGenObjC/parameterized_classes.m b/test/CodeGenObjC/parameterized_classes.m
index b75cf2e3ad2d..8fe5c52b8d39 100644
--- a/test/CodeGenObjC/parameterized_classes.m
+++ b/test/CodeGenObjC/parameterized_classes.m
@@ -68,3 +68,31 @@ void blockTest(NSMutableArray<void (^)(void)> *array, NSString *name) {
// CHECK: call i8* @objc_retainBlock
// CHECK: ret void
}
+
+// CHECK-LABEL: define internal void @"\01-[Derived setDest:]
+// CHECK: %[[SELFADDR:.*]] = alloca %[[SELFTY:.*]]*
+// CHECK: %[[AADDR:.*]] = alloca %[[IVARTY:.*]]*
+// CHECK: %[[V2:.*]] = load %[[IVARTY]]*, %[[IVARTY]]** %[[AADDR]]
+// CHECK: %[[V3:.*]] = load %[[SELFTY]]*, %[[SELFTY]]** %[[SELFADDR]]
+// CHECK: %[[IVAR:.*]] = load i64, i64* @"OBJC_IVAR_$_Base._destination"
+// CHECK: %[[V4:.*]] = bitcast %[[SELFTY]]* %[[V3]] to i8*
+// CHECK: %[[ADDPTR:.*]] = getelementptr inbounds i8, i8* %[[V4]], i64 %[[IVAR]]
+// CHECK: %[[V5:.*]] = bitcast i8* %[[ADDPTR]] to %[[IVARTY]]**
+// CHECK: %[[V6:.*]] = bitcast %[[IVARTY]]** %[[V5]] to i8**
+// CHECK: %[[V7:.*]] = bitcast %[[IVARTY]]* %[[V2]] to i8*
+// CHECK: call void @objc_storeStrong(i8** %[[V6]], i8* %[[V7]])
+
+@interface Base<DestType> : NSObject {
+ DestType _destination;
+}
+@end
+
+@interface Derived : Base<NSObject *>
+- (void)setDest:(NSObject *)a;
+@end
+
+@implementation Derived
+- (void)setDest:(NSObject *)a {
+ _destination = a;
+}
+@end
diff --git a/test/CodeGenOpenCL/bool_cast.cl b/test/CodeGenOpenCL/bool_cast.cl
index ab40eccf571f..72926eb141ab 100644
--- a/test/CodeGenOpenCL/bool_cast.cl
+++ b/test/CodeGenOpenCL/bool_cast.cl
@@ -4,7 +4,7 @@ typedef unsigned char uchar4 __attribute((ext_vector_type(4)));
typedef unsigned int int4 __attribute((ext_vector_type(4)));
typedef float float4 __attribute((ext_vector_type(4)));
-// CHECK-LABEL: define void @ker()
+// CHECK-LABEL: define spir_kernel void @ker()
void kernel ker() {
bool t = true;
int4 vec4 = (int4)t;
diff --git a/test/CodeGenOpenCL/kernel-attributes.cl b/test/CodeGenOpenCL/kernel-attributes.cl
index e61a75f7b537..eff2e57d8c9b 100644
--- a/test/CodeGenOpenCL/kernel-attributes.cl
+++ b/test/CodeGenOpenCL/kernel-attributes.cl
@@ -3,13 +3,13 @@
typedef unsigned int uint4 __attribute__((ext_vector_type(4)));
kernel __attribute__((vec_type_hint(int))) __attribute__((reqd_work_group_size(1,2,4))) void kernel1(int a) {}
-// CHECK: define void @kernel1(i32 {{[^%]*}}%a) {{[^{]+}} !vec_type_hint ![[MD1:[0-9]+]] !reqd_work_group_size ![[MD2:[0-9]+]]
+// CHECK: define spir_kernel void @kernel1(i32 {{[^%]*}}%a) {{[^{]+}} !vec_type_hint ![[MD1:[0-9]+]] !reqd_work_group_size ![[MD2:[0-9]+]]
kernel __attribute__((vec_type_hint(uint4))) __attribute__((work_group_size_hint(8,16,32))) void kernel2(int a) {}
-// CHECK: define void @kernel2(i32 {{[^%]*}}%a) {{[^{]+}} !vec_type_hint ![[MD3:[0-9]+]] !work_group_size_hint ![[MD4:[0-9]+]]
+// CHECK: define spir_kernel void @kernel2(i32 {{[^%]*}}%a) {{[^{]+}} !vec_type_hint ![[MD3:[0-9]+]] !work_group_size_hint ![[MD4:[0-9]+]]
kernel __attribute__((intel_reqd_sub_group_size(8))) void kernel3(int a) {}
-// CHECK: define void @kernel3(i32 {{[^%]*}}%a) {{[^{]+}} !intel_reqd_sub_group_size ![[MD5:[0-9]+]]
+// CHECK: define spir_kernel void @kernel3(i32 {{[^%]*}}%a) {{[^{]+}} !intel_reqd_sub_group_size ![[MD5:[0-9]+]]
// CHECK: [[MD1]] = !{i32 undef, i32 1}
// CHECK: [[MD2]] = !{i32 1, i32 2, i32 4}
diff --git a/test/CodeGenOpenCL/kernel-metadata.cl b/test/CodeGenOpenCL/kernel-metadata.cl
index 4165f1fa0ce5..95be43015aa9 100644
--- a/test/CodeGenOpenCL/kernel-metadata.cl
+++ b/test/CodeGenOpenCL/kernel-metadata.cl
@@ -6,5 +6,5 @@ void normal_function() {
__kernel void kernel_function() {
}
-// CHECK: define void @kernel_function() {{[^{]+}} !kernel_arg_addr_space ![[MD:[0-9]+]] !kernel_arg_access_qual ![[MD]] !kernel_arg_type ![[MD]] !kernel_arg_base_type ![[MD]] !kernel_arg_type_qual ![[MD]] {
+// CHECK: define spir_kernel void @kernel_function() {{[^{]+}} !kernel_arg_addr_space ![[MD:[0-9]+]] !kernel_arg_access_qual ![[MD]] !kernel_arg_type ![[MD]] !kernel_arg_base_type ![[MD]] !kernel_arg_type_qual ![[MD]] {
// CHECK: ![[MD]] = !{}
diff --git a/test/CodeGenOpenCL/kernels-have-spir-cc-by-default.cl b/test/CodeGenOpenCL/kernels-have-spir-cc-by-default.cl
new file mode 100644
index 000000000000..5bb52e9beb51
--- /dev/null
+++ b/test/CodeGenOpenCL/kernels-have-spir-cc-by-default.cl
@@ -0,0 +1,65 @@
+// RUN: %clang_cc1 %s -cl-std=CL1.2 -emit-llvm -triple x86_64-unknown-unknown -o - | FileCheck %s
+// RUN: %clang_cc1 %s -cl-std=CL1.2 -emit-llvm -triple amdgcn-unknown-unknown -o - | FileCheck -check-prefixes=AMDGCN %s
+// Test that kernels always use the SPIR calling convention so that the
+// mapping of arguments is unambiguous, which is needed to feasibly
+// implement clSetKernelArg().
+
+typedef struct int_single {
+ int a;
+} int_single;
+
+typedef struct int_pair {
+ long a;
+ long b;
+} int_pair;
+
+typedef struct test_struct {
+ int elementA;
+ int elementB;
+ long elementC;
+ char elementD;
+ long elementE;
+ float elementF;
+ short elementG;
+ double elementH;
+} test_struct;
+
+kernel void test_single(int_single input, global int* output) {
+// CHECK: spir_kernel
+// AMDGCN: define amdgpu_kernel void @test_single
+// CHECK: struct.int_single* byval nocapture
+// CHECK: i32* nocapture %output
+ output[0] = input.a;
+}
+
+kernel void test_pair(int_pair input, global int* output) {
+// CHECK: spir_kernel
+// AMDGCN: define amdgpu_kernel void @test_pair
+// CHECK: struct.int_pair* byval nocapture
+// CHECK: i32* nocapture %output
+ output[0] = (int)input.a;
+ output[1] = (int)input.b;
+}
+
+kernel void test_kernel(test_struct input, global int* output) {
+// CHECK: spir_kernel
+// AMDGCN: define amdgpu_kernel void @test_kernel
+// CHECK: struct.test_struct* byval nocapture
+// CHECK: i32* nocapture %output
+ output[0] = input.elementA;
+ output[1] = input.elementB;
+ output[2] = (int)input.elementC;
+ output[3] = (int)input.elementD;
+ output[4] = (int)input.elementE;
+ output[5] = (int)input.elementF;
+ output[6] = (int)input.elementG;
+ output[7] = (int)input.elementH;
+};
+
+void test_function(int_pair input, global int* output) {
+// CHECK-NOT: spir_kernel
+// AMDGCN-NOT: define amdgpu_kernel void @test_function
+// CHECK: i64 %input.coerce0, i64 %input.coerce1, i32* nocapture %output
+ output[0] = (int)input.a;
+ output[1] = (int)input.b;
+}
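
A hedged host-side illustration of why the unambiguous mapping matters
(standard OpenCL C API; kernel, input_val, and output_buf are assumed host
variables, not part of the test):

    // clSetKernelArg addresses kernel arguments purely by position, so the
    // kernel's ABI-level parameter list must correspond 1:1 to the source-
    // level one; a fixed kernel calling convention guarantees that.
    clSetKernelArg(kernel, 0, sizeof(int_single), &input_val); // arg 0: struct
    clSetKernelArg(kernel, 1, sizeof(cl_mem), &output_buf);    // arg 1: global int*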
diff --git a/test/CodeGenOpenCL/pipe_types.cl b/test/CodeGenOpenCL/pipe_types.cl
index f5b42e2a490f..7c11f74ad7b9 100644
--- a/test/CodeGenOpenCL/pipe_types.cl
+++ b/test/CodeGenOpenCL/pipe_types.cl
@@ -28,7 +28,7 @@ void test5(read_only pipe int4 p) {
typedef read_only pipe int MyPipe;
kernel void test6(MyPipe p) {
-// CHECK: define void @test6(%opencl.pipe_t* %p)
+// CHECK: define spir_kernel void @test6(%opencl.pipe_t* %p)
}
struct Person {
diff --git a/test/CodeGenOpenCL/ptx-calls.cl b/test/CodeGenOpenCL/ptx-calls.cl
index bde00bc3d73a..2a3400371edf 100644
--- a/test/CodeGenOpenCL/ptx-calls.cl
+++ b/test/CodeGenOpenCL/ptx-calls.cl
@@ -7,7 +7,7 @@ void device_function() {
__kernel void kernel_function() {
device_function();
}
-// CHECK-LABEL: define void @kernel_function()
+// CHECK-LABEL: define spir_kernel void @kernel_function()
// CHECK: call void @device_function()
// CHECK: !{{[0-9]+}} = !{void ()* @kernel_function, !"kernel", i32 1}
diff --git a/test/CodeGenOpenCL/ptx-kernels.cl b/test/CodeGenOpenCL/ptx-kernels.cl
index fc6de4f3d517..b9e1c224c7ce 100644
--- a/test/CodeGenOpenCL/ptx-kernels.cl
+++ b/test/CodeGenOpenCL/ptx-kernels.cl
@@ -6,6 +6,6 @@ void device_function() {
__kernel void kernel_function() {
}
-// CHECK-LABEL: define void @kernel_function()
+// CHECK-LABEL: define spir_kernel void @kernel_function()
// CHECK: !{{[0-9]+}} = !{void ()* @kernel_function, !"kernel", i32 1}
diff --git a/test/Driver/arm-cortex-cpus.c b/test/Driver/arm-cortex-cpus.c
index c52e643f0337..80912678e029 100644
--- a/test/Driver/arm-cortex-cpus.c
+++ b/test/Driver/arm-cortex-cpus.c
@@ -120,11 +120,11 @@
// RUN: %clang -target armv7a-linux-gnueabi -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V7A %s
// RUN: %clang -target arm-linux-gnueabi -march=armv7-a -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V7A %s
-// CHECK-V7A: "-cc1"{{.*}} "-triple" "armv7-{{.*}} "-target-cpu" "cortex-a8"
+// CHECK-V7A: "-cc1"{{.*}} "-triple" "armv7-{{.*}} "-target-cpu" "generic"
// RUN: %clang -target armv7a-linux-gnueabi -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V7A-THUMB %s
// RUN: %clang -target arm-linux-gnueabi -march=armv7-a -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V7A-THUMB %s
-// CHECK-V7A-THUMB: "-cc1"{{.*}} "-triple" "thumbv7-{{.*}} "-target-cpu" "cortex-a8"
+// CHECK-V7A-THUMB: "-cc1"{{.*}} "-triple" "thumbv7-{{.*}} "-target-cpu" "generic"
// RUN: %clang -target armv7r-linux-gnueabi -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V7R %s
// RUN: %clang -target arm-linux-gnueabi -march=armv7-r -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V7R %s
@@ -144,7 +144,7 @@
// RUN: %clang -target armv8a -mlittle-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8A %s
// RUN: %clang -target arm -march=armv8a -mlittle-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8A %s
// RUN: %clang -target arm -mlittle-endian -march=armv8-a -mlittle-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8A %s
-// CHECK-V8A: "-cc1"{{.*}} "-triple" "armv8-{{.*}}" "-target-cpu" "cortex-a53"
+// CHECK-V8A: "-cc1"{{.*}} "-triple" "armv8-{{.*}}" "-target-cpu" "generic"
// RUN: %clang -target armv8r-linux-gnueabi -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8R %s
// RUN: %clang -target arm -march=armv8r -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8R %s
@@ -189,7 +189,7 @@
// RUN: %clang -target armv8a -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-V8A %s
// RUN: %clang -target arm -march=armebv8a -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-V8A %s
// RUN: %clang -target arm -march=armebv8-a -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-V8A %s
-// CHECK-BE-V8A: "-cc1"{{.*}} "-triple" "armebv8-{{.*}}" "-target-cpu" "cortex-a53"
+// CHECK-BE-V8A: "-cc1"{{.*}} "-triple" "armebv8-{{.*}}" "-target-cpu" "generic"
// RUN: %clang -target armv8 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8A-THUMB %s
// RUN: %clang -target arm -march=armv8 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8A-THUMB %s
@@ -199,7 +199,7 @@
// RUN: %clang -target arm -march=armv8 -mlittle-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8A-THUMB %s
// RUN: %clang -target armv8a -mlittle-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8A-THUMB %s
// RUN: %clang -target arm -march=armv8a -mlittle-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V8A-THUMB %s
-// CHECK-V8A-THUMB: "-cc1"{{.*}} "-triple" "thumbv8-{{.*}}" "-target-cpu" "cortex-a53"
+// CHECK-V8A-THUMB: "-cc1"{{.*}} "-triple" "thumbv8-{{.*}}" "-target-cpu" "generic"
// RUN: %clang -target armebv8 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-V8A-THUMB %s
// RUN: %clang -target armeb -march=armebv8 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-V8A-THUMB %s
@@ -209,7 +209,7 @@
// RUN: %clang -target arm -march=armebv8 -mbig-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-V8A-THUMB %s
// RUN: %clang -target armv8a -mbig-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-V8A-THUMB %s
// RUN: %clang -target arm -march=armebv8a -mbig-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-V8A-THUMB %s
-// CHECK-BE-V8A-THUMB: "-cc1"{{.*}} "-triple" "thumbebv8-{{.*}}" "-target-cpu" "cortex-a53"
+// CHECK-BE-V8A-THUMB: "-cc1"{{.*}} "-triple" "thumbebv8-{{.*}}" "-target-cpu" "generic"
// RUN: %clang -target arm -march=armv8.1a -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V81A %s
// RUN: %clang -target armv8.1a -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-V81A %s
diff --git a/test/Driver/cl-cc-flags.c b/test/Driver/cl-cc-flags.c
new file mode 100644
index 000000000000..76f116e199e2
--- /dev/null
+++ b/test/Driver/cl-cc-flags.c
@@ -0,0 +1,33 @@
+// Note: %s must be preceded by --, otherwise it may be interpreted as a
+// command-line option, e.g. on Mac where %s is commonly under /Users.
+
+// RUN: %clang_cl --target=i686-windows-msvc /Gd -### -- %s 2>&1 | FileCheck --check-prefix=CDECL %s
+// CDECL: -fdefault-calling-conv=cdecl
+
+// RUN: %clang_cl --target=i686-windows-msvc /Gr -### -- %s 2>&1 | FileCheck --check-prefix=FASTCALL %s
+// FASTCALL: -fdefault-calling-conv=fastcall
+
+// RUN: %clang_cl --target=i686-windows-msvc /Gz -### -- %s 2>&1 | FileCheck --check-prefix=STDCALL %s
+// STDCALL: -fdefault-calling-conv=stdcall
+
+// RUN: %clang_cl --target=i686-windows-msvc /Gv -### -- %s 2>&1 | FileCheck --check-prefix=VECTORCALL %s
+// VECTORCALL: -fdefault-calling-conv=vectorcall
+
+// Last one should win:
+
+// RUN: %clang_cl --target=i686-windows-msvc /Gd /Gv -### -- %s 2>&1 | FileCheck --check-prefix=LASTWINS_VECTOR %s
+// LASTWINS_VECTOR: -fdefault-calling-conv=vectorcall
+
+// RUN: %clang_cl --target=i686-windows-msvc /Gv /Gd -### -- %s 2>&1 | FileCheck --check-prefix=LASTWINS_CDECL %s
+// LASTWINS_CDECL: -fdefault-calling-conv=cdecl
+
+// No fastcall or stdcall on x86_64:
+
+// RUN: %clang_cl -Wno-msvc-not-found --target=x86_64-windows-msvc /Gr -### -- %s 2>&1 | FileCheck --check-prefix=UNSUPPORTED %s
+// RUN: %clang_cl -Wno-msvc-not-found --target=x86_64-windows-msvc /Gz -### -- %s 2>&1 | FileCheck --check-prefix=UNSUPPORTED %s
+// RUN: %clang_cl -Wno-msvc-not-found --target=thumbv7-windows-msvc /Gv -### -- %s 2>&1 | FileCheck --check-prefix=UNSUPPORTED %s
+
+// UNSUPPORTED-NOT: error:
+// UNSUPPORTED-NOT: warning:
+// UNSUPPORTED-NOT: -fdefault-calling-conv=
+
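A sketch of what the mapped conventions mean for user code (assumed
MSVC-compatible semantics; the test only checks the driver flag translation):

    // Under /Gr (i.e. -fdefault-calling-conv=fastcall):
    int add(int a, int b);         // unannotated: defaults to __fastcall
    int __cdecl sub(int a, int b); // an explicit annotation wins over the default
    // main, variadic functions, and member functions keep their usual
    // conventions regardless of the /G flag.
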
diff --git a/test/Driver/cl-diagnostics.c b/test/Driver/cl-diagnostics.c
new file mode 100644
index 000000000000..975a8cf8bb54
--- /dev/null
+++ b/test/Driver/cl-diagnostics.c
@@ -0,0 +1,28 @@
+// Note: %s must be preceded by --, otherwise it may be interpreted as a
+// command-line option, e.g. on Mac where %s is commonly under /Users.
+
+// RUN: %clang_cl /diagnostics:classic -### -- %s 2>&1 | FileCheck %s --check-prefix=CLASSIC
+// CLASSIC: -fno-caret-diagnostics
+// CLASSIC: -fno-show-column
+
+// RUN: %clang_cl /diagnostics:column -### -- %s 2>&1 | FileCheck %s --check-prefix=COLUMN
+// COLUMN: -fno-caret-diagnostics
+// COLUMN-NOT: -fno-show-column
+
+// RUN: %clang_cl /diagnostics:caret -### -- %s 2>&1 | FileCheck %s --check-prefix=CARET
+// CARET-NOT: -fno-caret-diagnostics
+// CARET-NOT: -fno-show-column
+
+// RUN: not %clang_cl -fms-compatibility-version=19 /diagnostics:classic /Zs -c -- %s 2>&1 | FileCheck %s --check-prefix=OUTPUT_CLASSIC
+
+// OUTPUT_CLASSIC: cl-diagnostics.c({{[0-9]+}}): error: "asdf"
+// OUTPUT_CLASSIC-NOT: #error
+
+// RUN: not %clang_cl -fms-compatibility-version=19 /diagnostics:caret /Zs -c -- %s 2>&1 | FileCheck %s --check-prefix=OUTPUT_CARET
+
+// OUTPUT_CARET: cl-diagnostics.c({{[0-9]+,[0-9]+}}): error: "asdf"
+// OUTPUT_CARET-NEXT: #error "asdf"
+// OUTPUT_CARET-NEXT: ^
+
+
+#error "asdf"
diff --git a/test/Driver/cl-include.c b/test/Driver/cl-include.c
new file mode 100644
index 000000000000..d3dc006e575f
--- /dev/null
+++ b/test/Driver/cl-include.c
@@ -0,0 +1,14 @@
+// Note: %s must be preceded by --, otherwise it may be interpreted as a
+// command-line option, e.g. on Mac where %s is commonly under /Users.
+
+// RUN: %clang_cl -### -- %s 2>&1 | FileCheck %s --check-prefix=BUILTIN
+// BUILTIN: "-internal-isystem" "{{.*lib.*clang.*include}}"
+
+// RUN: %clang_cl -nobuiltininc -### -- %s 2>&1 | FileCheck %s --check-prefix=NOBUILTIN
+// NOBUILTIN-NOT: "-internal-isystem" "{{.*lib.*clang.*include}}"
+
+// RUN: env INCLUDE=/my/system/inc %clang_cl -### -- %s 2>&1 | FileCheck %s --check-prefix=STDINC
+// STDINC: "-internal-isystem" "/my/system/inc"
+
+// RUN: env INCLUDE=/my/system/inc %clang_cl -nostdinc -### -- %s 2>&1 | FileCheck %s --check-prefix=NOSTDINC
+// NOSTDINC-NOT: "-internal-isystem" "/my/system/inc"
diff --git a/test/Driver/cl-zc.cpp b/test/Driver/cl-zc.cpp
index 26496293201e..4414eb6ebba2 100644
--- a/test/Driver/cl-zc.cpp
+++ b/test/Driver/cl-zc.cpp
@@ -53,6 +53,11 @@
// RUN: %clang_cl /c -### /Zc:inline- -- %s 2>&1 | FileCheck -check-prefix=INLINE-OFF %s
// INLINE-OFF: argument unused during compilation
+// RUN: %clang_cl /c -### /Zc:ternary -- %s 2>&1 | FileCheck -check-prefix=TERNARY-ON %s
+// TERNARY-ON-NOT: argument unused during compilation
+// RUN: %clang_cl /c -### /Zc:ternary- -- %s 2>&1 | FileCheck -check-prefix=TERNARY-OFF %s
+// TERNARY-OFF: argument unused during compilation
+
// These never warn, but don't have an effect yet.
diff --git a/test/Driver/fsanitize.c b/test/Driver/fsanitize.c
index 41f573aa7316..786f517f5371 100644
--- a/test/Driver/fsanitize.c
+++ b/test/Driver/fsanitize.c
@@ -3,18 +3,18 @@
// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined -fsanitize-undefined-trap-on-error %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED-TRAP
// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined-trap -fsanitize-undefined-trap-on-error %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED-TRAP
// RUN: %clang -target x86_64-linux-gnu -fsanitize-undefined-trap-on-error -fsanitize=undefined-trap %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED-TRAP
-// CHECK-UNDEFINED-TRAP: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}}
-// CHECK-UNDEFINED-TRAP: "-fsanitize-trap=alignment,array-bounds,bool,enum,float-cast-overflow,float-divide-by-zero,function,integer-divide-by-zero,nonnull-attribute,null,object-size,return,returns-nonnull-attribute,shift-base,shift-exponent,signed-integer-overflow,unreachable,vla-bound"
-// CHECK-UNDEFINED-TRAP2: "-fsanitize-trap=alignment,array-bounds,bool,enum,float-cast-overflow,float-divide-by-zero,function,integer-divide-by-zero,nonnull-attribute,null,object-size,return,returns-nonnull-attribute,shift-base,shift-exponent,unreachable,vla-bound"
+// CHECK-UNDEFINED-TRAP: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute|function),?){19}"}}
+// CHECK-UNDEFINED-TRAP: "-fsanitize-trap=alignment,array-bounds,bool,enum,float-cast-overflow,float-divide-by-zero,function,integer-divide-by-zero,nonnull-attribute,null,object-size,pointer-overflow,return,returns-nonnull-attribute,shift-base,shift-exponent,signed-integer-overflow,unreachable,vla-bound"
+// CHECK-UNDEFINED-TRAP2: "-fsanitize-trap=alignment,array-bounds,bool,enum,float-cast-overflow,float-divide-by-zero,function,integer-divide-by-zero,nonnull-attribute,null,object-size,pointer-overflow,return,returns-nonnull-attribute,shift-base,shift-exponent,unreachable,vla-bound"
// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED
-// CHECK-UNDEFINED: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|function|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|vptr|object-size|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){19}"}}
+// CHECK-UNDEFINED: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|function|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|vptr|object-size|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){20}"}}
// RUN: %clang -target x86_64-apple-darwin10 -fsanitize=undefined %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED-DARWIN
-// CHECK-UNDEFINED-DARWIN: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){17}"}}
+// CHECK-UNDEFINED-DARWIN: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){18}"}}
// RUN: %clang -target i386-unknown-openbsd -fsanitize=undefined %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED-OPENBSD
-// CHECK-UNDEFINED-OPENBSD: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){17}"}}
+// CHECK-UNDEFINED-OPENBSD: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){18}"}}
// RUN: %clang -target i386-pc-win32 -fsanitize=undefined %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED-WIN --check-prefix=CHECK-UNDEFINED-WIN32
// RUN: %clang -target i386-pc-win32 -fsanitize=undefined -x c++ %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED-WIN --check-prefix=CHECK-UNDEFINED-WIN32 --check-prefix=CHECK-UNDEFINED-WIN-CXX
@@ -23,7 +23,7 @@
// CHECK-UNDEFINED-WIN32: "--dependent-lib={{[^"]*}}ubsan_standalone-i386.lib"
// CHECK-UNDEFINED-WIN64: "--dependent-lib={{[^"]*}}ubsan_standalone-x86_64.lib"
// CHECK-UNDEFINED-WIN-CXX: "--dependent-lib={{[^"]*}}ubsan_standalone_cxx{{[^"]*}}.lib"
-// CHECK-UNDEFINED-WIN-SAME: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){17}"}}
+// CHECK-UNDEFINED-WIN-SAME: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){18}"}}
// RUN: %clang -target i386-pc-win32 -fsanitize-coverage=bb %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-COVERAGE-WIN32
// CHECK-COVERAGE-WIN32: "--dependent-lib={{[^"]*}}ubsan_standalone-i386.lib"
@@ -43,7 +43,7 @@
// CHECK-FNO-SANITIZE-ALL: "-fsanitize=thread"
// RUN: %clang -target x86_64-linux-gnu -fsanitize=thread,undefined -fno-sanitize=thread -fno-sanitize=float-cast-overflow,vptr,bool,enum %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-PARTIAL-UNDEFINED
-// CHECK-PARTIAL-UNDEFINED: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|function|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|array-bounds|returns-nonnull-attribute|nonnull-attribute),?){15}"}}
+// CHECK-PARTIAL-UNDEFINED: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|function|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|object-size|pointer-overflow|array-bounds|returns-nonnull-attribute|nonnull-attribute),?){16}"}}
// RUN: %clang -target x86_64-linux-gnu -fsanitize=shift -fno-sanitize=shift-base %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-FSANITIZE-SHIFT-PARTIAL
// CHECK-FSANITIZE-SHIFT-PARTIAL: "-fsanitize=shift-exponent"
@@ -217,7 +217,7 @@
// RUN: %clang -target x86_64-linux-gnu %s -fsanitize=undefined -fno-sanitize-recover=undefined -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-RECOVER-UBSAN
// RUN: %clang -target x86_64-linux-gnu %s -fsanitize=undefined -fno-sanitize-recover=all -fsanitize-recover=thread -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-RECOVER-UBSAN
// RUN: %clang -target x86_64-linux-gnu %s -fsanitize=undefined -fsanitize-recover=all -fno-sanitize-recover=undefined -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-RECOVER-UBSAN
-// CHECK-RECOVER-UBSAN: "-fsanitize-recover={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|function|shift-base|shift-exponent|vla-bound|alignment|null|vptr|object-size|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){17}"}}
+// CHECK-RECOVER-UBSAN: "-fsanitize-recover={{((signed-integer-overflow|integer-divide-by-zero|float-divide-by-zero|function|shift-base|shift-exponent|vla-bound|alignment|null|vptr|object-size|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|returns-nonnull-attribute|nonnull-attribute),?){18}"}}
// CHECK-NO-RECOVER-UBSAN-NOT: sanitize-recover
// RUN: %clang -target x86_64-linux-gnu %s -fsanitize=undefined -fno-sanitize-recover=all -fsanitize-recover=object-size,shift-base -### 2>&1 | FileCheck %s --check-prefix=CHECK-PARTIAL-RECOVER
diff --git a/test/Driver/gold-lto.c b/test/Driver/gold-lto.c
index 050b1ef18be1..b8eca5112b5a 100644
--- a/test/Driver/gold-lto.c
+++ b/test/Driver/gold-lto.c
@@ -19,7 +19,7 @@
// RUN: -march=armv7a -Wl,-plugin-opt=foo -O0 \
// RUN: | FileCheck %s --check-prefix=CHECK-ARM-V7A
// CHECK-ARM-V7A: "-plugin" "{{.*}}/LLVMgold.so"
-// CHECK-ARM-V7A: "-plugin-opt=mcpu=cortex-a8"
+// CHECK-ARM-V7A: "-plugin-opt=mcpu=generic"
// CHECK-ARM-V7A: "-plugin-opt=O0"
// CHECK-ARM-V7A: "-plugin-opt=foo"
//
diff --git a/test/Driver/nacl-direct.c b/test/Driver/nacl-direct.c
index f71e14f996ee..3c8e33b180b0 100644
--- a/test/Driver/nacl-direct.c
+++ b/test/Driver/nacl-direct.c
@@ -46,7 +46,7 @@
// RUN: | FileCheck --check-prefix=CHECK-ARM %s
// CHECK-ARM: {{.*}}clang{{.*}}" "-cc1"
// CHECK-ARM: "-fuse-init-array"
-// CHECK-ARM: "-target-cpu" "cortex-a8"
+// CHECK-ARM: "-target-cpu" "generic"
// CHECK-ARM: "-target-abi" "aapcs-linux"
// CHECK-ARM: "-mfloat-abi" "hard"
// CHECK-ARM: "-resource-dir" "foo"
diff --git a/test/Driver/openmp-offload.c b/test/Driver/openmp-offload.c
index 097dc7d17faa..39eb41e6ac08 100644
--- a/test/Driver/openmp-offload.c
+++ b/test/Driver/openmp-offload.c
@@ -210,14 +210,16 @@
// CHK-LKS-ST: INPUT([[T2BIN:.+\.out-openmp-x86_64-pc-linux-gnu]])
// CHK-LKS: SECTIONS
// CHK-LKS: {
-// CHK-LKS: .omp_offloading :
+// CHK-LKS: .omp_offloading.powerpc64le-ibm-linux-gnu :
// CHK-LKS: ALIGN(0x10)
// CHK-LKS: {
-// CHK-LKS: . = ALIGN(0x10);
// CHK-LKS: PROVIDE_HIDDEN(.omp_offloading.img_start.powerpc64le-ibm-linux-gnu = .);
// CHK-LKS: [[T1BIN]]
// CHK-LKS: PROVIDE_HIDDEN(.omp_offloading.img_end.powerpc64le-ibm-linux-gnu = .);
-// CHK-LKS: . = ALIGN(0x10);
+// CHK-LKS: }
+// CHK-LKS: .omp_offloading.x86_64-pc-linux-gnu :
+// CHK-LKS: ALIGN(0x10)
+// CHK-LKS: {
// CHK-LKS: PROVIDE_HIDDEN(.omp_offloading.img_start.x86_64-pc-linux-gnu = .);
// CHK-LKS: [[T2BIN]]
// CHK-LKS: PROVIDE_HIDDEN(.omp_offloading.img_end.x86_64-pc-linux-gnu = .);
diff --git a/test/Misc/diag-mapping2.c b/test/Misc/diag-mapping2.c
index 672d0548ad82..c3afea94b370 100644
--- a/test/Misc/diag-mapping2.c
+++ b/test/Misc/diag-mapping2.c
@@ -4,6 +4,7 @@
// This should not emit anything.
// RUN: %clang_cc1 %s -w 2>&1 | not grep diagnostic
// RUN: %clang_cc1 %s -Wno-#warnings 2>&1 | not grep diagnostic
+// RUN: %clang_cc1 %s -Wno-cpp 2>&1 | not grep diagnostic
// -Werror can map all warnings to error.
// RUN: not %clang_cc1 %s -Werror 2>&1 | grep "error: foo"
diff --git a/test/Modules/odr_hash.cpp b/test/Modules/odr_hash.cpp
index 947583bcfd21..a6a0b74743aa 100644
--- a/test/Modules/odr_hash.cpp
+++ b/test/Modules/odr_hash.cpp
@@ -866,6 +866,40 @@ S9 s9;
#endif
}
+namespace TemplateSpecializationType {
+#if defined(FIRST)
+template <class T1> struct U1 {};
+struct S1 {
+ U1<int> u;
+};
+#elif defined(SECOND)
+template <class T1, class T2> struct U1 {};
+struct S1 {
+ U1<int, int> u;
+};
+#else
+S1 s1;
+// expected-error@first.h:* {{'TemplateSpecializationType::S1::u' from module 'FirstModule' is not present in definition of 'TemplateSpecializationType::S1' in module 'SecondModule'}}
+// expected-note@second.h:* {{declaration of 'u' does not match}}
+#endif
+
+#if defined(FIRST)
+template <class T1> struct U2 {};
+struct S2 {
+ U2<int> u;
+};
+#elif defined(SECOND)
+template <class T1> struct V1 {};
+struct S2 {
+ V1<int> u;
+};
+#else
+S2 s2;
+// expected-error@first.h:* {{'TemplateSpecializationType::S2::u' from module 'FirstModule' is not present in definition of 'TemplateSpecializationType::S2' in module 'SecondModule'}}
+// expected-note@second.h:* {{declaration of 'u' does not match}}
+#endif
+}
+
// Interesting cases that should not cause errors. struct S should not error
// while struct T should error at the access specifier mismatch at the end.
namespace AllDecls {
diff --git a/test/Modules/preprocess-module.cpp b/test/Modules/preprocess-module.cpp
index eaab3136938f..9d1a2bb6801f 100644
--- a/test/Modules/preprocess-module.cpp
+++ b/test/Modules/preprocess-module.cpp
@@ -14,8 +14,6 @@
// RUN: FileCheck %s --input-file %t/rewrite.ii --check-prefix=CHECK --check-prefix=REWRITE
// Check that we can build a module from the preprocessed output.
-// FIXME: For now, we need the headers to exist.
-// RUN: touch %t/file.h %t/file2.h
// RUN: %clang_cc1 -fmodules -fmodule-name=file -fmodule-file=%t/fwd.pcm -x c++-module-map-cpp-output %t/no-rewrite.ii -emit-module -o %t/no-rewrite.pcm
// RUN: %clang_cc1 -fmodules -fmodule-name=file -fmodule-file=%t/fwd.pcm -x c++-module-map-cpp-output %t/rewrite.ii -emit-module -o %t/rewrite.pcm
@@ -27,6 +25,8 @@
// Check the module we built works.
// RUN: %clang_cc1 -fmodules -fmodule-file=%t/no-rewrite.pcm %s -I%t -verify -fno-modules-error-recovery
// RUN: %clang_cc1 -fmodules -fmodule-file=%t/rewrite.pcm %s -I%t -verify -fno-modules-error-recovery -DREWRITE
+// RUN: %clang_cc1 -fmodules -fmodule-file=%t/no-rewrite.pcm %s -I%t -verify -fno-modules-error-recovery -DINCLUDE -I%S/Inputs/preprocess
+// RUN: %clang_cc1 -fmodules -fmodule-file=%t/rewrite.pcm %s -I%t -verify -fno-modules-error-recovery -DREWRITE -DINCLUDE -I%S/Inputs/preprocess
// == module map
@@ -102,7 +102,11 @@ __FILE *a; // expected-error {{declaration of '__FILE' must be imported}}
// expected-note@no-rewrite.ii:1 {{here}}
#endif
+#ifdef INCLUDE
+#include "file.h"
+#else
#pragma clang module import file
+#endif
FILE *b;
-int x = file2;
+int x = file2; // ok, found in file2.h, even under -DINCLUDE
diff --git a/test/Modules/preprocess-nested.cpp b/test/Modules/preprocess-nested.cpp
index 8fccf137e94f..f26b65507ece 100644
--- a/test/Modules/preprocess-nested.cpp
+++ b/test/Modules/preprocess-nested.cpp
@@ -8,8 +8,6 @@
// RUN: FileCheck %s --input-file %t/rewrite.ii --check-prefix=CHECK --check-prefix=REWRITE
// Check that we can build a module from the preprocessed output.
-// FIXME: For now, the files need to exist.
-// RUN: touch %t/a.h %t/b.h %t/c.h
// RUN: %clang_cc1 -fmodules -fmodules-local-submodule-visibility -fmodule-name=nested -x c++-module-map-cpp-output %t/no-rewrite.ii -emit-module -o %t/no-rewrite.pcm
// RUN: %clang_cc1 -fmodules -fmodules-local-submodule-visibility -fmodule-name=nested -x c++-module-map-cpp-output %t/rewrite.ii -emit-module -o %t/rewrite.pcm
diff --git a/test/Modules/preprocess-unavailable.cpp b/test/Modules/preprocess-unavailable.cpp
index e568cd7b5251..cfd18253daf7 100644
--- a/test/Modules/preprocess-unavailable.cpp
+++ b/test/Modules/preprocess-unavailable.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -x c++-module-map %s -fmodule-name=a -verify
+// RUN: %clang_cc1 -x c++-module-map %s -fmodule-name=a -verify -std=c++98
module a {
module b {
requires cplusplus11
diff --git a/test/SemaCXX/attr-require-constant-initialization.cpp b/test/SemaCXX/attr-require-constant-initialization.cpp
index 3ed51071cd13..0df9f2e88029 100644
--- a/test/SemaCXX/attr-require-constant-initialization.cpp
+++ b/test/SemaCXX/attr-require-constant-initialization.cpp
@@ -7,9 +7,9 @@
#define ATTR __attribute__((require_constant_initialization)) // expected-note 0+ {{expanded from macro}}
-int ReturnInt();
+int ReturnInt(); // expected-note 0+ {{declared here}}
-struct PODType {
+struct PODType { // expected-note 0+ {{declared here}}
int value;
int value2;
};
@@ -20,20 +20,20 @@ struct PODType {
struct LitType {
constexpr LitType() : value(0) {}
constexpr LitType(int x) : value(x) {}
- LitType(void *) : value(-1) {}
+ LitType(void *) : value(-1) {} // expected-note 0+ {{declared here}}
int value;
};
#endif
-struct NonLit {
+struct NonLit { // expected-note 0+ {{declared here}}
#if __cplusplus >= 201402L
constexpr NonLit() : value(0) {}
constexpr NonLit(int x) : value(x) {}
#else
- NonLit() : value(0) {}
+ NonLit() : value(0) {} // expected-note 0+ {{declared here}}
NonLit(int x) : value(x) {}
#endif
- NonLit(void *) : value(-1) {}
+ NonLit(void *) : value(-1) {} // expected-note 0+ {{declared here}}
~NonLit() {}
int value;
};
@@ -43,7 +43,7 @@ struct StoresNonLit {
constexpr StoresNonLit() : obj() {}
constexpr StoresNonLit(int x) : obj(x) {}
#else
- StoresNonLit() : obj() {}
+ StoresNonLit() : obj() {} // expected-note 0+ {{declared here}}
StoresNonLit(int x) : obj(x) {}
#endif
StoresNonLit(void *p) : obj(p) {}
@@ -82,6 +82,12 @@ void test_basic_start_static_2_1() {
const int non_global = 42;
ATTR static const int &local_init = non_global; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+#if __cplusplus >= 201103L
+ // expected-note@-3 {{reference to 'non_global' is not a constant expression}}
+ // expected-note@-5 {{declared here}}
+#else
+ // expected-note@-6 {{subexpression not valid in a constant expression}}
+#endif
ATTR static const int &global_init = glvalue_int;
ATTR static const int &temp_init = 42;
}
@@ -89,8 +95,18 @@ void test_basic_start_static_2_1() {
ATTR const int &temp_ref = 42;
ATTR const int &temp_ref2 = ReturnInt(); // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+#if __cplusplus >= 201103L
+// expected-note@-3 {{non-constexpr function 'ReturnInt' cannot be used in a constant expression}}
+#else
+// expected-note@-5 {{subexpression not valid in a constant expression}}
+#endif
ATTR const NonLit &nl_temp_ref = 42; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+#if __cplusplus >= 201103L
+// expected-note@-3 {{non-literal type 'const NonLit' cannot be used in a constant expression}}
+#else
+// expected-note@-5 {{subexpression not valid in a constant expression}}
+#endif
#if __cplusplus >= 201103L
ATTR const LitType &lit_temp_ref = 42;
@@ -99,6 +115,11 @@ ATTR const int &subobj_ref = LitType{}.value;
ATTR const int &nl_subobj_ref = NonLit().value; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+#if __cplusplus >= 201103L
+// expected-note-re@-3 {{non-literal type '{{.*}}' cannot be used in a constant expression}}
+#else
+// expected-note@-5 {{subexpression not valid in a constant expression}}
+#endif
struct TT1 {
ATTR static const int &no_init;
@@ -116,6 +137,8 @@ const int &TT1::subobj_init = PODType().value;
#if __cplusplus >= 201103L
thread_local const int &TT1::tl_glvalue_init = glvalue_int;
thread_local const int &TT1::tl_temp_init = 42; // expected-error {{variable does not have a constant initializer}}
+// expected-note@-1 {{reference to temporary is not a constant expression}}
+// expected-note@-2 {{temporary created here}}
#endif
// [basic.start.static]p2.2
@@ -129,17 +152,25 @@ void test_basic_start_static_2_2() {
#else
ATTR static PODType pod; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-2 {{non-constexpr constructor 'PODType' cannot be used in a constant expression}}
#endif
ATTR static PODType pot2 = {ReturnInt()}; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+#if __cplusplus >= 201103L
+// expected-note@-3 {{non-constexpr function 'ReturnInt' cannot be used in a constant expression}}
+#else
+// expected-note@-5 {{subexpression not valid in a constant expression}}
+#endif
#if __cplusplus >= 201103L
constexpr LitType l;
ATTR static LitType static_lit = l;
ATTR static LitType static_lit2 = (void *)0; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+ // expected-note@-2 {{non-constexpr constructor 'LitType' cannot be used in a constant expression}}
ATTR static LitType static_lit3 = ReturnInt(); // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+ // expected-note@-2 {{non-constexpr function 'ReturnInt' cannot be used in a constant expression}}
ATTR thread_local LitType tls = 42;
#endif
}
@@ -157,15 +188,23 @@ struct TT2 {
ATTR static const NonLit non_lit_copy_init; // expected-note {{required by 'require_constant_initialization' attribute here}}
#endif
};
-PODType TT2::pod_noinit;
+PODType TT2::pod_noinit; // expected-note 0+ {{declared here}}
#if __cplusplus >= 201103L
// expected-error@-2 {{variable does not have a constant initializer}}
+// expected-note@-3 {{non-constexpr constructor 'PODType' cannot be used in a constant expression}}
#endif
PODType TT2::pod_copy_init(TT2::pod_noinit); // expected-error {{variable does not have a constant initializer}}
+#if __cplusplus >= 201103L
+// expected-note@-2 {{read of non-constexpr variable 'pod_noinit' is not allowed in a constant expression}}
+// expected-note@-3 {{in call to 'PODType(pod_noinit)'}}
+#else
+// expected-note@-5 {{subexpression not valid in a constant expression}}
+#endif
#if __cplusplus >= 201402L
const NonLit TT2::non_lit(42);
const NonLit TT2::non_lit_list_init = {42};
const NonLit TT2::non_lit_copy_init = 42; // expected-error {{variable does not have a constant initializer}}
+// expected-note@-1 {{subexpression not valid in a constant expression}}
#endif
#if __cplusplus >= 201103L
@@ -183,19 +222,25 @@ ATTR StoresNonLit snl;
#else
ATTR NonLit nl_ctor; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-2 {{non-constexpr constructor 'NonLit' cannot be used in a constant expression}}
ATTR NonLit nl_ctor2{}; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-2 {{non-constexpr constructor 'NonLit' cannot be used in a constant expression}}
ATTR NonLit nl_ctor3 = {}; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-2 {{non-constexpr constructor 'NonLit' cannot be used in a constant expression}}
ATTR thread_local NonLit nl_ctor_tl = {}; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-2 {{non-constexpr constructor 'NonLit' cannot be used in a constant expression}}
ATTR StoresNonLit snl; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-2 {{non-constexpr constructor 'StoresNonLit' cannot be used in a constant expression}}
#endif
// Non-literal types cannot appear in the initializer of a non-literal type.
ATTR int nl_in_init = NonLit{42}.value; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-2 {{non-literal type 'NonLit' cannot be used in a constant expression}}
ATTR int lit_in_init = LitType{42}.value;
#endif
@@ -218,6 +263,11 @@ ATTR PODType pod_missing_init = {42 /* should have second arg */};
ATTR PODType pod_full_init = {1, 2};
ATTR PODType pod_non_constexpr_init = {1, ReturnInt()}; // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+#if __cplusplus >= 201103L
+// expected-note@-3 {{non-constexpr function 'ReturnInt' cannot be used in a constant expression}}
+#else
+// expected-note@-5 {{subexpression not valid in a constant expression}}
+#endif
#if __cplusplus >= 201103L
ATTR int val_init{};
@@ -233,15 +283,17 @@ typedef const char *StrType;
// initializer
struct NotC {
constexpr NotC(void *) {}
- NotC(int) {}
+ NotC(int) {} // expected-note 0+ {{declared here}}
};
template <class T>
struct TestCtor {
constexpr TestCtor(int x) : value(x) {}
+ // expected-note@-1 {{non-constexpr constructor 'NotC' cannot be used in a constant expression}}
T value;
};
ATTR TestCtor<NotC> t(42); // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-2 {{in call to 'TestCtor(42)'}}
#endif
// Test various array types
@@ -261,9 +313,11 @@ struct TestCtor {
ATTR LitType non_const_lit(nullptr); // expected-error {{variable does not have a constant initializer}}
// expected-note@-1 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-2 {{non-constexpr constructor 'LitType' cannot be used in a constant expression}}
ATTR NonLit non_const(nullptr); // expected-error {{variable does not have a constant initializer}}
// expected-warning@-1 {{declaration requires a global destructor}}
// expected-note@-2 {{required by 'require_constant_initialization' attribute here}}
+// expected-note@-3 {{non-constexpr constructor 'NonLit' cannot be used in a constant expression}}
LitType const_init_lit(nullptr); // expected-warning {{declaration requires a global constructor}}
NonLit const_init{42}; // expected-warning {{declaration requires a global destructor}}
constexpr TestCtor<NotC> inval_constexpr(42); // expected-error {{must be initialized by a constant expression}}
diff --git a/test/SemaCXX/coreturn.cpp b/test/SemaCXX/coreturn.cpp
index 0ec94d1b5997..7265d7c19c2e 100644
--- a/test/SemaCXX/coreturn.cpp
+++ b/test/SemaCXX/coreturn.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin9 %s -std=c++14 -fcoroutines-ts -fsyntax-only -Wignored-qualifiers -Wno-error=return-type -verify -fblocks -Wno-unreachable-code -Wno-unused-value
+// RUN: %clang_cc1 -triple x86_64-apple-darwin9 %s -std=c++14 -fcoroutines-ts -fsyntax-only -Wignored-qualifiers -Wno-error=return-type -verify -fblocks -Wall -Wextra -Wno-error=unreachable-code
#include "Inputs/std-coroutine.h"
using std::experimental::suspend_always;
diff --git a/test/SemaCXX/coroutine-uninitialized-warning-crash.cpp b/test/SemaCXX/coroutine-uninitialized-warning-crash.cpp
new file mode 100644
index 000000000000..5bdb232d5307
--- /dev/null
+++ b/test/SemaCXX/coroutine-uninitialized-warning-crash.cpp
@@ -0,0 +1,44 @@
+// RUN: %clang_cc1 -triple x86_64-apple-darwin9 %s -std=c++14 -fcoroutines-ts -fsyntax-only -Wall -Wextra -Wuninitialized -fblocks
+#include "Inputs/std-coroutine.h"
+
+using namespace std::experimental;
+
+
+struct A {
+ bool await_ready() { return true; }
+ int await_resume() { return 42; }
+ template <typename F>
+ void await_suspend(F) {}
+};
+
+
+struct coro_t {
+ struct promise_type {
+ coro_t get_return_object() { return {}; }
+ suspend_never initial_suspend() { return {}; }
+ suspend_never final_suspend() { return {}; }
+ A yield_value(int) { return {}; }
+ void return_void() {}
+ static void unhandled_exception() {}
+ };
+};
+
+coro_t f(int n) {
+ if (n == 0)
+ co_return;
+ co_yield 42;
+ int x = co_await A{};
+}
+
+template <class Await>
+coro_t g(int n) {
+ if (n == 0)
+ co_return;
+ co_yield 42;
+ int x = co_await Await{};
+}
+
+int main() {
+ f(0);
+ g<A>(0);
+}
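As the file name suggests, this new file is a regression test: it only needs to compile cleanly with -Wuninitialized, which evidently used to crash on coroutine bodies like these. The co_yield in both functions reaches A because the Coroutines TS rewrites yield expressions in terms of co_await (sketch, with p denoting the promise object):

    // Inside a coroutine with promise object p, the TS rewrites
    //     co_yield e;
    // as approximately
    //     co_await p.yield_value(e);
    // so coro_t::promise_type::yield_value(int) returning A means the
    // co_yield 42 above suspends through A's await_* members.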
diff --git a/test/SemaCXX/coroutines.cpp b/test/SemaCXX/coroutines.cpp
index 47ad86e5b02f..a867cf68a6e1 100644
--- a/test/SemaCXX/coroutines.cpp
+++ b/test/SemaCXX/coroutines.cpp
@@ -840,12 +840,12 @@ coro<bad_promise_no_return_func> no_return_value_or_return_void() {
struct bad_await_suspend_return {
bool await_ready();
- // expected-error@+1 {{the return type of 'await_suspend' is required to be 'void' or 'bool' (have 'char')}}
+ // expected-error@+1 {{return type of 'await_suspend' is required to be 'void' or 'bool' (have 'char')}}
char await_suspend(std::experimental::coroutine_handle<>);
void await_resume();
};
struct bad_await_ready_return {
- // expected-note@+1 {{the return type of 'await_ready' is required to be contextually convertible to 'bool'}}
+ // expected-note@+1 {{return type of 'await_ready' is required to be contextually convertible to 'bool'}}
void await_ready();
bool await_suspend(std::experimental::coroutine_handle<>);
void await_resume();
@@ -858,6 +858,14 @@ struct await_ready_explicit_bool {
void await_suspend(std::experimental::coroutine_handle<>);
void await_resume();
};
+template <class SuspendTy>
+struct await_suspend_type_test {
+ bool await_ready();
+ // expected-error@+2 {{return type of 'await_suspend' is required to be 'void' or 'bool' (have 'bool &')}}
+ // expected-error@+1 {{return type of 'await_suspend' is required to be 'void' or 'bool' (have 'bool &&')}}
+ SuspendTy await_suspend(std::experimental::coroutine_handle<>);
+ void await_resume();
+};
void test_bad_suspend() {
{
// FIXME: The actual error emitted here is terrible, and no number of notes can save it.
@@ -873,4 +881,14 @@ void test_bad_suspend() {
await_ready_explicit_bool c;
co_await c; // OK
}
+ {
+ await_suspend_type_test<bool &&> a;
+ await_suspend_type_test<bool &> b;
+ await_suspend_type_test<const void> c;
+ await_suspend_type_test<const volatile bool> d;
+ co_await a; // expected-note {{call to 'await_suspend' implicitly required by coroutine function here}}
+ co_await b; // expected-note {{call to 'await_suspend' implicitly required by coroutine function here}}
+ co_await c; // OK
+ co_await d; // OK
+ }
}
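The rule these additions pin down: at this revision await_suspend must return void or bool, cv-qualifiers aside (which is why the const void and const volatile bool instantiations are OK), while reference-to-bool return types are rejected. A conforming awaitable with a bool-returning await_suspend, as a sketch assuming the std::experimental::coroutine_handle from the Inputs/std-coroutine.h header these tests use:

    #include "Inputs/std-coroutine.h"

    // Returning false from await_suspend means "do not suspend after all";
    // the coroutine resumes immediately.
    struct maybe_suspend {
      bool await_ready() { return false; }
      bool await_suspend(std::experimental::coroutine_handle<>) {
        return false; // resume right away instead of staying suspended
      }
      void await_resume() {}
    };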
diff --git a/test/SemaCXX/type-traits.cpp b/test/SemaCXX/type-traits.cpp
index 919122576222..8a994f01ba1a 100644
--- a/test/SemaCXX/type-traits.cpp
+++ b/test/SemaCXX/type-traits.cpp
@@ -252,6 +252,11 @@ void is_pod()
{ int arr[F(__is_pod(void))]; }
{ int arr[F(__is_pod(cvoid))]; }
// { int arr[F(__is_pod(NonPODUnion))]; }
+
+ { int arr[T(__is_pod(ACompleteType))]; }
+ { int arr[F(__is_pod(AnIncompleteType))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_pod(AnIncompleteType[]))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_pod(AnIncompleteType[1]))]; } // expected-error {{incomplete type}}
}
typedef Empty EmptyAr[10];
@@ -287,6 +292,11 @@ void is_empty()
{ int arr[F(__is_empty(IntArNB))]; }
{ int arr[F(__is_empty(HasAnonymousUnion))]; }
// { int arr[F(__is_empty(DerivesVirt))]; }
+
+ { int arr[T(__is_empty(ACompleteType))]; }
+ { int arr[F(__is_empty(AnIncompleteType))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_empty(AnIncompleteType[]))]; }
+ { int arr[F(__is_empty(AnIncompleteType[1]))]; }
}
typedef Derives ClassType;
@@ -511,6 +521,8 @@ void is_aggregate()
constexpr bool TrueAfterCpp14 = __cplusplus > 201402L;
__is_aggregate(AnIncompleteType); // expected-error {{incomplete type}}
+ __is_aggregate(AnIncompleteType[]); // expected-error {{incomplete type}}
+ __is_aggregate(AnIncompleteType[1]); // expected-error {{incomplete type}}
__is_aggregate(AnIncompleteTypeAr); // expected-error {{incomplete type}}
__is_aggregate(AnIncompleteTypeArNB); // expected-error {{incomplete type}}
__is_aggregate(AnIncompleteTypeArMB); // expected-error {{incomplete type}}
@@ -1220,6 +1232,13 @@ void is_trivial2()
int t32[F(__is_trivial(SuperNonTrivialStruct))];
int t33[F(__is_trivial(NonTCStruct))];
int t34[F(__is_trivial(ExtDefaulted))];
+
+ int t40[T(__is_trivial(ACompleteType))];
+ int t41[F(__is_trivial(AnIncompleteType))]; // expected-error {{incomplete type}}
+ int t42[F(__is_trivial(AnIncompleteType[]))]; // expected-error {{incomplete type}}
+ int t43[F(__is_trivial(AnIncompleteType[1]))]; // expected-error {{incomplete type}}
+ int t44[F(__is_trivial(void))];
+ int t45[F(__is_trivial(const volatile void))];
}
void is_trivially_copyable2()
@@ -1257,6 +1276,13 @@ void is_trivially_copyable2()
int t34[T(__is_trivially_copyable(const int))];
int t35[T(__is_trivially_copyable(volatile int))];
+
+ int t40[T(__is_trivially_copyable(ACompleteType))];
+ int t41[F(__is_trivially_copyable(AnIncompleteType))]; // expected-error {{incomplete type}}
+ int t42[F(__is_trivially_copyable(AnIncompleteType[]))]; // expected-error {{incomplete type}}
+ int t43[F(__is_trivially_copyable(AnIncompleteType[1]))]; // expected-error {{incomplete type}}
+ int t44[F(__is_trivially_copyable(void))];
+ int t45[F(__is_trivially_copyable(const volatile void))];
}
struct CStruct {
@@ -1320,6 +1346,13 @@ void is_standard_layout()
int t15[F(__is_standard_layout(CppStructNonStandardByBaseAr))];
int t16[F(__is_standard_layout(CppStructNonStandardBySameBase))];
int t17[F(__is_standard_layout(CppStructNonStandardBy2ndVirtBase))];
+
+ int t40[T(__is_standard_layout(ACompleteType))];
+ int t41[F(__is_standard_layout(AnIncompleteType))]; // expected-error {{incomplete type}}
+ int t42[F(__is_standard_layout(AnIncompleteType[]))]; // expected-error {{incomplete type}}
+ int t43[F(__is_standard_layout(AnIncompleteType[1]))]; // expected-error {{incomplete type}}
+ int t44[F(__is_standard_layout(void))];
+ int t45[F(__is_standard_layout(const volatile void))];
}
void is_signed()
@@ -2133,6 +2166,13 @@ void trivial_checks()
TrivialMoveButNotCopy)))]; }
{ int arr[T((__is_assignable(TrivialMoveButNotCopy &,
TrivialMoveButNotCopy &&)))]; }
+
+ { int arr[T(__is_assignable(ACompleteType, ACompleteType))]; }
+ { int arr[F(__is_assignable(AnIncompleteType, AnIncompleteType))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_assignable(AnIncompleteType[], AnIncompleteType[]))]; }
+ { int arr[F(__is_assignable(AnIncompleteType[1], AnIncompleteType[1]))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_assignable(void, void))]; }
+ { int arr[F(__is_assignable(const volatile void, const volatile void))]; }
}
void constructible_checks() {
@@ -2164,6 +2204,19 @@ void constructible_checks() {
// PR25513
{ int arr[F(__is_constructible(int(int)))]; }
+
+ { int arr[T(__is_constructible(ACompleteType))]; }
+ { int arr[T(__is_nothrow_constructible(ACompleteType))]; }
+ { int arr[F(__is_constructible(AnIncompleteType))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_nothrow_constructible(AnIncompleteType))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_constructible(AnIncompleteType[]))]; }
+ { int arr[F(__is_nothrow_constructible(AnIncompleteType[]))]; }
+ { int arr[F(__is_constructible(AnIncompleteType[1]))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_nothrow_constructible(AnIncompleteType[1]))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_constructible(void))]; }
+ { int arr[F(__is_nothrow_constructible(void))]; }
+ { int arr[F(__is_constructible(const volatile void))]; }
+ { int arr[F(__is_nothrow_constructible(const volatile void))]; }
}
// Instantiation of __is_trivially_constructible
@@ -2192,6 +2245,13 @@ void is_trivially_constructible_test() {
{ int arr[F((is_trivially_constructible<NonTrivialDefault>::value))]; }
{ int arr[F((is_trivially_constructible<ThreeArgCtor, int*, char*, int&>::value))]; }
{ int arr[F((is_trivially_constructible<Abstract>::value))]; } // PR19178
+
+ { int arr[T(__is_trivially_constructible(ACompleteType))]; }
+ { int arr[F(__is_trivially_constructible(AnIncompleteType))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_trivially_constructible(AnIncompleteType[]))]; }
+ { int arr[F(__is_trivially_constructible(AnIncompleteType[1]))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_trivially_constructible(void))]; }
+ { int arr[F(__is_trivially_constructible(const volatile void))]; }
}
void array_rank() {
@@ -2218,6 +2278,13 @@ void is_destructible_test() {
{ int arr[F(__is_destructible(AllDeleted))]; }
{ int arr[T(__is_destructible(ThrowingDtor))]; }
{ int arr[T(__is_destructible(NoThrowDtor))]; }
+
+ { int arr[T(__is_destructible(ACompleteType))]; }
+ { int arr[F(__is_destructible(AnIncompleteType))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_destructible(AnIncompleteType[]))]; }
+ { int arr[F(__is_destructible(AnIncompleteType[1]))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_destructible(void))]; }
+ { int arr[F(__is_destructible(const volatile void))]; }
}
void is_nothrow_destructible_test() {
@@ -2234,4 +2301,33 @@ void is_nothrow_destructible_test() {
{ int arr[F(__is_nothrow_destructible(ThrowingDtor))]; }
{ int arr[T(__is_nothrow_destructible(NoExceptDtor))]; }
{ int arr[T(__is_nothrow_destructible(NoThrowDtor))]; }
+
+ { int arr[T(__is_nothrow_destructible(ACompleteType))]; }
+ { int arr[F(__is_nothrow_destructible(AnIncompleteType))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_nothrow_destructible(AnIncompleteType[]))]; }
+ { int arr[F(__is_nothrow_destructible(AnIncompleteType[1]))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_nothrow_destructible(void))]; }
+ { int arr[F(__is_nothrow_destructible(const volatile void))]; }
+}
+
+void is_trivially_destructible_test() {
+ { int arr[T(__is_trivially_destructible(int))]; }
+ { int arr[T(__is_trivially_destructible(int[2]))]; }
+ { int arr[F(__is_trivially_destructible(int[]))]; }
+ { int arr[F(__is_trivially_destructible(void))]; }
+ { int arr[T(__is_trivially_destructible(int &))]; }
+ { int arr[F(__is_trivially_destructible(HasDest))]; }
+ { int arr[F(__is_trivially_destructible(AllPrivate))]; }
+ { int arr[F(__is_trivially_destructible(SuperNonTrivialStruct))]; }
+ { int arr[T(__is_trivially_destructible(AllDefaulted))]; }
+ { int arr[F(__is_trivially_destructible(AllDeleted))]; }
+ { int arr[F(__is_trivially_destructible(ThrowingDtor))]; }
+ { int arr[F(__is_trivially_destructible(NoThrowDtor))]; }
+
+ { int arr[T(__is_trivially_destructible(ACompleteType))]; }
+ { int arr[F(__is_trivially_destructible(AnIncompleteType))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_trivially_destructible(AnIncompleteType[]))]; }
+ { int arr[F(__is_trivially_destructible(AnIncompleteType[1]))]; } // expected-error {{incomplete type}}
+ { int arr[F(__is_trivially_destructible(void))]; }
+ { int arr[F(__is_trivially_destructible(const volatile void))]; }
}
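A note on the T()/F() idiom running through this file: the macros turn a trait's truth value into an array bound, so a wrong answer becomes a negative-array-size compile error, while the incomplete-type operands are checked via expected-error instead. They are defined near the top of the test, roughly as:

    #define T(b) (b) ? 1 : -1  // trait must be true or the bound goes negative
    #define F(b) (b) ? -1 : 1  // trait must be false or the bound goes negative

    // e.g. int arr[T(__is_pod(int))]; only compiles if __is_pod(int) is true.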
diff --git a/test/SemaObjC/attr-deprecated.m b/test/SemaObjC/attr-deprecated.m
index b0613851ddaa..28031997af7a 100644
--- a/test/SemaObjC/attr-deprecated.m
+++ b/test/SemaObjC/attr-deprecated.m
@@ -83,7 +83,7 @@ int t5() {
}
-__attribute ((deprecated)) // expected-note 2 {{'DEPRECATED' has been explicitly marked deprecated here}}
+__attribute ((deprecated)) // expected-note {{'DEPRECATED' has been explicitly marked deprecated here}}
@interface DEPRECATED {
@public int ivar;
DEPRECATED *ivar2; // no warning.
@@ -98,9 +98,17 @@ __attribute ((deprecated)) // expected-note 2 {{'DEPRECATED' has been explicitly
@end
@interface DEPRECATED (Category2) // no warning.
+- (id)meth;
@end
-@implementation DEPRECATED (Category2) // expected-warning {{'DEPRECATED' is deprecated}}
+__attribute__((deprecated))
+void depr_function();
+
+@implementation DEPRECATED (Category2) // no warning
+- (id)meth {
+ depr_function(); // no warning.
+ return 0;
+}
@end
@interface NS : DEPRECATED // expected-warning {{'DEPRECATED' is deprecated}}
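The behavior change in this file follows clang's general rule that deprecation diagnostics are suppressed when the use itself sits in a context that is equally deprecated: the category implementation belongs to the deprecated class, so neither it nor the depr_function() call inside it warns. The same suppression is visible in plain C/C++ (illustrative sketch, not from the patch):

    [[deprecated]] void old_fn();
    [[deprecated]] void also_old() {
      old_fn(); // no -Wdeprecated-declarations: the caller is deprecated too
    }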
diff --git a/test/SemaObjC/class-unavail-warning.m b/test/SemaObjC/class-unavail-warning.m
index 268d51910b0c..f7d8f569ca67 100644
--- a/test/SemaObjC/class-unavail-warning.m
+++ b/test/SemaObjC/class-unavail-warning.m
@@ -2,7 +2,7 @@
// rdar://9092208
__attribute__((unavailable("not available")))
-@interface MyClass { // expected-note 8 {{'MyClass' has been explicitly marked unavailable here}}
+@interface MyClass { // expected-note 7 {{'MyClass' has been explicitly marked unavailable here}}
@public
void *_test;
MyClass *ivar; // no error.
@@ -28,7 +28,7 @@ __attribute__((unavailable("not available")))
@interface MyClass (Cat2) // no error.
@end
-@implementation MyClass (Cat2) // expected-error {{unavailable}}
+@implementation MyClass (Cat2) // no error.
@end
int main() {
diff --git a/test/SemaObjC/warn-deprecated-implementations.m b/test/SemaObjC/warn-deprecated-implementations.m
index 0c341165b0f0..df2557b9cd52 100644
--- a/test/SemaObjC/warn-deprecated-implementations.m
+++ b/test/SemaObjC/warn-deprecated-implementations.m
@@ -28,15 +28,14 @@
- (void) G {} // No warning, implementing its own deprecated method
@end
-__attribute__((deprecated)) // expected-note 2 {{'CL' has been explicitly marked deprecated here}}
+__attribute__((deprecated)) // expected-note {{'CL' has been explicitly marked deprecated here}}
@interface CL // expected-note 2 {{class declared here}}
@end
@implementation CL // expected-warning {{Implementing deprecated class}}
@end
-@implementation CL ( SomeCategory ) // expected-warning {{'CL' is deprecated}} \
- // expected-warning {{Implementing deprecated category}}
+@implementation CL (SomeCategory) // expected-warning {{Implementing deprecated category}}
@end
@interface CL_SUB : CL // expected-warning {{'CL' is deprecated}}
diff --git a/unittests/Format/FormatTestJS.cpp b/unittests/Format/FormatTestJS.cpp
index d8f887f9c788..92a113111b6a 100644
--- a/unittests/Format/FormatTestJS.cpp
+++ b/unittests/Format/FormatTestJS.cpp
@@ -474,9 +474,8 @@ TEST_F(FormatTestJS, FormatsFreestandingFunctions) {
"(function f() {\n"
" var x = 1;\n"
"}());\n");
- // Known issue: this should wrap after {}, but calculateBraceTypes
- // misclassifies the first braces as a BK_BracedInit.
- verifyFormat("function aFunction(){} {\n"
+ verifyFormat("function aFunction() {}\n"
+ "{\n"
" let x = 1;\n"
" console.log(x);\n"
"}\n");
@@ -1233,6 +1232,10 @@ TEST_F(FormatTestJS, ClassDeclarations) {
verifyFormat("class C {\n x: string = 12;\n}");
verifyFormat("class C {\n x(): string => 12;\n}");
verifyFormat("class C {\n ['x' + 2]: string = 12;\n}");
+ verifyFormat("class C {\n"
+ " foo() {}\n"
+ " [bar]() {}\n"
+ "}\n");
verifyFormat("class C {\n private x: string = 12;\n}");
verifyFormat("class C {\n private static x: string = 12;\n}");
verifyFormat("class C {\n static x(): string {\n return 'asd';\n }\n}");
diff --git a/utils/TableGen/ClangAttrEmitter.cpp b/utils/TableGen/ClangAttrEmitter.cpp
index edd3e38471be..b6d2988964b4 100644
--- a/utils/TableGen/ClangAttrEmitter.cpp
+++ b/utils/TableGen/ClangAttrEmitter.cpp
@@ -718,9 +718,9 @@ namespace {
};
// Unique the enums, but maintain the original declaration ordering.
- std::vector<std::string>
- uniqueEnumsInOrder(const std::vector<std::string> &enums) {
- std::vector<std::string> uniques;
+ std::vector<StringRef>
+ uniqueEnumsInOrder(const std::vector<StringRef> &enums) {
+ std::vector<StringRef> uniques;
SmallDenseSet<StringRef, 8> unique_set;
for (const auto &i : enums) {
if (unique_set.insert(i).second)
@@ -731,7 +731,8 @@ namespace {
class EnumArgument : public Argument {
std::string type;
- std::vector<std::string> values, enums, uniques;
+ std::vector<StringRef> values, enums, uniques;
+
public:
EnumArgument(const Record &Arg, StringRef Attr)
: Argument(Arg, Attr), type(Arg.getValueAsString("Type")),
@@ -850,7 +851,7 @@ namespace {
class VariadicEnumArgument: public VariadicArgument {
std::string type, QualifiedTypeName;
- std::vector<std::string> values, enums, uniques;
+ std::vector<StringRef> values, enums, uniques;
protected:
void writeValueImpl(raw_ostream &OS) const override {
@@ -1591,8 +1592,9 @@ struct AttributeSubjectMatchRule {
}
std::string getEnumValueName() const {
- std::string Result =
- "SubjectMatchRule_" + MetaSubject->getValueAsString("Name");
+ SmallString<128> Result;
+ Result += "SubjectMatchRule_";
+ Result += MetaSubject->getValueAsString("Name");
if (isSubRule()) {
Result += "_";
if (isNegatedSubRule())
@@ -1601,7 +1603,7 @@ struct AttributeSubjectMatchRule {
}
if (isAbstractRule())
Result += "_abstract";
- return Result;
+ return Result.str();
}
std::string getEnumValue() const { return "attr::" + getEnumValueName(); }
@@ -2603,7 +2605,7 @@ void EmitClangAttrPCHWrite(RecordKeeper &Records, raw_ostream &OS) {
// append a unique suffix to distinguish this set of target checks from other
// TargetSpecificAttr records.
static void GenerateTargetSpecificAttrChecks(const Record *R,
- std::vector<std::string> &Arches,
+ std::vector<StringRef> &Arches,
std::string &Test,
std::string *FnName) {
// It is assumed that there will be an llvm::Triple object
@@ -2613,8 +2615,9 @@ static void GenerateTargetSpecificAttrChecks(const Record *R,
Test += "(";
for (auto I = Arches.begin(), E = Arches.end(); I != E; ++I) {
- std::string Part = *I;
- Test += "T.getArch() == llvm::Triple::" + Part;
+ StringRef Part = *I;
+ Test += "T.getArch() == llvm::Triple::";
+ Test += Part;
if (I + 1 != E)
Test += " || ";
if (FnName)
@@ -2627,11 +2630,12 @@ static void GenerateTargetSpecificAttrChecks(const Record *R,
// We know that there was at least one arch test, so we need to and in the
// OS tests.
Test += " && (";
- std::vector<std::string> OSes = R->getValueAsListOfStrings("OSes");
+ std::vector<StringRef> OSes = R->getValueAsListOfStrings("OSes");
for (auto I = OSes.begin(), E = OSes.end(); I != E; ++I) {
- std::string Part = *I;
- Test += "T.getOS() == llvm::Triple::" + Part;
+ StringRef Part = *I;
+ Test += "T.getOS() == llvm::Triple::";
+ Test += Part;
if (I + 1 != E)
Test += " || ";
if (FnName)
@@ -2643,10 +2647,11 @@ static void GenerateTargetSpecificAttrChecks(const Record *R,
// If one or more CXX ABIs are specified, check those as well.
if (!R->isValueUnset("CXXABIs")) {
Test += " && (";
- std::vector<std::string> CXXABIs = R->getValueAsListOfStrings("CXXABIs");
+ std::vector<StringRef> CXXABIs = R->getValueAsListOfStrings("CXXABIs");
for (auto I = CXXABIs.begin(), E = CXXABIs.end(); I != E; ++I) {
- std::string Part = *I;
- Test += "Target.getCXXABI().getKind() == TargetCXXABI::" + Part;
+ StringRef Part = *I;
+ Test += "Target.getCXXABI().getKind() == TargetCXXABI::";
+ Test += Part;
if (I + 1 != E)
Test += " || ";
if (FnName)
@@ -2684,7 +2689,7 @@ static void GenerateHasAttrSpellingStringSwitch(
std::string Test;
if (Attr->isSubClassOf("TargetSpecificAttr")) {
const Record *R = Attr->getValueAsDef("Target");
- std::vector<std::string> Arches = R->getValueAsListOfStrings("Arches");
+ std::vector<StringRef> Arches = R->getValueAsListOfStrings("Arches");
GenerateTargetSpecificAttrChecks(R, Arches, Test, nullptr);
// If this is the C++11 variety, also add in the LangOpts test.
@@ -3323,7 +3328,7 @@ static std::string GenerateTargetRequirements(const Record &Attr,
// Get the list of architectures to be tested for.
const Record *R = Attr.getValueAsDef("Target");
- std::vector<std::string> Arches = R->getValueAsListOfStrings("Arches");
+ std::vector<StringRef> Arches = R->getValueAsListOfStrings("Arches");
if (Arches.empty()) {
PrintError(Attr.getLoc(), "Empty list of target architectures for a "
"target-specific attr");
@@ -3340,9 +3345,10 @@ static std::string GenerateTargetRequirements(const Record &Attr,
std::string APK = Attr.getValueAsString("ParseKind");
for (const auto &I : Dupes) {
if (I.first == APK) {
- std::vector<std::string> DA = I.second->getValueAsDef("Target")
- ->getValueAsListOfStrings("Arches");
- std::move(DA.begin(), DA.end(), std::back_inserter(Arches));
+ std::vector<StringRef> DA =
+ I.second->getValueAsDef("Target")->getValueAsListOfStrings(
+ "Arches");
+ Arches.insert(Arches.end(), DA.begin(), DA.end());
}
}
}
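The common thread in this and the remaining emitter hunks: Record::getValueAsString and getValueAsListOfStrings now return StringRefs, and concatenating a string literal with a StringRef no longer yields a std::string (with LLVM's Twine.h in scope that + builds a Twine, which std::string cannot consume), hence the switch to piecewise += appends, SmallString accumulation, and explicit .str() calls. A sketch of the pitfall and both fixes, assuming LLVM's ADT headers and the LLVM of this import, where StringRef still converts implicitly to std::string:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/Twine.h"
    #include <string>

    std::string sketch(llvm::StringRef Part) {
      std::string Test;
      // Test += "prefix" + Part;   // ill-formed: this + produces a Twine,
      //                            // and std::string has no += for Twine
      Test += "prefix";             // fix 1: append piecewise
      Test += Part;                 // (StringRef converts to std::string here)

      llvm::SmallString<128> Result;  // fix 2: accumulate in a SmallString
      Result += "SubjectMatchRule_";
      Result += Part;
      Test += Result.str();           // read it back out as a StringRef
      return Test;
    }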
diff --git a/utils/TableGen/ClangDiagnosticsEmitter.cpp b/utils/TableGen/ClangDiagnosticsEmitter.cpp
index cad08afd846f..d9d99e0bb002 100644
--- a/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -1277,8 +1277,8 @@ void EmitClangDiagDocs(RecordKeeper &Records, raw_ostream &OS) {
bool IsSynonym = GroupInfo.DiagsInGroup.empty() &&
GroupInfo.SubGroups.size() == 1;
- writeHeader((IsRemarkGroup ? "-R" : "-W") +
- G->getValueAsString("GroupName"),
+ writeHeader(((IsRemarkGroup ? "-R" : "-W") +
+ G->getValueAsString("GroupName")).str(),
OS);
if (!IsSynonym) {
diff --git a/utils/TableGen/ClangOptionDocEmitter.cpp b/utils/TableGen/ClangOptionDocEmitter.cpp
index aa7502e2c850..59314510e0ad 100644
--- a/utils/TableGen/ClangOptionDocEmitter.cpp
+++ b/utils/TableGen/ClangOptionDocEmitter.cpp
@@ -83,7 +83,7 @@ Documentation extractDocumentation(RecordKeeper &Records) {
}
// Pretend no-X and Xno-Y options are aliases of X and XY.
- auto Name = R->getValueAsString("Name");
+ std::string Name = R->getValueAsString("Name");
if (Name.size() >= 4) {
if (Name.substr(0, 3) == "no-" && OptionsByName[Name.substr(3)]) {
Aliases[OptionsByName[Name.substr(3)]].push_back(R);
@@ -229,7 +229,7 @@ std::string getRSTStringWithTextFallback(const Record *R, StringRef Primary,
}
void emitOptionWithArgs(StringRef Prefix, const Record *Option,
- ArrayRef<std::string> Args, raw_ostream &OS) {
+ ArrayRef<StringRef> Args, raw_ostream &OS) {
OS << Prefix << escapeRST(Option->getValueAsString("Name"));
std::pair<StringRef, StringRef> Separators =
@@ -261,14 +261,15 @@ void emitOptionName(StringRef Prefix, const Record *Option, raw_ostream &OS) {
}
}
- emitOptionWithArgs(Prefix, Option, Args, OS);
+ emitOptionWithArgs(Prefix, Option, std::vector<StringRef>(Args.begin(), Args.end()), OS);
auto AliasArgs = Option->getValueAsListOfStrings("AliasArgs");
if (!AliasArgs.empty()) {
Record *Alias = Option->getValueAsDef("Alias");
OS << " (equivalent to ";
- emitOptionWithArgs(Alias->getValueAsListOfStrings("Prefixes").front(),
- Alias, Option->getValueAsListOfStrings("AliasArgs"), OS);
+ emitOptionWithArgs(
+ Alias->getValueAsListOfStrings("Prefixes").front(), Alias,
+ AliasArgs, OS);
OS << ")";
}
}
@@ -310,7 +311,7 @@ void emitOption(const DocumentedOption &Option, const Record *DocInfo,
forEachOptionName(Option, DocInfo, [&](const Record *Option) {
for (auto &Prefix : Option->getValueAsListOfStrings("Prefixes"))
SphinxOptionIDs.push_back(
- getSphinxOptionID(Prefix + Option->getValueAsString("Name")));
+ getSphinxOptionID((Prefix + Option->getValueAsString("Name")).str()));
});
assert(!SphinxOptionIDs.empty() && "no flags for option");
static std::map<std::string, int> NextSuffix;
diff --git a/utils/TableGen/ClangSACheckersEmitter.cpp b/utils/TableGen/ClangSACheckersEmitter.cpp
index 115527ae3303..8f3de0b67d77 100644
--- a/utils/TableGen/ClangSACheckersEmitter.cpp
+++ b/utils/TableGen/ClangSACheckersEmitter.cpp
@@ -51,7 +51,8 @@ static std::string getParentPackageFullName(const Record *R) {
static std::string getPackageFullName(const Record *R) {
std::string name = getParentPackageFullName(R);
if (!name.empty()) name += ".";
- return name + R->getValueAsString("PackageName");
+ name += R->getValueAsString("PackageName");
+ return name;
}
static std::string getCheckerFullName(const Record *R) {