author    Dimitry Andric <dim@FreeBSD.org>  2015-09-06 18:36:24 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2015-09-06 18:36:24 +0000
commit    36c5ade2f4674b544039d78db4c466756cf142b0 (patch)
tree      3d3ed1e1987dbe6444294b1b4e249814b97b97a5
parent    51ece4aae5857052d224ce52277924c74685714e (diff)
download  src-36c5ade2f4674b544039d78db4c466756cf142b0.tar.gz
          src-36c5ade2f4674b544039d78db4c466756cf142b0.zip

Import clang 3.7.0 release (r246257).

Tag: vendor/clang/clang-release_370-r246257

Notes:
    svn path=/vendor/clang/dist/; revision=287512
    svn path=/vendor/clang/clang-release_370-r246257/; revision=287513; tag=vendor/clang/clang-release_370-r246257
-rw-r--r--  README.txt | 2
-rw-r--r--  docs/AttributeReference.rst | 1759
-rw-r--r--  docs/CMakeLists.txt | 9
-rw-r--r--  docs/ExternalClangExamples.rst | 2
-rw-r--r--  docs/InternalsManual.rst | 2
-rw-r--r--  docs/Makefile | 1
-rw-r--r--  docs/ReleaseNotes.rst | 237
-rw-r--r--  docs/UsersManual.rst | 73
-rw-r--r--  docs/doxygen.cfg.in | 2
-rw-r--r--  include/clang/AST/ASTVector.h | 17
-rw-r--r--  include/clang/AST/NSAPI.h | 8
-rw-r--r--  include/clang/AST/StmtOpenMP.h | 25
-rw-r--r--  include/clang/Analysis/Support/BumpVector.h | 17
-rw-r--r--  include/clang/Basic/Attr.td | 2
-rw-r--r--  include/clang/Basic/AttrDocs.td | 28
-rw-r--r--  include/clang/Basic/DiagnosticCommonKinds.td | 2
-rw-r--r--  include/clang/Basic/DiagnosticParseKinds.td | 4
-rw-r--r--  include/clang/Basic/DiagnosticSemaKinds.td | 2
-rw-r--r--  include/clang/Basic/LangOptions.def | 1
-rw-r--r--  include/clang/Basic/TokenKinds.def | 8
-rw-r--r--  include/clang/CodeGen/ObjectFilePCHContainerOperations.h | 24
-rw-r--r--  include/clang/Driver/CC1Options.td | 3
-rw-r--r--  include/clang/Driver/Options.td | 7
-rw-r--r--  include/clang/Frontend/ASTUnit.h | 4
-rw-r--r--  include/clang/Frontend/CompilerInstance.h | 32
-rw-r--r--  include/clang/Frontend/PCHContainerOperations.h | 72
-rw-r--r--  include/clang/Frontend/Utils.h | 4
-rw-r--r--  include/clang/Lex/HeaderSearchOptions.h | 21
-rw-r--r--  include/clang/Parse/Parser.h | 21
-rw-r--r--  include/clang/Sema/Sema.h | 18
-rw-r--r--  include/clang/Serialization/ASTReader.h | 10
-rw-r--r--  include/clang/Serialization/GlobalModuleIndex.h | 3
-rw-r--r--  include/clang/Serialization/ModuleManager.h | 8
-rw-r--r--  include/clang/Tooling/Refactoring.h | 2
-rw-r--r--  include/clang/Tooling/Tooling.h | 12
-rw-r--r--  lib/ARCMigrate/ARCMT.cpp | 10
-rw-r--r--  lib/AST/NSAPI.cpp | 22
-rw-r--r--  lib/AST/NestedNameSpecifier.cpp | 16
-rw-r--r--  lib/AST/RecordLayoutBuilder.cpp | 6
-rw-r--r--  lib/AST/Stmt.cpp | 13
-rw-r--r--  lib/Basic/FileManager.cpp | 4
-rw-r--r--  lib/Basic/IdentifierTable.cpp | 3
-rw-r--r--  lib/Basic/Module.cpp | 1
-rw-r--r--  lib/Basic/Targets.cpp | 16
-rw-r--r--  lib/Basic/Version.cpp | 2
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 9
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 6
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 13
-rw-r--r--  lib/CodeGen/CGStmtOpenMP.cpp | 40
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 29
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp | 18
-rw-r--r--  lib/CodeGen/ObjectFilePCHContainerOperations.cpp | 4
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 39
-rw-r--r--  lib/Driver/MinGWToolChain.cpp | 201
-rw-r--r--  lib/Driver/ToolChain.cpp | 11
-rw-r--r--  lib/Driver/ToolChains.h | 4
-rw-r--r--  lib/Driver/Tools.cpp | 276
-rw-r--r--  lib/Driver/Tools.h | 6
-rw-r--r--  lib/Frontend/ASTMerge.cpp | 2
-rw-r--r--  lib/Frontend/ASTUnit.cpp | 6
-rw-r--r--  lib/Frontend/ChainedIncludesSource.cpp | 2
-rw-r--r--  lib/Frontend/CompilerInstance.cpp | 16
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 6
-rw-r--r--  lib/Frontend/FrontendAction.cpp | 6
-rw-r--r--  lib/Frontend/FrontendActions.cpp | 8
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp | 8
-rw-r--r--  lib/Frontend/PCHContainerOperations.cpp | 31
-rw-r--r--  lib/Headers/CMakeLists.txt | 1
-rw-r--r--  lib/Headers/__wmmintrin_aes.h | 8
-rw-r--r--  lib/Headers/__wmmintrin_pclmul.h | 4
-rw-r--r--  lib/Headers/adxintrin.h | 6
-rw-r--r--  lib/Headers/altivec.h | 227
-rw-r--r--  lib/Headers/ammintrin.h | 8
-rw-r--r--  lib/Headers/avx2intrin.h | 2
-rw-r--r--  lib/Headers/avx512bwintrin.h | 2
-rw-r--r--  lib/Headers/avx512dqintrin.h | 2
-rw-r--r--  lib/Headers/avx512fintrin.h | 2
-rw-r--r--  lib/Headers/avx512vlbwintrin.h | 2
-rw-r--r--  lib/Headers/avx512vldqintrin.h | 2
-rw-r--r--  lib/Headers/avx512vlintrin.h | 2
-rw-r--r--  lib/Headers/avxintrin.h | 2
-rw-r--r--  lib/Headers/bmi2intrin.h | 6
-rw-r--r--  lib/Headers/bmiintrin.h | 6
-rw-r--r--  lib/Headers/emmintrin.h | 8
-rw-r--r--  lib/Headers/f16cintrin.h | 6
-rw-r--r--  lib/Headers/fma4intrin.h | 8
-rw-r--r--  lib/Headers/fmaintrin.h | 8
-rw-r--r--  lib/Headers/immintrin.h | 74
-rw-r--r--  lib/Headers/lzcntintrin.h | 6
-rw-r--r--  lib/Headers/mm3dnow.h | 2
-rw-r--r--  lib/Headers/mmintrin.h | 8
-rw-r--r--  lib/Headers/module.modulemap | 30
-rw-r--r--  lib/Headers/nmmintrin.h | 5
-rw-r--r--  lib/Headers/pmmintrin.h | 8
-rw-r--r--  lib/Headers/popcntintrin.h | 6
-rw-r--r--  lib/Headers/rdseedintrin.h | 5
-rw-r--r--  lib/Headers/rtmintrin.h | 2
-rw-r--r--  lib/Headers/s390intrin.h | 4
-rw-r--r--  lib/Headers/shaintrin.h | 6
-rw-r--r--  lib/Headers/smmintrin.h | 15
-rw-r--r--  lib/Headers/tbmintrin.h | 6
-rw-r--r--  lib/Headers/tmmintrin.h | 8
-rw-r--r--  lib/Headers/vecintrin.h | 8946
-rw-r--r--  lib/Headers/wmmintrin.h | 9
-rw-r--r--  lib/Headers/x86intrin.h | 24
-rw-r--r--  lib/Headers/xmmintrin.h | 8
-rw-r--r--  lib/Headers/xopintrin.h | 8
-rw-r--r--  lib/Lex/HeaderSearch.cpp | 4
-rw-r--r--  lib/Parse/ParseDecl.cpp | 8
-rw-r--r--  lib/Parse/Parser.cpp | 8
-rw-r--r--  lib/Sema/DeclSpec.cpp | 22
-rw-r--r--  lib/Sema/SemaChecking.cpp | 165
-rw-r--r--  lib/Sema/SemaDecl.cpp | 12
-rw-r--r--  lib/Sema/SemaExceptionSpec.cpp | 8
-rw-r--r--  lib/Sema/SemaExpr.cpp | 115
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 4
-rw-r--r--  lib/Sema/SemaInit.cpp | 51
-rw-r--r--  lib/Sema/SemaLookup.cpp | 2
-rw-r--r--  lib/Sema/SemaOpenMP.cpp | 280
-rw-r--r--  lib/Serialization/ASTReader.cpp | 20
-rw-r--r--  lib/Serialization/ASTReaderStmt.cpp | 4
-rw-r--r--  lib/Serialization/ASTWriterStmt.cpp | 3
-rw-r--r--  lib/Serialization/GlobalModuleIndex.cpp | 12
-rw-r--r--  lib/Serialization/ModuleManager.cpp | 6
-rw-r--r--  test/Analysis/dead-stores.m | 2
-rw-r--r--  test/CodeGen/builtins-ppc-altivec.c | 146
-rw-r--r--  test/CodeGen/builtins-systemz-zvector-error.c | 576
-rw-r--r--  test/CodeGen/builtins-systemz-zvector.c | 2967
-rw-r--r--  test/CodeGen/integer-overflow.c | 7
-rw-r--r--  test/CodeGen/le32-regparm.c | 1
-rw-r--r--  test/CodeGen/long_double_fp128.cpp | 22
-rw-r--r--  test/CodeGen/palignr.c | 4
-rw-r--r--  test/CodeGen/x86_64-fp128.c | 115
-rw-r--r--  test/CodeGen/zvector.c | 2798
-rw-r--r--  test/CodeGenCXX/dllimport-rtti.cpp | 13
-rw-r--r--  test/CodeGenCXX/thunks.cpp | 17
-rw-r--r--  test/CodeGenObjC/Inputs/nsvalue-boxed-expressions-support.h | 63
-rw-r--r--  test/CodeGenObjC/nsvalue-objc-boxable-ios-arc.m | 126
-rw-r--r--  test/CodeGenObjC/nsvalue-objc-boxable-ios.m | 114
-rw-r--r--  test/CodeGenObjC/nsvalue-objc-boxable-mac-arc.m | 130
-rw-r--r--  test/CodeGenObjC/nsvalue-objc-boxable-mac.m | 118
-rw-r--r--  test/CodeGenObjCXX/designated-initializers.mm | 36
-rw-r--r--  test/CodeGenOpenCL/vector_shufflevector_valid.cl | 13
-rw-r--r--  test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_clang_tree/mingw32/i686-w64-mingw32/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_clang_tree/mingw32/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/i686-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/minw32/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/i686-w64-mingw32/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/i686-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_opensuse_tree/usr/x86_64-w64-mingw32/sys-root/mingw/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/86_64-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/x86_64-w64-mingw32/include/.keep | 0
-rw-r--r--  test/Driver/arm-ias-Wa.s | 64
-rw-r--r--  test/Driver/cuda-options.cu | 14
-rw-r--r--  test/Driver/linux-ld.c | 7
-rw-r--r--  test/Driver/mingw.cpp | 59
-rw-r--r--  test/Headers/pmmintrin.c | 12
-rw-r--r--  test/Headers/x86intrin-2.c | 137
-rw-r--r--  test/Headers/x86intrin.c | 121
-rw-r--r--  test/Lexer/has_attribute_objc_boxable.m | 8
-rw-r--r--  test/Lexer/has_feature_boxed_nsvalue_expressions.m | 8
-rw-r--r--  test/Misc/cc1as-asm.s | 3
-rw-r--r--  test/Modules/pch_container.m | 11
-rw-r--r--  test/OpenMP/for_codegen.cpp | 79
-rw-r--r--  test/OpenMP/for_loop_messages.cpp | 8
-rw-r--r--  test/OpenMP/for_simd_loop_messages.cpp | 8
-rw-r--r--  test/OpenMP/openmp_common.c | 5
-rw-r--r--  test/OpenMP/parallel_copyin_codegen.cpp | 259
-rw-r--r--  test/OpenMP/parallel_for_loop_messages.cpp | 8
-rw-r--r--  test/OpenMP/parallel_for_simd_loop_messages.cpp | 8
-rw-r--r--  test/OpenMP/simd_linear_messages.cpp | 3
-rw-r--r--  test/OpenMP/simd_loop_messages.cpp | 2
-rw-r--r--  test/PCH/objc_boxable.m | 17
-rw-r--r--  test/PCH/objc_boxable_record.h | 5
-rw-r--r--  test/PCH/objc_boxable_record_attr.h | 3
-rw-r--r--  test/Preprocessor/predefined-arch-macros.c | 9
-rw-r--r--  test/Sema/dllimport.c | 2
-rw-r--r--  test/Sema/typo-correction.c | 6
-rw-r--r--  test/Sema/zvector.c | 1009
-rw-r--r--  test/SemaCXX/warn-pessmizing-move.cpp | 50
-rw-r--r--  test/SemaCXX/warn-redundant-move.cpp | 72
-rw-r--r--  test/SemaObjC/circular-container.m | 61
-rw-r--r--  test/SemaObjC/comptypes-9.m | 2
-rw-r--r--  test/SemaObjC/objc-boxed-expressions-nsvalue.m | 101
-rw-r--r--  test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm | 118
-rw-r--r--  test/SemaTemplate/crash-unparsed-exception.cpp | 17
-rw-r--r--  tools/arcmt-test/arcmt-test.cpp | 6
-rw-r--r--  tools/c-index-test/c-index-test.c | 11
-rw-r--r--  tools/clang-check/CMakeLists.txt | 1
-rw-r--r--  tools/clang-check/ClangCheck.cpp | 3
-rw-r--r--  tools/driver/cc1_main.cpp | 8
-rw-r--r--  tools/driver/cc1as_main.cpp | 3
-rw-r--r--  tools/libclang/CIndex.cpp | 8
-rw-r--r--  tools/libclang/CIndexer.h | 2
-rw-r--r--  tools/libclang/CMakeLists.txt | 1
-rw-r--r--  unittests/ASTMatchers/ASTMatchersTest.h | 2
-rw-r--r--  utils/TableGen/ClangAttrEmitter.cpp | 6
-rw-r--r--  www/analyzer/checker_dev_manual.html | 6
-rw-r--r--  www/analyzer/installation.html | 2
-rw-r--r--  www/analyzer/menu.html.incl | 4
-rw-r--r--  www/analyzer/open_projects.html | 2
-rw-r--r--  www/cxx_dr_status.html | 24
-rw-r--r--  www/demo/index.cgi | 2
-rw-r--r--  www/get_involved.html | 12
-rwxr-xr-x  www/make_cxx_dr_status | 4
-rw-r--r--  www/menu.html.incl | 6
226 files changed, 22106 insertions, 1063 deletions
diff --git a/README.txt b/README.txt
index 474c67cf59ef..ada9ebc9e231 100644
--- a/README.txt
+++ b/README.txt
@@ -20,7 +20,7 @@ Information on the LLVM project: http://llvm.org/
If you have questions or comments about Clang, a great place to discuss them is
on the Clang development mailing list:
- http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev
+ http://lists.llvm.org/mailman/listinfo/cfe-dev
If you find a bug in Clang, please file it in the LLVM bug tracker:
http://llvm.org/bugs/
diff --git a/docs/AttributeReference.rst b/docs/AttributeReference.rst
index a763ddeaeb10..e282824952ba 100644
--- a/docs/AttributeReference.rst
+++ b/docs/AttributeReference.rst
@@ -1,13 +1,1760 @@
..
-------------------------------------------------------------------
NOTE: This file is automatically generated by running clang-tblgen
- -gen-attr-docs. Do not edit this file by hand!! The contents for
- this file are automatically generated by a server-side process.
-
- Please do not commit this file. The file exists for local testing
- purposes only.
+ -gen-attr-docs. Do not edit this file by hand!!
-------------------------------------------------------------------
===================
Attributes in Clang
-===================
\ No newline at end of file
+===================
+.. contents::
+ :local:
+
+Introduction
+============
+
+This page lists the attributes currently supported by Clang.
+
+AMD GPU Register Attributes
+===========================
+Clang supports attributes for controlling register usage on AMD GPU
+targets. These attributes may be attached to a kernel function
+definition and serve as an optimization hint to the backend for the
+maximum number of registers to use. This is useful in cases where
+register-limited occupancy is known to be an important factor for the
+performance of the kernel.
+
+The semantics are as follows:
+
+- The backend will attempt to limit the number of used registers to
+ the specified value, but the exact number used is not
+ guaranteed. The number used may be rounded up to satisfy the
+ allocation requirements or ABI constraints of the subtarget. For
+ example, on Southern Islands VGPRs may only be allocated in
+ increments of 4, so requesting a limit of 39 VGPRs will really
+ attempt to use up to 40. Requesting more registers than the
+ subtarget supports will truncate to the maximum allowed. The backend
+ may also use fewer registers than requested whenever possible.
+
+- A value of 0 implies the default, i.e. no limit on register usage.
+
+- Ignored on older VLIW subtargets which did not have separate scalar
+ and vector registers, R600 through Northern Islands.
+
+amdgpu_num_sgpr
+---------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Clang supports the
+``__attribute__((amdgpu_num_sgpr(<num_registers>)))`` attribute on AMD
+Southern Islands GPUs and later for controlling the number of scalar
+registers. A typical value would be between 8 and 104 in increments of
+8.
+
+Due to common instruction constraints, an additional 2-4 SGPRs are
+typically required for internal use depending on features used. This
+value is a hint for the total number of SGPRs to use, and not the
+number of user SGPRs, so no special consideration needs to be given
+for these.
+
+
+amdgpu_num_vgpr
+---------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Clang supports the
+``__attribute__((amdgpu_num_vgpr(<num_registers>)))`` attribute on AMD
+Southern Islands GPUs and later for controlling the number of vector
+registers. A typical value would be between 4 and 256 in increments
+of 4.
+
+
+Function Attributes
+===================
+
+
+interrupt
+---------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Clang supports the GNU style ``__attribute__((interrupt("TYPE")))`` attribute on
+ARM targets. This attribute may be attached to a function definition and
+instructs the backend to generate appropriate function entry/exit code so that
+it can be used directly as an interrupt service routine.
+
+The parameter passed to the interrupt attribute is optional, but if
+provided it must be a string literal with one of the following values: "IRQ",
+"FIQ", "SWI", "ABORT", "UNDEF".
+
+The semantics are as follows:
+
+- If the function is AAPCS, Clang instructs the backend to realign the stack to
+ 8 bytes on entry. This is a general requirement of the AAPCS at public
+ interfaces, but may not hold when an exception is taken. Doing this allows
+ other AAPCS functions to be called.
+- If the CPU is M-class this is all that needs to be done since the architecture
+ itself is designed in such a way that functions obeying the normal AAPCS ABI
+ constraints are valid exception handlers.
+- If the CPU is not M-class, the prologue and epilogue are modified to save all
+ non-banked registers that are used, so that upon return the user-mode state
+ will not be corrupted. Note that to avoid unnecessary overhead, only
+ general-purpose (integer) registers are saved in this way. If VFP operations
+ are needed, that state must be saved manually.
+
+ Specifically, interrupt kinds other than "FIQ" will save all core registers
+ except "lr" and "sp". "FIQ" interrupts will save r0-r7.
+- If the CPU is not M-class, the return instruction is changed to one of the
+ canonical sequences permitted by the architecture for exception return. Where
+ possible the function itself will make the necessary "lr" adjustments so that
+ the "preferred return address" is selected.
+
+ Unfortunately the compiler is unable to make this guarantee for an "UNDEF"
+ handler, where the offset from "lr" to the preferred return address depends on
+ the execution state of the code which generated the exception. In this case
+ a sequence equivalent to "movs pc, lr" will be used.
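+
+For illustration only, a minimal handler using this attribute might look like
+the following sketch (the handler name and body are hypothetical, and an ARM
+target is assumed):
+
+.. code-block:: c++
+
+  // Hypothetical IRQ handler: the attribute makes the generated entry/exit
+  // code suitable for installing the function directly as an ISR.
+  __attribute__((interrupt("IRQ"))) void irq_handler(void) {
+    // acknowledge and service the interrupt here
+  }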
+
+
+acquire_capability (acquire_shared_capability, clang::acquire_capability, clang::acquire_shared_capability)
+-----------------------------------------------------------------------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+Marks a function as acquiring a capability.
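+
+As an illustrative sketch only (the ``Mutex`` wrapper below is hypothetical and
+also relies on the related ``capability`` and ``release_capability``
+attributes), a locking primitive might be annotated as follows:
+
+.. code-block:: c++
+
+  // Hypothetical capability type for -Wthread-safety analysis.
+  struct __attribute__((capability("mutex"))) Mutex {
+    void Lock() __attribute__((acquire_capability));
+    void Unlock() __attribute__((release_capability));
+  };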
+
+
+assert_capability (assert_shared_capability, clang::assert_capability, clang::assert_shared_capability)
+-------------------------------------------------------------------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+Marks a function that dynamically tests whether a capability is held, and halts
+the program if it is not held.
+
+
+assume_aligned (gnu::assume_aligned)
+------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+Use ``__attribute__((assume_aligned(<alignment>[,<offset>])))`` on a function
+declaration to specify that the return value of the function (which must be a
+pointer type) has the specified offset, in bytes, from an address with the
+specified alignment. The offset is taken to be zero if omitted.
+
+.. code-block:: c++
+
+ // The returned pointer value has 32-byte alignment.
+ void *a() __attribute__((assume_aligned (32)));
+
+ // The returned pointer value is 4 bytes greater than an address having
+ // 32-byte alignment.
+ void *b() __attribute__((assume_aligned (32, 4)));
+
+Note that this attribute provides information to the compiler regarding a
+condition that the code already ensures is true. It does not cause the compiler
+to enforce the provided alignment assumption.
+
+
+availability
+------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+The ``availability`` attribute can be placed on declarations to describe the
+lifecycle of that declaration relative to operating system versions. Consider
+the function declaration for a hypothetical function ``f``:
+
+.. code-block:: c++
+
+ void f(void) __attribute__((availability(macosx,introduced=10.4,deprecated=10.6,obsoleted=10.7)));
+
+The availability attribute states that ``f`` was introduced in Mac OS X 10.4,
+deprecated in Mac OS X 10.6, and obsoleted in Mac OS X 10.7. This information
+is used by Clang to determine when it is safe to use ``f``: for example, if
+Clang is instructed to compile code for Mac OS X 10.5, a call to ``f()``
+succeeds. If Clang is instructed to compile code for Mac OS X 10.6, the call
+succeeds but Clang emits a warning specifying that the function is deprecated.
+Finally, if Clang is instructed to compile code for Mac OS X 10.7, the call
+fails because ``f()`` is no longer available.
+
+The availability attribute is a comma-separated list starting with the
+platform name and then including clauses specifying important milestones in the
+declaration's lifetime (in any order) along with additional information. Those
+clauses can be:
+
+introduced=\ *version*
+ The first version in which this declaration was introduced.
+
+deprecated=\ *version*
+ The first version in which this declaration was deprecated, meaning that
+ users should migrate away from this API.
+
+obsoleted=\ *version*
+ The first version in which this declaration was obsoleted, meaning that it
+ was removed completely and can no longer be used.
+
+unavailable
+ This declaration is never available on this platform.
+
+message=\ *string-literal*
+ Additional message text that Clang will provide when emitting a warning or
+ error about use of a deprecated or obsoleted declaration. Useful to direct
+ users to replacement APIs.
+
+Multiple availability attributes can be placed on a declaration, which may
+correspond to different platforms. Only the availability attribute with the
+platform corresponding to the target platform will be used; any others will be
+ignored. If no availability attribute specifies availability for the current
+target platform, the availability attributes are ignored. Supported platforms
+are:
+
+``ios``
+ Apple's iOS operating system. The minimum deployment target is specified by
+ the ``-mios-version-min=*version*`` or ``-miphoneos-version-min=*version*``
+ command-line arguments.
+
+``macosx``
+ Apple's Mac OS X operating system. The minimum deployment target is
+ specified by the ``-mmacosx-version-min=*version*`` command-line argument.
+
+A declaration can be used even when deploying back to a platform version prior
+to when the declaration was introduced. When this happens, the declaration is
+`weakly linked
+<https://developer.apple.com/library/mac/#documentation/MacOSX/Conceptual/BPFrameworks/Concepts/WeakLinking.html>`_,
+as if the ``weak_import`` attribute were added to the declaration. A
+weakly-linked declaration may or may not be present at run-time, and a program
+can determine whether the declaration is present by checking whether the
+address of that declaration is non-NULL.
+
+If there are multiple declarations of the same entity, the availability
+attributes must either match on a per-platform basis or later
+declarations must not have availability attributes for that
+platform. For example:
+
+.. code-block:: c
+
+ void g(void) __attribute__((availability(macosx,introduced=10.4)));
+ void g(void) __attribute__((availability(macosx,introduced=10.4))); // okay, matches
+ void g(void) __attribute__((availability(ios,introduced=4.0))); // okay, adds a new platform
+ void g(void); // okay, inherits both macosx and ios availability from above.
+ void g(void) __attribute__((availability(macosx,introduced=10.5))); // error: mismatch
+
+When one method overrides another, the overriding method can be more widely available than the overridden method, for example:
+
+.. code-block:: objc
+
+ @interface A
+ - (id)method __attribute__((availability(macosx,introduced=10.4)));
+ - (id)method2 __attribute__((availability(macosx,introduced=10.4)));
+ @end
+
+ @interface B : A
+ - (id)method __attribute__((availability(macosx,introduced=10.3))); // okay: method moved into base class later
+ - (id)method __attribute__((availability(macosx,introduced=10.5))); // error: this method was available via the base class in 10.4
+ @end
+
+
+_Noreturn
+---------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+A function declared as ``_Noreturn`` shall not return to its caller. The
+compiler will generate a diagnostic for a function declared as ``_Noreturn``
+that appears to be capable of returning to its caller.
+
+
+noreturn
+--------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","X","","", ""
+
+A function declared as ``[[noreturn]]`` shall not return to its caller. The
+compiler will generate a diagnostic for a function declared as ``[[noreturn]]``
+that appears to be capable of returning to its caller.
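+
+For example, a hypothetical helper that never returns could be written as in
+the following sketch:
+
+.. code-block:: c++
+
+  #include <cstdio>
+  #include <cstdlib>
+
+  // Hypothetical: callers may rely on this function never returning, and the
+  // compiler diagnoses any path in its body that could return.
+  [[noreturn]] void fatal_error(const char *msg) {
+    std::fprintf(stderr, "fatal: %s\n", msg);
+    std::abort();
+  }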
+
+
+carries_dependency
+------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+The ``carries_dependency`` attribute specifies dependency propagation into and
+out of functions.
+
+When specified on a function or Objective-C method, the ``carries_dependency``
+attribute means that the return value carries a dependency out of the function,
+so that the implementation need not constrain ordering upon return from that
+function. Implementations of the function and its caller may choose to preserve
+dependencies instead of emitting memory ordering instructions such as fences.
+
+Note, this attribute does not change the meaning of the program, but may result
+in generation of more efficient code.
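+
+A minimal sketch (the function and variable names are hypothetical) showing
+the attribute applied to a return value:
+
+.. code-block:: c++
+
+  #include <atomic>
+
+  // Hypothetical: the pointer loaded with memory_order_consume carries its
+  // dependency out of get_node(), so callers need not add extra fences.
+  [[carries_dependency]] int *get_node(std::atomic<int *> &head) {
+    return head.load(std::memory_order_consume);
+  }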
+
+
+enable_if
+---------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+.. Note:: Some features of this attribute are experimental. The meaning of
+ multiple enable_if attributes on a single declaration is subject to change in
+ a future version of clang. Also, the ABI is not standardized and the name
+ mangling may change in future versions. To avoid that, use asm labels.
+
+The ``enable_if`` attribute can be placed on function declarations to control
+which overload is selected based on the values of the function's arguments.
+When combined with the ``overloadable`` attribute, this feature is also
+available in C.
+
+.. code-block:: c++
+
+ int isdigit(int c);
+ int isdigit(int c) __attribute__((enable_if(c <= -1 || c > 255, "chosen when 'c' is out of range"))) __attribute__((unavailable("'c' must have the value of an unsigned char or EOF")));
+
+ void foo(char c) {
+ isdigit(c);
+ isdigit(10);
+ isdigit(-10); // results in a compile-time error.
+ }
+
+The enable_if attribute takes two arguments: the first is an expression written
+in terms of the function parameters, and the second is a string, displayed in
+diagnostics, explaining why this overload candidate could not be selected. The
+expression is part of the function signature for the purposes of determining
+whether it is a redeclaration (following the rules used when determining
+whether a C++ template specialization is ODR-equivalent), but is not part of
+the type.
+
+The enable_if expression is evaluated as if it were the body of a
+bool-returning constexpr function declared with the arguments of the function
+it is being applied to, then called with the parameters at the call site. If the
+result is false or could not be determined through constant expression
+evaluation, then this overload will not be chosen and the provided string may
+be used in a diagnostic if the compile fails as a result.
+
+Because the enable_if expression is an unevaluated context, there are no global
+state changes, nor the ability to pass information from the enable_if
+expression to the function body. For example, suppose we want calls to
+strnlen(strbuf, maxlen) to resolve to strnlen_chk(strbuf, maxlen, size of
+strbuf) only if the size of strbuf can be determined:
+
+.. code-block:: c++
+
+  __attribute__((always_inline))
+  static inline size_t strnlen(const char *s, size_t maxlen)
+    __attribute__((overloadable))
+    __attribute__((enable_if(__builtin_object_size(s, 0) != -1,
+                             "chosen when the buffer size is known but 'maxlen' is not")))
+  {
+    return strnlen_chk(s, maxlen, __builtin_object_size(s, 0));
+  }
+
+Multiple enable_if attributes may be applied to a single declaration. In this
+case, the enable_if expressions are evaluated from left to right in the
+following manner. First, the candidates whose enable_if expressions evaluate to
+false or cannot be evaluated are discarded. If the remaining candidates do not
+share ODR-equivalent enable_if expressions, the overload resolution is
+ambiguous. Otherwise, enable_if overload resolution continues with the next
+enable_if attribute on the candidates that have not been discarded and have
+remaining enable_if attributes. In this way, we pick the most specific
+overload out of a number of viable overloads using enable_if.
+
+.. code-block:: c++
+
+ void f() __attribute__((enable_if(true, ""))); // #1
+ void f() __attribute__((enable_if(true, ""))) __attribute__((enable_if(true, ""))); // #2
+
+ void g(int i, int j) __attribute__((enable_if(i, ""))); // #1
+ void g(int i, int j) __attribute__((enable_if(j, ""))) __attribute__((enable_if(true, ""))); // #2
+
+In this example, a call to f() is always resolved to #2, as the first enable_if
+expression is ODR-equivalent for both declarations, but #1 does not have another
+enable_if expression to continue evaluating, so the next round of evaluation has
+only a single candidate. In a call to g(1, 1), the call is ambiguous even though
+#2 has more enable_if attributes, because the first enable_if expressions are
+not ODR-equivalent.
+
+Query for this feature with ``__has_attribute(enable_if)``.
+
+
+flatten (gnu::flatten)
+----------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+The ``flatten`` attribute causes calls within the attributed function to
+be inlined unless it is impossible to do so, for example if the body of the
+callee is unavailable or if the callee has the ``noinline`` attribute.
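+
+For illustration, a hypothetical sketch:
+
+.. code-block:: c++
+
+  void draw_background();
+  void draw_sprites();
+
+  // Hypothetical: calls made inside render() are inlined into it where possible.
+  __attribute__((flatten)) void render() {
+    draw_background();
+    draw_sprites();
+  }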
+
+
+format (gnu::format)
+--------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+Clang supports the ``format`` attribute, which indicates that the function
+accepts a ``printf`` or ``scanf``-like format string and corresponding
+arguments or a ``va_list`` that contains these arguments.
+
+Please see `GCC documentation about format attribute
+<http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html>`_ to find details
+about attribute syntax.
+
+Clang implements two kinds of checks with this attribute.
+
+#. Clang checks that the function with the ``format`` attribute is called with
+ a format string that uses format specifiers that are allowed, and that
+ arguments match the format string. This is the ``-Wformat`` warning; it is
+ on by default.
+
+#. Clang checks that the format string argument is a literal string. This is
+ the ``-Wformat-nonliteral`` warning; it is off by default.
+
+ Clang implements this mostly the same way as GCC, but there is a difference
+ for functions that accept a ``va_list`` argument (for example, ``vprintf``).
+ GCC does not emit the ``-Wformat-nonliteral`` warning for calls to such
+ functions. Clang does not warn if the format string comes from a function
+ parameter of a function annotated with a compatible attribute; otherwise it
+ warns. For example:
+
+ .. code-block:: c
+
+ __attribute__((__format__ (__scanf__, 1, 3)))
+ void foo(const char* s, char *buf, ...) {
+ va_list ap;
+ va_start(ap, buf);
+
+ vprintf(s, ap); // warning: format string is not a string literal
+ }
+
+ In this case we warn because ``s`` contains a format string for a
+ ``scanf``-like function, but it is passed to a ``printf``-like function.
+
+ If the attribute is removed, clang still warns, because the format string is
+ not a string literal.
+
+ Another example:
+
+ .. code-block:: c
+
+ __attribute__((__format__ (__printf__, 1, 3)))
+ void foo(const char* s, char *buf, ...) {
+ va_list ap;
+ va_start(ap, buf);
+
+ vprintf(s, ap); // warning
+ }
+
+ In this case Clang does not warn because the format string ``s`` and
+ the corresponding arguments are annotated. If the arguments are
+ incorrect, the caller of ``foo`` will receive a warning.
+
+
+noduplicate (clang::noduplicate)
+--------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+The ``noduplicate`` attribute can be placed on function declarations to control
+whether function calls to this function can be duplicated or not as a result of
+optimizations. This is required for the implementation of functions with
+certain special requirements, like the OpenCL "barrier" function, that might
+need to be run concurrently by all the threads that are executing in lockstep
+on the hardware. For example, consider this attribute applied to the function
+``nodupfunc`` in the code below:
+
+.. code-block:: c
+
+ void nodupfunc() __attribute__((noduplicate));
+ // Setting it as a C++11 attribute is also valid
+ // void nodupfunc() [[clang::noduplicate]];
+ void foo();
+ void bar();
+
+ // Hypothetical enclosing function so that the snippet is self-contained.
+ void example(int a, int n) {
+   nodupfunc();
+   if (a > n) {
+     foo();
+   } else {
+     bar();
+   }
+ }
+
+Without the attribute, some optimizations could transform it into code like this:
+
+.. code-block:: c
+
+ void example(int a, int n) {
+   if (a > n) {
+     nodupfunc();
+     foo();
+   } else {
+     nodupfunc();
+     bar();
+   }
+ }
+
+where the call to "nodupfunc" is duplicated and sunk into the two branches
+of the condition.
+
+
+no_sanitize (clang::no_sanitize)
+--------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+Use the ``no_sanitize`` attribute on a function declaration to specify
+that a particular instrumentation or set of instrumentations should not be
+applied to that function. The attribute takes a list of string literals,
+which have the same meaning as values accepted by the ``-fno-sanitize=``
+flag. For example, ``__attribute__((no_sanitize("address", "thread")))``
+specifies that AddressSanitizer and ThreadSanitizer should not be applied
+to the function.
+
+See :ref:`Controlling Code Generation <controlling-code-generation>` for a
+full list of supported sanitizer flags.
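+
+A short hypothetical sketch:
+
+.. code-block:: c++
+
+  // Hypothetical: exclude this function from AddressSanitizer and
+  // ThreadSanitizer instrumentation.
+  __attribute__((no_sanitize("address", "thread")))
+  void poke_device_register(volatile unsigned *reg) {
+    *reg = 1u;
+  }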
+
+
+no_sanitize_address (no_address_safety_analysis, gnu::no_address_safety_analysis, gnu::no_sanitize_address)
+-----------------------------------------------------------------------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+.. _langext-address_sanitizer:
+
+Use ``__attribute__((no_sanitize_address))`` on a function declaration to
+specify that address safety instrumentation (e.g. AddressSanitizer) should
+not be applied to that function.
+
+
+no_sanitize_thread
+------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+.. _langext-thread_sanitizer:
+
+Use ``__attribute__((no_sanitize_thread))`` on a function declaration to
+specify that checks for data races on plain (non-atomic) memory accesses should
+not be inserted by ThreadSanitizer. The function is still instrumented by the
+tool to avoid false positives and provide meaningful stack traces.
+
+
+no_sanitize_memory
+------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+.. _langext-memory_sanitizer:
+
+Use ``__attribute__((no_sanitize_memory))`` on a function declaration to
+specify that checks for uninitialized memory should not be inserted
+(e.g. by MemorySanitizer). The function may still be instrumented by the tool
+to avoid false positives in other places.
+
+
+no_split_stack (gnu::no_split_stack)
+------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+The ``no_split_stack`` attribute disables the emission of the split stack
+preamble for a particular function. It has no effect if ``-fsplit-stack``
+is not specified.
+
+
+objc_boxable
+------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Structs and unions marked with the ``objc_boxable`` attribute can be used
+with the Objective-C boxed expression syntax, ``@(...)``.
+
+**Usage**: ``__attribute__((objc_boxable))``. This attribute
+can only be placed on a declaration of a trivially-copyable struct or union:
+
+.. code-block:: objc
+
+ struct __attribute__((objc_boxable)) some_struct {
+ int i;
+ };
+ union __attribute__((objc_boxable)) some_union {
+ int i;
+ float f;
+ };
+ typedef struct __attribute__((objc_boxable)) _some_struct some_struct;
+
+ // ...
+
+ some_struct ss;
+ NSValue *boxed = @(ss);
+
+
+objc_method_family
+------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Many methods in Objective-C have conventional meanings determined by their
+selectors. It is sometimes useful to be able to mark a method as having a
+particular conventional meaning despite not having the right selector, or as
+not having the conventional meaning that its selector would suggest. For these
+use cases, we provide an attribute to specifically describe the "method family"
+that a method belongs to.
+
+**Usage**: ``__attribute__((objc_method_family(X)))``, where ``X`` is one of
+``none``, ``alloc``, ``copy``, ``init``, ``mutableCopy``, or ``new``. This
+attribute can only be placed at the end of a method declaration:
+
+.. code-block:: objc
+
+ - (NSString *)initMyStringValue __attribute__((objc_method_family(none)));
+
+Users who do not wish to change the conventional meaning of a method, and who
+merely want to document its non-standard retain and release semantics, should
+use the retaining behavior attributes (``ns_returns_retained``,
+``ns_returns_not_retained``, etc).
+
+Query for this feature with ``__has_attribute(objc_method_family)``.
+
+
+objc_requires_super
+-------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Some Objective-C classes allow a subclass to override a particular method in a
+parent class but expect that the overriding method also calls the overridden
+method in the parent class. For these cases, we provide an attribute to
+designate that a method requires a "call to ``super``" in the overriding
+method in the subclass.
+
+**Usage**: ``__attribute__((objc_requires_super))``. This attribute can only
+be placed at the end of a method declaration:
+
+.. code-block:: objc
+
+ - (void)foo __attribute__((objc_requires_super));
+
+This attribute can only be applied to method declarations within a class, and
+not a protocol. Currently this attribute does not enforce any placement of
+where the call occurs in the overriding method (such as in the case of
+``-dealloc`` where the call must appear at the end). It checks only that the
+call exists.
+
+Note that on both OS X and iOS, the Foundation framework provides a
+convenience macro ``NS_REQUIRES_SUPER`` that provides syntactic sugar for this
+attribute:
+
+.. code-block:: objc
+
+ - (void)foo NS_REQUIRES_SUPER;
+
+This macro is conditionally defined depending on the compiler's support for
+this attribute. If the compiler does not support the attribute the macro
+expands to nothing.
+
+Operationally, when a method has this annotation the compiler will warn if the
+implementation of an override in a subclass does not call super. For example:
+
+.. code-block:: objc
+
+ warning: method possibly missing a [super AnnotMeth] call
+ - (void) AnnotMeth{};
+ ^
+
+
+objc_runtime_name
+-----------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+By default, the Objective-C interface or protocol identifier is used
+in the metadata name for that object. The `objc_runtime_name`
+attribute allows annotated interfaces or protocols to use the
+specified string argument in the object's metadata name instead of the
+default name.
+
+**Usage**: ``__attribute__((objc_runtime_name("MyLocalName")))``. This attribute
+can only be placed before an @protocol or @interface declaration:
+
+.. code-block:: objc
+
+ __attribute__((objc_runtime_name("MyLocalName")))
+ @interface Message
+ @end
+
+
+optnone (clang::optnone)
+------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+The ``optnone`` attribute suppresses essentially all optimizations
+on a function or method, regardless of the optimization level applied to
+the compilation unit as a whole. This is particularly useful when you
+need to debug a particular function, but it is infeasible to build the
+entire application without optimization. Avoiding optimization on the
+specified function can improve the quality of the debugging information
+for that function.
+
+This attribute is incompatible with the ``always_inline`` and ``minsize``
+attributes.
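+
+For example, a function that should remain easy to step through in a debugger
+might be written as in this hypothetical sketch:
+
+.. code-block:: c++
+
+  // Hypothetical: keep this function unoptimized even when the rest of the
+  // translation unit is built with optimization enabled.
+  __attribute__((optnone)) int checksum(const int *v, int n) {
+    int s = 0;
+    for (int i = 0; i < n; ++i)
+      s += v[i];
+    return s;
+  }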
+
+
+overloadable
+------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Clang provides support for C++ function overloading in C. Function overloading
+in C is introduced using the ``overloadable`` attribute. For example, one
+might provide several overloaded versions of a ``tgsin`` function that invokes
+the appropriate standard function computing the sine of a value with ``float``,
+``double``, or ``long double`` precision:
+
+.. code-block:: c
+
+ #include <math.h>
+ float __attribute__((overloadable)) tgsin(float x) { return sinf(x); }
+ double __attribute__((overloadable)) tgsin(double x) { return sin(x); }
+ long double __attribute__((overloadable)) tgsin(long double x) { return sinl(x); }
+
+Given these declarations, one can call ``tgsin`` with a ``float`` value to
+receive a ``float`` result, with a ``double`` to receive a ``double`` result,
+etc. Function overloading in C follows the rules of C++ function overloading
+to pick the best overload given the call arguments, with a few C-specific
+semantics:
+
+* Conversion from ``float`` or ``double`` to ``long double`` is ranked as a
+ floating-point promotion (per C99) rather than as a floating-point conversion
+ (as in C++).
+
+* A conversion from a pointer of type ``T*`` to a pointer of type ``U*`` is
+ considered a pointer conversion (with conversion rank) if ``T`` and ``U`` are
+ compatible types.
+
+* A conversion from type ``T`` to a value of type ``U`` is permitted if ``T``
+ and ``U`` are compatible types. This conversion is given "conversion" rank.
+
+The declaration of ``overloadable`` functions is restricted to function
+declarations and definitions. Most importantly, if any function with a given
+name is given the ``overloadable`` attribute, then all function declarations
+and definitions with that name (and in that scope) must have the
+``overloadable`` attribute. This rule even applies to redeclarations of
+functions whose original declaration had the ``overloadable`` attribute, e.g.,
+
+.. code-block:: c
+
+ int f(int) __attribute__((overloadable));
+ float f(float); // error: declaration of "f" must have the "overloadable" attribute
+
+ int g(int) __attribute__((overloadable));
+ int g(int) { } // error: redeclaration of "g" must also have the "overloadable" attribute
+
+Functions marked ``overloadable`` must have prototypes. Therefore, the
+following code is ill-formed:
+
+.. code-block:: c
+
+ int h() __attribute__((overloadable)); // error: h does not have a prototype
+
+However, ``overloadable`` functions are allowed to use an ellipsis even if there
+are no named parameters (as is permitted in C++). This feature is particularly
+useful when combined with the ``unavailable`` attribute:
+
+.. code-block:: c++
+
+ void honeypot(...) __attribute__((overloadable, unavailable)); // calling me is an error
+
+Functions declared with the ``overloadable`` attribute have their names mangled
+according to the same rules as C++ function names. For example, the three
+``tgsin`` functions in our motivating example get the mangled names
+``_Z5tgsinf``, ``_Z5tgsind``, and ``_Z5tgsine``, respectively. There are two
+caveats to this use of name mangling:
+
+* Future versions of Clang may change the name mangling of functions overloaded
+ in C, so you should not depend on a specific mangling. To be completely
+ safe, we strongly urge the use of ``static inline`` with ``overloadable``
+ functions.
+
+* The ``overloadable`` attribute has almost no meaning when used in C++,
+ because names will already be mangled and functions are already overloadable.
+ However, when an ``overloadable`` function occurs within an ``extern "C"``
+ linkage specification, its name *will* be mangled in the same way as it
+ would in C.
+
+Query for this feature with ``__has_extension(attribute_overloadable)``.
+
+
+release_capability (release_shared_capability, clang::release_capability, clang::release_shared_capability)
+-----------------------------------------------------------------------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+Marks a function as releasing a capability.
+
+
+target (gnu::target)
+--------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+Clang supports the GNU style ``__attribute__((target("OPTIONS")))`` attribute.
+This attribute may be attached to a function definition and instructs
+the backend to use different code generation options than were passed on the
+command line.
+
+The current set of options corresponds to the existing "subtarget features"
+for the target, with or without a "-mno-" in front to indicate the absence
+of the feature, as well as ``arch="CPU"``, which changes the default "CPU"
+for the function.
+
+Example "subtarget features" from the x86 backend include "mmx", "sse",
+"sse4.2", "avx", and "xop"; they largely correspond to the machine-specific
+options handled by the front end.
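+
+As a hypothetical sketch, a single function can be compiled with an extra
+subtarget feature enabled:
+
+.. code-block:: c++
+
+  // Hypothetical x86 example: this definition is compiled as if the "avx2"
+  // feature had been enabled on the command line.
+  __attribute__((target("avx2")))
+  void add_floats(const float *a, const float *b, float *out, int n) {
+    for (int i = 0; i < n; ++i)
+      out[i] = a[i] + b[i];
+  }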
+
+
+try_acquire_capability (try_acquire_shared_capability, clang::try_acquire_capability, clang::try_acquire_shared_capability)
+---------------------------------------------------------------------------------------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+Marks a function that attempts to acquire a capability. Such a function may
+fail to actually acquire the capability; it accepts a Boolean value determining
+whether acquiring the capability means success (true), or failing to acquire
+the capability means success (false).
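+
+A hypothetical sketch of the Boolean argument, building on a hypothetical
+``Mutex`` wrapper annotated with the ``capability`` attribute:
+
+.. code-block:: c++
+
+  struct __attribute__((capability("mutex"))) Mutex {
+    // Hypothetical: a return value of true means the capability was acquired.
+    bool TryLock() __attribute__((try_acquire_capability(true)));
+  };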
+
+
+Variable Attributes
+===================
+
+
+init_seg
+--------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","", "X"
+
+The attribute applied by ``pragma init_seg()`` controls the section into
+which global initialization function pointers are emitted. It is only
+available with ``-fms-extensions``. Typically, this function pointer is
+emitted into ``.CRT$XCU`` on Windows. The user can change the order of
+initialization by using a different section name with the same
+``.CRT$XC`` prefix and a suffix that sorts lexicographically before or
+after the standard ``.CRT$XCU`` sections. See the init_seg_
+documentation on MSDN for more information.
+
+.. _init_seg: http://msdn.microsoft.com/en-us/library/7977wcck(v=vs.110).aspx
+
+
+section (gnu::section, __declspec(allocate))
+--------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","X","", ""
+
+The ``section`` attribute allows you to specify the section into which a
+global variable or function should be placed after translation.
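+
+For example (the section name here is hypothetical):
+
+.. code-block:: c++
+
+  // Hypothetical: on ELF targets this places the variable in a section named
+  // ".mydata" instead of the default data section.
+  __attribute__((section(".mydata"))) int config_flag = 1;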
+
+
+tls_model (gnu::tls_model)
+--------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+The ``tls_model`` attribute allows you to specify which thread-local storage
+model to use. It accepts the following strings:
+
+* global-dynamic
+* local-dynamic
+* initial-exec
+* local-exec
+
+TLS models are mutually exclusive.
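+
+A hypothetical example requesting a specific model:
+
+.. code-block:: c++
+
+  // Hypothetical: ask for the initial-exec model for this thread-local variable.
+  static __thread int per_thread_counter __attribute__((tls_model("initial-exec")));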
+
+
+thread
+------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","X","", ""
+
+The ``__declspec(thread)`` attribute declares a variable with thread local
+storage. It is available under the ``-fms-extensions`` flag for MSVC
+compatibility. See the documentation for `__declspec(thread)`_ on MSDN.
+
+.. _`__declspec(thread)`: http://msdn.microsoft.com/en-us/library/9w1sdazb.aspx
+
+In Clang, ``__declspec(thread)`` is generally equivalent in functionality to the
+GNU ``__thread`` keyword. The variable must not have a destructor and must have
+a constant initializer, if any. The attribute only applies to variables
+declared with static storage duration, such as globals, class static data
+members, and static locals.
+
+
+Type Attributes
+===============
+
+
+align_value
+-----------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+The align_value attribute can be added to the typedef of a pointer type or the
+declaration of a variable of pointer or reference type. It specifies that the
+pointer will point to, or the reference will bind to, only objects with at
+least the provided alignment. This alignment value must be some positive power
+of 2.
+
+.. code-block:: c
+
+  typedef double * aligned_double_ptr __attribute__((align_value(64)));
+  void foo(double & x __attribute__((align_value(128))),
+           aligned_double_ptr y) { ... }
+
+If the pointer value does not have the specified alignment at runtime, the
+behavior of the program is undefined.
+
+
+flag_enum
+---------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+This attribute can be added to an enumeration type to signal to the compiler
+that it is intended to be used as a flag type. When issuing warnings, the
+compiler will then assume that the range of the type includes all of the
+values that can be obtained by combining the bits of its enumerators.
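+
+A hypothetical sketch:
+
+.. code-block:: c++
+
+  // Hypothetical flag-style enumeration: combinations such as (Read | Write)
+  // are treated as being within the range of the type when warnings are issued.
+  enum __attribute__((flag_enum)) AccessMode { Read = 1, Write = 2, Execute = 4 };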
+
+
+__single_inheritance, __multiple_inheritance, __virtual_inheritance
+-------------------------------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+This collection of keywords is enabled under ``-fms-extensions`` and controls
+the pointer-to-member representation used on ``*-*-win32`` targets.
+
+The ``*-*-win32`` targets utilize a pointer-to-member representation which
+varies in size and alignment depending on the definition of the underlying
+class.
+
+However, this is problematic when only a forward declaration is available and
+no definition has been seen yet. In such cases, Clang is forced to use the
+most general representation that is available to it.
+
+These keywords make it possible to use a pointer-to-member representation other
+than the most general one regardless of whether or not the definition will ever
+be present in the current translation unit.
+
+This family of keywords belongs between the ``class-key`` and ``class-name``:
+
+.. code-block:: c++
+
+ struct __single_inheritance S;
+ int S::*i;
+ struct S {};
+
+These keywords can be applied to class templates but only have an effect when
+used on full specializations:
+
+.. code-block:: c++
+
+ template <typename T, typename U> struct __single_inheritance A; // warning: inheritance model ignored on primary template
+ template <typename T> struct __multiple_inheritance A<T, T>; // warning: inheritance model ignored on partial specialization
+ template <> struct __single_inheritance A<int, float>;
+
+Note that choosing an inheritance model less general than strictly necessary is
+an error:
+
+.. code-block:: c++
+
+ struct __multiple_inheritance S; // error: inheritance model does not match definition
+ int S::*i;
+ struct S {};
+
+
+novtable
+--------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","X","", ""
+
+This attribute can be added to a class declaration or definition to signal to
+the compiler that constructors and destructors will not reference the virtual
+function table.
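+
+For illustration (requires ``-fms-extensions``; the interface is hypothetical):
+
+.. code-block:: c++
+
+  // Hypothetical pure interface: its constructors and destructors never use
+  // the vtable, so the compiler may omit the vtable pointer initialization.
+  struct __declspec(novtable) IShape {
+    virtual void draw() = 0;
+  };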
+
+
+Statement Attributes
+====================
+
+
+fallthrough (clang::fallthrough)
+--------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","X","","", ""
+
+The ``clang::fallthrough`` attribute is used along with the
+``-Wimplicit-fallthrough`` argument to annotate intentional fall-through
+between switch labels. It can only be applied to a null statement placed at a
+point of execution between any statement and the next switch label. It is
+common to mark these places with a specific comment, but this attribute is
+meant to replace comments with a more strict annotation, which can be checked
+by the compiler. This attribute doesn't change the semantics of the code and can
+be used wherever an intended fall-through occurs. It is designed to mimic
+control-flow statements like ``break;``, so it can be placed in most places
+where ``break;`` can, but only if there are no statements on the execution path
+between it and the next switch label.
+
+Here is an example:
+
+.. code-block:: c++
+
+ // compile with -Wimplicit-fallthrough
+ switch (n) {
+ case 22:
+ case 33: // no warning: no statements between case labels
+ f();
+ case 44: // warning: unannotated fall-through
+ g();
+ [[clang::fallthrough]];
+ case 55: // no warning
+ if (x) {
+ h();
+ break;
+ }
+ else {
+ i();
+ [[clang::fallthrough]];
+ }
+ case 66: // no warning
+ p();
+ [[clang::fallthrough]]; // warning: fallthrough annotation does not
+ // directly precede case label
+ q();
+ case 77: // warning: unannotated fall-through
+ r();
+ }
+
+
+#pragma clang loop
+------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","", "X"
+
+The ``#pragma clang loop`` directive allows loop optimization hints to be
+specified for the subsequent loop. The directive allows vectorization,
+interleaving, and unrolling to be enabled or disabled. Vector width as well
+as interleave and unrolling count can be manually specified. See
+`language extensions
+<http://clang.llvm.org/docs/LanguageExtensions.html#extensions-for-loop-hint-optimizations>`_
+for details.
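+
+A hypothetical sketch combining two of the hints:
+
+.. code-block:: c++
+
+  // Hypothetical: request a vector width of 4 and an interleave count of 2
+  // for the loop below.
+  void scale(float *a, int n) {
+  #pragma clang loop vectorize_width(4) interleave_count(2)
+    for (int i = 0; i < n; ++i)
+      a[i] *= 2.0f;
+  }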
+
+
+#pragma unroll, #pragma nounroll
+--------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","", "X"
+
+Loop unrolling optimization hints can be specified with ``#pragma unroll`` and
+``#pragma nounroll``. The pragma is placed immediately before a for, while,
+do-while, or C++11 range-based for loop.
+
+Specifying ``#pragma unroll`` without a parameter directs the loop unroller to
+attempt to fully unroll the loop if the trip count is known at compile time:
+
+.. code-block:: c++
+
+ #pragma unroll
+ for (...) {
+ ...
+ }
+
+Specifying the optional parameter, ``#pragma unroll _value_``, directs the
+unroller to unroll the loop ``_value_`` times. The parameter may optionally be
+enclosed in parentheses:
+
+.. code-block:: c++
+
+ #pragma unroll 16
+ for (...) {
+ ...
+ }
+
+ #pragma unroll(16)
+ for (...) {
+ ...
+ }
+
+Specifying ``#pragma nounroll`` indicates that the loop should not be unrolled:
+
+.. code-block:: c++
+
+ #pragma nounroll
+ for (...) {
+ ...
+ }
+
+``#pragma unroll`` and ``#pragma unroll _value_`` have identical semantics to
+``#pragma clang loop unroll(full)`` and
+``#pragma clang loop unroll_count(_value_)`` respectively. ``#pragma nounroll``
+is equivalent to ``#pragma clang loop unroll(disable)``. See
+`language extensions
+<http://clang.llvm.org/docs/LanguageExtensions.html#extensions-for-loop-hint-optimizations>`_
+for further details including limitations of the unroll hints.
+
+
+Calling Conventions
+===================
+Clang supports several different calling conventions, depending on the target
+platform and architecture. The calling convention used for a function determines
+how parameters are passed, how results are returned to the caller, and other
+low-level details of calling a function.
+
+fastcall (gnu::fastcall, __fastcall, _fastcall)
+-----------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","X", ""
+
+On 32-bit x86 targets, this attribute changes the calling convention of a
+function to use ECX and EDX as register parameters and clear parameters off of
+the stack on return. This convention does not support variadic calls or
+unprototyped functions in C, and has no effect on x86_64 targets. This calling
+convention is supported primarily for compatibility with existing code. Users
+seeking register parameters should use the ``regparm`` attribute, which does
+not require callee-cleanup. See the documentation for `__fastcall`_ on MSDN.
+
+.. _`__fastcall`: http://msdn.microsoft.com/en-us/library/6xa169sk.aspx
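+
+A brief sketch of the supported spellings (declarations only; the keyword
+spelling assumes Microsoft-extension mode is enabled, and the function names
+are illustrative):
+
+.. code-block:: c++
+
+  void f1(int a, int b) __attribute__((fastcall)); // GNU spelling
+  [[gnu::fastcall]] void f2(int a, int b);         // C++11 spelling
+  void __fastcall f3(int a, int b);                // keyword spelling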
+
+
+ms_abi (gnu::ms_abi)
+--------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+On non-Windows x86_64 targets, this attribute changes the calling convention of
+a function to match the default convention used on Windows x86_64. This
+attribute has no effect on Windows targets or non-x86_64 targets.
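+
+For example, a callback that must match the Windows x86_64 convention might be
+declared as in the following sketch (the function name is illustrative):
+
+.. code-block:: c++
+
+  // Called from code compiled for the Windows x86_64 ABI.
+  void on_event(void *ctx, int code) __attribute__((ms_abi));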
+
+
+pcs (gnu::pcs)
+--------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+On ARM targets, this attribute can be used to select calling conventions
+similar to ``stdcall`` on x86. Valid parameter values are "aapcs" and
+"aapcs-vfp".
+
+
+regparm (gnu::regparm)
+----------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+On 32-bit x86 targets, the regparm attribute causes the compiler to pass
+the first three integer parameters in EAX, EDX, and ECX instead of on the
+stack. This attribute has no effect on variadic functions; for such functions,
+all parameters are passed on the stack as usual.
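+
+For example, a sketch of a declaration whose first three integer arguments are
+passed in registers (the function name is illustrative):
+
+.. code-block:: c++
+
+  int add3(int a, int b, int c) __attribute__((regparm(3)));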
+
+
+stdcall (gnu::stdcall, __stdcall, _stdcall)
+-------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","X", ""
+
+On 32-bit x86 targets, this attribute changes the calling convention of a
+function to clear parameters off of the stack on return. This convention does
+not support variadic calls or unprototyped functions in C, and has no effect on
+x86_64 targets. This calling convention is used widely by the Windows API and
+COM applications. See the documentation for `__stdcall`_ on MSDN.
+
+.. _`__stdcall`: http://msdn.microsoft.com/en-us/library/zxk0tw93.aspx
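+
+A typical Windows-API-style declaration might look like the following sketch
+(the keyword spelling assumes Microsoft-extension mode; the name is
+illustrative):
+
+.. code-block:: c++
+
+  unsigned long __stdcall thread_entry(void *param);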
+
+
+thiscall (gnu::thiscall, __thiscall, _thiscall)
+-----------------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","X", ""
+
+On 32-bit x86 targets, this attribute changes the calling convention of a
+function to use ECX for the first parameter (typically the implicit ``this``
+parameter of C++ methods) and clear parameters off of the stack on return. This
+convention does not support variadic calls or unprototyped functions in C, and
+has no effect on x86_64 targets. See the documentation for `__thiscall`_ on
+MSDN.
+
+.. _`__thiscall`: http://msdn.microsoft.com/en-us/library/ek8tkfbw.aspx
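+
+As a sketch, an explicit ``thiscall`` function pointer type is occasionally
+used to describe C++ member functions from C-style code (the typedef is
+illustrative and assumes Microsoft extensions are enabled):
+
+.. code-block:: c++
+
+  struct Widget;
+  typedef void (__thiscall *WidgetMethod)(Widget *self, int arg);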
+
+
+vectorcall (__vectorcall, _vectorcall)
+--------------------------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","X", ""
+
+On 32-bit x86 *and* x86_64 targets, this attribute changes the calling
+convention of a function to pass vector parameters in SSE registers.
+
+On 32-bit x86 targets, this calling convention is similar to ``__fastcall``.
+The first two integer parameters are passed in ECX and EDX. Subsequent integer
+parameters are passed in memory, and the callee clears the stack. On x86_64
+targets, the callee does *not* clear the stack, and integer parameters are
+passed in RCX, RDX, R8, and R9 as is done for the default Windows x64 calling
+convention.
+
+On both 32-bit x86 and x86_64 targets, vector and floating point arguments are
+passed in XMM0-XMM5. Homogeneous vector aggregates of up to four elements are
+passed in sequential SSE registers if enough are available. If AVX is enabled,
+256-bit vectors are passed in YMM0-YMM5. Any vector or aggregate type that
+cannot be passed in registers for any reason is passed by reference, which
+allows the caller to align the parameter memory.
+
+See the documentation for `__vectorcall`_ on MSDN for more details.
+
+.. _`__vectorcall`: http://msdn.microsoft.com/en-us/library/dn375768.aspx
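+
+A short sketch of a declaration that benefits from passing vectors in registers
+(``__m128`` comes from ``<xmmintrin.h>``; the keyword spelling assumes a target
+where it is available, such as Windows):
+
+.. code-block:: c++
+
+  #include <xmmintrin.h>
+
+  __m128 __vectorcall blend(__m128 a, __m128 b, float t);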
+
+
+Consumed Annotation Checking
+============================
+Clang supports additional attributes for checking basic resource management
+properties, specifically for unique objects that have a single owning reference.
+The following attributes are currently supported, although **the implementation
+for these annotations is currently in development and is subject to change.**
+
+callable_when
+-------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Use ``__attribute__((callable_when(...)))`` to indicate what states a method
+may be called in. Valid states are unconsumed, consumed, or unknown. Each
+argument to this attribute must be a quoted string. E.g.:
+
+``__attribute__((callable_when("unconsumed", "unknown")))``
+
+
+consumable
+----------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Each ``class`` that uses any of the typestate annotations must first be marked
+using the ``consumable`` attribute. Failure to do so will result in a warning.
+
+This attribute accepts a single parameter that must be one of the following:
+``unknown``, ``consumed``, or ``unconsumed``.
+
+
+param_typestate
+---------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+This attribute specifies expectations about function parameters. Calls to a
+function with annotated parameters will issue a warning if the corresponding
+argument isn't in the expected state. The attribute is also used to set the
+initial state of the parameter when analyzing the function's body.
+
+
+return_typestate
+----------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+The ``return_typestate`` attribute can be applied to functions or parameters.
+When applied to a function the attribute specifies the state of the returned
+value. The function's body is checked to ensure that it always returns a value
+in the specified state. On the caller side, values returned by the annotated
+function are initialized to the given state.
+
+When applied to a function parameter it modifies the state of an argument after
+a call to the function returns. The function's body is checked to ensure that
+the parameter is in the expected state before returning.
+
+
+set_typestate
+-------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Annotate methods that transition an object into a new state with
+``__attribute__((set_typestate(new_state)))``. The new state must be
+unconsumed, consumed, or unknown.
+
+
+test_typestate
+--------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Use ``__attribute__((test_typestate(tested_state)))`` to indicate that a method
+returns true if the object is in the specified state.
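+
+Since these attributes are normally used together, the following is a hedged
+sketch of how they might combine on a hypothetical move-only handle type. The
+class and member names are purely illustrative, and the analysis is driven by
+the ``-Wconsumed`` warning:
+
+.. code-block:: c++
+
+  class __attribute__((consumable(unconsumed))) Handle {
+  public:
+    Handle() __attribute__((return_typestate(unconsumed)));
+
+    // Only valid while the handle still owns its resource.
+    void release() __attribute__((callable_when("unconsumed"),
+                                  set_typestate(consumed)));
+
+    // Returns true when the handle is in the 'unconsumed' state.
+    bool valid() const __attribute__((test_typestate(unconsumed)));
+  };
+
+  // The argument must arrive unconsumed and is treated as consumed afterwards.
+  void sink(Handle &h __attribute__((param_typestate(unconsumed),
+                                     return_typestate(consumed))));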
+
+
+Type Safety Checking
+====================
+Clang supports additional attributes to enable checking type safety properties
+that can't be enforced by the C type system. Use cases include:
+
+* MPI library implementations, where these attributes enable checking that
+ the buffer type matches the passed ``MPI_Datatype``;
+* the HDF5 library, which has a similar use case to MPI;
+* checking types of variadic functions' arguments for functions like
+ ``fcntl()`` and ``ioctl()``.
+
+You can detect support for these attributes with ``__has_attribute()``. For
+example:
+
+.. code-block:: c++
+
+ #if defined(__has_attribute)
+ # if __has_attribute(argument_with_type_tag) && \
+ __has_attribute(pointer_with_type_tag) && \
+ __has_attribute(type_tag_for_datatype)
+ # define ATTR_MPI_PWT(buffer_idx, type_idx) __attribute__((pointer_with_type_tag(mpi,buffer_idx,type_idx)))
+ /* ... other macros ... */
+ # endif
+ #endif
+
+ #if !defined(ATTR_MPI_PWT)
+ # define ATTR_MPI_PWT(buffer_idx, type_idx)
+ #endif
+
+ int MPI_Send(void *buf, int count, MPI_Datatype datatype /*, other args omitted */)
+ ATTR_MPI_PWT(1,3);
+
+argument_with_type_tag
+----------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Use ``__attribute__((argument_with_type_tag(arg_kind, arg_idx,
+type_tag_idx)))`` on a function declaration to specify that the function
+accepts a type tag that determines the type of some other argument.
+``arg_kind`` is an identifier that should be used when annotating all
+applicable type tags.
+
+This attribute is primarily useful for checking arguments of variadic functions
+(``pointer_with_type_tag`` can be used in most non-variadic cases).
+
+For example:
+
+.. code-block:: c++
+
+ int fcntl(int fd, int cmd, ...)
+ __attribute__(( argument_with_type_tag(fcntl,3,2) ));
+
+
+pointer_with_type_tag
+---------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Use ``__attribute__((pointer_with_type_tag(ptr_kind, ptr_idx, type_tag_idx)))``
+on a function declaration to specify that the function accepts a type tag that
+determines the pointee type of some other pointer argument.
+
+For example:
+
+.. code-block:: c++
+
+ int MPI_Send(void *buf, int count, MPI_Datatype datatype /*, other args omitted */)
+ __attribute__(( pointer_with_type_tag(mpi,1,3) ));
+
+
+type_tag_for_datatype
+---------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","","","", ""
+
+Clang supports annotating type tags of two forms.
+
+* **Type tag that is an expression containing a reference to some declared
+ identifier.** Use ``__attribute__((type_tag_for_datatype(kind, type)))`` on a
+ declaration with that identifier:
+
+ .. code-block:: c++
+
+ extern struct mpi_datatype mpi_datatype_int
+ __attribute__(( type_tag_for_datatype(mpi,int) ));
+ #define MPI_INT ((MPI_Datatype) &mpi_datatype_int)
+
+* **Type tag that is an integral literal.** Introduce a ``static const``
+ variable with a corresponding initializer value and attach
+ ``__attribute__((type_tag_for_datatype(kind, type)))`` on that declaration,
+ for example:
+
+ .. code-block:: c++
+
+ #define MPI_INT ((MPI_Datatype) 42)
+ static const MPI_Datatype mpi_datatype_int
+ __attribute__(( type_tag_for_datatype(mpi,int) )) = 42;
+
+The attribute also accepts an optional third argument that determines how the
+expression is compared to the type tag. There are two supported flags:
+
+* ``layout_compatible`` will cause types to be compared according to
+ layout-compatibility rules (C++11 [class.mem] p 17, 18). This is
+ implemented to support annotating types like ``MPI_DOUBLE_INT``.
+
+ For example:
+
+ .. code-block:: c++
+
+ /* In mpi.h */
+ struct internal_mpi_double_int { double d; int i; };
+ extern struct mpi_datatype mpi_datatype_double_int
+ __attribute__(( type_tag_for_datatype(mpi, struct internal_mpi_double_int, layout_compatible) ));
+
+ #define MPI_DOUBLE_INT ((MPI_Datatype) &mpi_datatype_double_int)
+
+ /* In user code */
+ struct my_pair { double a; int b; };
+ struct my_pair *buffer;
+ MPI_Send(buffer, 1, MPI_DOUBLE_INT /*, ... */); // no warning
+
+ struct my_int_pair { int a; int b; };
+ struct my_int_pair *buffer2;
+ MPI_Send(buffer2, 1, MPI_DOUBLE_INT /*, ... */); // warning: actual buffer element
+ // type 'struct my_int_pair'
+ // doesn't match specified MPI_Datatype
+
+* ``must_be_null`` specifies that the expression should be a null pointer
+ constant, for example:
+
+ .. code-block:: c++
+
+ /* In mpi.h */
+ extern struct mpi_datatype mpi_datatype_null
+ __attribute__(( type_tag_for_datatype(mpi, void, must_be_null) ));
+
+ #define MPI_DATATYPE_NULL ((MPI_Datatype) &mpi_datatype_null)
+
+ /* In user code */
+ MPI_Send(buffer, 1, MPI_DATATYPE_NULL /*, ... */); // warning: MPI_DATATYPE_NULL
+ // was specified but buffer
+ // is not a null pointer
+
+
+OpenCL Address Spaces
+=====================
+The address space qualifier may be used to specify the region of memory that is
+used to allocate the object. OpenCL supports the following address spaces:
+__generic(generic), __global(global), __local(local), __private(private),
+__constant(constant).
+
+ .. code-block:: c
+
+ __constant int c = ...;
+
+ __generic int* foo(global int* g) {
+ __local int* l;
+ private int p;
+ ...
+ return l;
+ }
+
+More details can be found in the OpenCL C Language Specification, version 2.0, Section 6.5.
+
+__constant(constant)
+--------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+The constant address space attribute signals that an object is located in
+a constant (non-modifiable) memory region. It is available to all work items.
+Any type can be annotated with the constant address space attribute. Objects
+with the constant address space qualifier can be declared in any scope and must
+have an initializer.
+
+
+__generic(generic)
+------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+The generic address space attribute is only available with OpenCL v2.0 and later.
+It can be used with pointer types. Variables in global and local scope and
+function parameters in non-kernel functions can have the generic address space
+type attribute. It is intended as a placeholder for any address space other
+than '__constant', allowing OpenCL code to be written once and used with
+multiple address spaces.
+
+
+__global(global)
+----------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+The global address space attribute specifies that an object is allocated in
+global memory, which is accessible by all work items. The content stored in this
+memory area persists between kernel executions. Pointer types to the global
+address space are allowed as function parameters or local variables. Starting
+with OpenCL v2.0, the global address space can be used with global (program
+scope) variables and static local variables as well.
+
+
+__local(local)
+--------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+The local address space specifies that an object is allocated in the local (work
+group) memory area, which is accessible to all work items in the same work
+group. The content stored in this memory region is not accessible after
+the kernel execution ends. In a kernel function scope, any variable can be in
+the local address space. In other scopes, only pointer types to the local address
+space are allowed. Local address space variables cannot have an initializer.
+
+
+__private(private)
+------------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+The private address space specifies that an object is allocated in the private
+(work item) memory. Other work items cannot access the same memory area and its
+content is destroyed after work item execution ends. Local variables can be
+declared in the private address space. Function arguments are always in the
+private address space. Kernel function arguments of a pointer or an array type
+cannot point to the private address space.
+
+
+Nullability Attributes
+======================
+Whether a particular pointer may be "null" is an important concern when working with pointers in the C family of languages. The various nullability attributes indicate whether a particular pointer can be null or not, which makes APIs more expressive and can help static analysis tools identify bugs involving null pointers. Clang supports several kinds of nullability attributes: the ``nonnull`` and ``returns_nonnull`` attributes indicate which function or method parameters and result types can never be null, while nullability type qualifiers indicate which pointer types can be null (``_Nullable``) or cannot be null (``_Nonnull``).
+
+The nullability (type) qualifiers express whether a value of a given pointer type can be null (the ``_Nullable`` qualifier), doesn't have a defined meaning for null (the ``_Nonnull`` qualifier), or for which the purpose of null is unclear (the ``_Null_unspecified`` qualifier). Because nullability qualifiers are expressed within the type system, they are more general than the ``nonnull`` and ``returns_nonnull`` attributes, allowing one to express (for example) a nullable pointer to an array of nonnull pointers. Nullability qualifiers are written to the right of the pointer to which they apply. For example:
+
+ .. code-block:: c
+
+ // No meaningful result when 'ptr' is null (here, it happens to be undefined behavior).
+ int fetch(int * _Nonnull ptr) { return *ptr; }
+
+ // 'ptr' may be null.
+ int fetch_or_zero(int * _Nullable ptr) {
+ return ptr ? *ptr : 0;
+ }
+
+ // A nullable pointer to non-null pointers to const characters.
+ const char *join_strings(const char * _Nonnull * _Nullable strings, unsigned n);
+
+In Objective-C, there is an alternate spelling for the nullability qualifiers that can be used in Objective-C methods and properties using context-sensitive, non-underscored keywords. For example:
+
+ .. code-block:: objective-c
+
+ @interface NSView : NSResponder
+ - (nullable NSView *)ancestorSharedWithView:(nonnull NSView *)aView;
+ @property (assign, nullable) NSView *superview;
+ @property (readonly, nonnull) NSArray *subviews;
+ @end
+
+nonnull
+-------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+The ``nonnull`` attribute indicates that some function parameters must not be null, and can be used in several different ways. Its original usage (`from GCC <https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes>`_) is as a function (or Objective-C method) attribute that specifies which parameters of the function are nonnull in a comma-separated list. For example:
+
+ .. code-block:: c
+
+ extern void * my_memcpy (void *dest, const void *src, size_t len)
+ __attribute__((nonnull (1, 2)));
+
+Here, the ``nonnull`` attribute indicates that parameters 1 and 2
+cannot have a null value. Omitting the parenthesized list of parameter indices means that all parameters of pointer type cannot be null:
+
+ .. code-block:: c
+
+ extern void * my_memcpy (void *dest, const void *src, size_t len)
+ __attribute__((nonnull));
+
+Clang also allows the ``nonnull`` attribute to be placed directly on a function (or Objective-C method) parameter, eliminating the need to specify the parameter index ahead of time. For example:
+
+ .. code-block:: c
+
+ extern void * my_memcpy (void *dest __attribute__((nonnull)),
+ const void *src __attribute__((nonnull)), size_t len);
+
+Note that the ``nonnull`` attribute indicates that passing null to a non-null parameter is undefined behavior, which the optimizer may take advantage of to, e.g., remove null checks. The ``_Nonnull`` type qualifier indicates that a pointer cannot be null in a more general manner (because it is part of the type system) and does not imply undefined behavior, making it more widely applicable.
+
+
+returns_nonnull
+---------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "X","X","","", ""
+
+The ``returns_nonnull`` attribute indicates that a particular function (or Objective-C method) always returns a non-null pointer. For example, a particular system ``malloc`` might be defined to terminate a process when memory is not available rather than returning a null pointer:
+
+ .. code-block:: c
+
+ extern void * malloc (size_t size) __attribute__((returns_nonnull));
+
+The ``returns_nonnull`` attribute implies that returning a null pointer is undefined behavior, which the optimizer may take advantage of. The ``_Nonnull`` type qualifier indicates that a pointer cannot be null in a more general manner (because it is part of the type system) and does not imply undefined behavior, making it more widely applicable.
+
+
+_Nonnull
+--------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+The ``_Nonnull`` nullability qualifier indicates that null is not a meaningful value for a value of the ``_Nonnull`` pointer type. For example, given a declaration such as:
+
+ .. code-block:: c
+
+ int fetch(int * _Nonnull ptr);
+
+a caller of ``fetch`` should not provide a null value, and the compiler will produce a warning if it sees a literal null value passed to ``fetch``. Note that, unlike the declaration attribute ``nonnull``, the presence of ``_Nonnull`` does not imply that passing null is undefined behavior: ``fetch`` is free to consider null undefined behavior or (perhaps for backward-compatibility reasons) defensively handle null.
+
+
+_Null_unspecified
+-----------------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+The ``_Null_unspecified`` nullability qualifier indicates that neither the ``_Nonnull`` nor ``_Nullable`` qualifiers make sense for a particular pointer type. It is used primarily to indicate that the role of null with specific pointers in a nullability-annotated header is unclear, e.g., due to overly-complex implementations or historical factors with a long-lived API.
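+
+For example, a header being audited for nullability might annotate a pointer
+whose null behavior is historically ambiguous as follows (the declaration is
+illustrative only):
+
+ .. code-block:: c
+
+   // The meaning of a null result has never been documented, so it is left unspecified.
+   extern char * _Null_unspecified legacy_lookup(const char * _Nonnull key);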
+
+
+_Nullable
+---------
+.. csv-table:: Supported Syntaxes
+ :header: "GNU", "C++11", "__declspec", "Keyword", "Pragma"
+
+ "","","","X", ""
+
+The ``_Nullable`` nullability qualifier indicates that a value of the ``_Nullable`` pointer type can be null. For example, given:
+
+ .. code-block:: c
+
+ int fetch_or_zero(int * _Nullable ptr);
+
+a caller of ``fetch_or_zero`` can provide null.
+
+
diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index 47bb2e0c4b99..f42439a9bf1c 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -47,6 +47,14 @@ if (LLVM_ENABLE_DOXYGEN)
set(clang_doxygen_qhp_cust_filter_attrs "")
endif()
+ option(LLVM_DOXYGEN_SVG
+ "Use svg instead of png files for doxygen graphs." OFF)
+ if (LLVM_DOXYGEN_SVG)
+ set(DOT_IMAGE_FORMAT "svg")
+ else()
+ set(DOT_IMAGE_FORMAT "png")
+ endif()
+
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/doxygen.cfg.in
${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg @ONLY)
@@ -64,6 +72,7 @@ if (LLVM_ENABLE_DOXYGEN)
set(clang_doxygen_qhelpgenerator_path)
set(clang_doxygen_qhp_cust_filter_name)
set(clang_doxygen_qhp_cust_filter_attrs)
+ set(DOT_IMAGE_FORMAT)
add_custom_target(doxygen-clang
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
diff --git a/docs/ExternalClangExamples.rst b/docs/ExternalClangExamples.rst
index 71d50c23fa1e..e6076a5be6d1 100644
--- a/docs/ExternalClangExamples.rst
+++ b/docs/ExternalClangExamples.rst
@@ -20,7 +20,7 @@ where Clang is used are:
If you know of (or wrote!) a tool or project using Clang, please send an
email to Clang's `development discussion mailing list
-<http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev>`_ to have it added.
+<http://lists.llvm.org/mailman/listinfo/cfe-dev>`_ to have it added.
(or if you are already a Clang contributor, feel free to directly commit
additions). Since the primary purpose of this page is to provide examples
that can help developers, generally they must have code available.
diff --git a/docs/InternalsManual.rst b/docs/InternalsManual.rst
index 7f2a8fafccc0..7959179d4916 100644
--- a/docs/InternalsManual.rst
+++ b/docs/InternalsManual.rst
@@ -508,7 +508,7 @@ token. This concept maps directly to the "spelling location" for the token.
``SourceRange`` and ``CharSourceRange``
---------------------------------------
-.. mostly taken from http://lists.cs.uiuc.edu/pipermail/cfe-dev/2010-August/010595.html
+.. mostly taken from http://lists.llvm.org/pipermail/cfe-dev/2010-August/010595.html
Clang represents most source ranges by [first, last], where "first" and "last"
each point to the beginning of their respective tokens. For example consider
diff --git a/docs/Makefile b/docs/Makefile
index a409cf6025bd..a6c6e6c43be4 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -30,6 +30,7 @@ $(PROJ_OBJ_DIR)/doxygen.cfg: doxygen.cfg.in
-e 's/@enable_server_based_search@/NO/g' \
-e 's/@extra_search_mappings@//g' \
-e 's/@searchengine_url@//g' \
+ -e 's/@DOT_IMAGE_FORMAT@/png/g' \
> $@
endif
diff --git a/docs/ReleaseNotes.rst b/docs/ReleaseNotes.rst
index c6dc6aa8ba70..2941afd48fcd 100644
--- a/docs/ReleaseNotes.rst
+++ b/docs/ReleaseNotes.rst
@@ -1,6 +1,6 @@
-=====================================
-Clang 3.7 (In-Progress) Release Notes
-=====================================
+=======================
+Clang 3.7 Release Notes
+=======================
.. contents::
:local:
@@ -8,11 +8,6 @@ Clang 3.7 (In-Progress) Release Notes
Written by the `LLVM Team <http://llvm.org/>`_
-.. warning::
-
- These are in-progress notes for the upcoming Clang 3.7 release. You may
- prefer the `Clang 3.6 Release Notes
- <http://llvm.org/releases/3.6.0/tools/clang/docs/ReleaseNotes.html>`_.
Introduction
============
@@ -31,11 +26,6 @@ the latest release, please check out the main please see the `Clang Web
Site <http://clang.llvm.org>`_ or the `LLVM Web
Site <http://llvm.org>`_.
-Note that if you are reading this file from a Subversion checkout or the
-main Clang web page, this document applies to the *next* release, not
-the current one. To see the release notes for a specific release, please
-see the `releases page <http://llvm.org/releases/>`_.
-
What's New in Clang 3.7?
========================
@@ -52,14 +42,48 @@ Major New Features
extension is also enabled when compiling CUDA code, but its use should be
viewed as an implementation detail that is subject to change.
+- On Windows targets, some uses of the ``__try``, ``__except``, and
+ ``__finally`` language constructs are supported in Clang 3.7. MSVC-compatible
+ C++ exceptions are not yet supported, however.
+
+- Clang 3.7 fully supports OpenMP 3.1 and is reported to work on many platforms,
+  including x86, x86-64 and Power. The ``omp simd`` pragma from OpenMP 4.0 is
+  supported as well. See below for details.
+
+- Clang 3.7 includes an implementation of :doc:`control flow integrity
+ <ControlFlowIntegrity>`, a security hardening mechanism.
+
+
Improvements to Clang's diagnostics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------------------
Clang's diagnostics are constantly being improved to catch more issues,
explain them more clearly, and provide more accurate source information
-about them. The improvements since the 3.5 release include:
+about them. The improvements since the 3.6 release include:
+
+- -Wrange-loop-analysis analyzes the loop variable type and the container type
+ to determine whether copies are made of the container elements. If possible,
+  it suggests a const reference type to prevent copies, or a non-reference type
+  to indicate that a copy is made.
+
+- -Wredundant-move warns when a parameter variable is moved on return and the
+ return type is the same as the variable. Returning the variable directly
+ will already make a move, so the call is not needed.
+
+- -Wpessimizing-move warns when a local variable is moved on return and the
+ return type is the same as the variable. Copy elision cannot take place with
+ a move, but can take place if the variable is returned directly.
+
+- -Wmove is a new warning group that contains the previous two warnings,
+  -Wredundant-move and -Wpessimizing-move, as well as the existing warning
+  -Wself-move. In addition, this group is now part of -Wmost and -Wall.
-- ...
+- -Winfinite-recursion, a warning for functions that only call themselves,
+ is now part of -Wmost and -Wall.
+
+- -Wobjc-circular-container warns about the creation of circular containers;
+ it covers ``NSMutableArray``, ``NSMutableSet``, ``NSMutableDictionary``,
+ ``NSMutableOrderedSet`` and all their subclasses.
New Compiler Flags
------------------
@@ -69,94 +93,167 @@ The sized deallocation feature of C++14 is now controlled by the
isn't yet widely deployed, so the user must supply an extra flag to get the
extra functionality.
-The option ....
+Objective-C Language Changes in Clang
+-------------------------------------
+
+- The ``objc_boxable`` attribute was added. Structs and unions marked with this attribute can be
+ used with boxed expressions (``@(...)``) to create ``NSValue``.
+
+Profile Guided Optimization
+---------------------------
+
+Clang now accepts GCC-compatible flags for profile guided optimization (PGO).
+You can now use ``-fprofile-generate=<dir>``, ``-fprofile-use=<dir>``,
+``-fno-profile-generate`` and ``-fno-profile-use``. These flags have the
+same semantics as their GCC counterparts. However, the generated profile
+is still LLVM-specific. PGO profiles generated with Clang cannot be used
+by GCC and vice-versa.
+
+Clang now emits function entry counts in profile-instrumented binaries.
+This has improved the computation of weights and frequencies in
+profile analysis.
+
+OpenMP Support
+--------------
+OpenMP 3.1 is fully supported, but disabled by default. To enable it, please use
+the ``-fopenmp=libomp`` command line option. Your feedback (positive or negative) on
+using OpenMP-enabled clang would be much appreciated; please share it either on
+`cfe-dev <http://lists.llvm.org/mailman/listinfo/cfe-dev>`_ or `openmp-dev
+<http://lists.llvm.org/mailman/listinfo/openmp-dev>`_ mailing lists.
+
+In addition to OpenMP 3.1, several important elements of the 4.0 version of the
+standard are supported as well:
+
+- ``omp simd``, ``omp for simd`` and ``omp parallel for simd`` pragmas
+- atomic constructs
+- ``proc_bind`` clause of ``omp parallel`` pragma
+- ``depend`` clause of ``omp task`` pragma (except for array sections)
+- ``omp cancel`` and ``omp cancellation point`` pragmas
+- ``omp taskgroup`` pragma
+
+Internal API Changes
+--------------------
+
+These are major API changes that have happened since the 3.6 release of
+Clang. If upgrading an external codebase that uses Clang as a library,
+this section should help get you past the largest hurdles of upgrading.
-New Pragmas in Clang
------------------------
+- Some of the ``PPCallbacks`` interface now deals in ``MacroDefinition``
+ objects instead of ``MacroDirective`` objects. This allows preserving
+ full information on macros imported from modules.
-Clang now supports the ...
+- ``clang-c/Index.h`` no longer ``#include``\s ``clang-c/Documentation.h``.
+ You now need to explicitly ``#include "clang-c/Documentation.h"`` if
+ you use the libclang documentation API.
-Windows Support
+Static Analyzer
---------------
-Clang's support for building native Windows programs ...
+* The generated plists now contain the name of the check that generated it.
+* Configuration options can now be passed to the checkers (not just the static
+ analyzer core).
-C Language Changes in Clang
----------------------------
+* New check for dereferencing an object that is the result of a zero-length
+ allocation.
-...
+* Also check functions in precompiled headers.
-C11 Feature Support
-^^^^^^^^^^^^^^^^^^^
+* Properly handle alloca() in some checkers.
-...
+* Various improvements to the retain count checker.
-C++ Language Changes in Clang
------------------------------
-- ...
+clang-tidy
+----------
+Added new checks:
-C++11 Feature Support
-^^^^^^^^^^^^^^^^^^^^^
+* google-global-names-in-headers: flag global namespace pollution in header
+ files.
-...
+* misc-assert-side-effect: detects ``assert()`` conditions with side effects
+ which can cause different behavior in debug / release builds.
-Objective-C Language Changes in Clang
--------------------------------------
+* misc-assign-operator-signature: finds declarations of assign operators with
+ the wrong return and/or argument types.
-...
+* misc-inaccurate-erase: warns when some elements of a container are not
+ removed due to using the ``erase()`` algorithm incorrectly.
-OpenCL C Language Changes in Clang
-----------------------------------
+* misc-inefficient-algorithm: warns on inefficient use of STL algorithms on
+ associative containers.
-...
+* misc-macro-parentheses: finds macros that can have unexpected behavior due
+ to missing parentheses.
-Internal API Changes
---------------------
+* misc-macro-repeated-side-effects: checks for repeated argument with side
+ effects in macros.
-These are major API changes that have happened since the 3.6 release of
-Clang. If upgrading an external codebase that uses Clang as a library,
-this section should help get you past the largest hurdles of upgrading.
+* misc-noexcept-move-constructor: flags user-defined move constructors and
+ assignment operators not marked with ``noexcept`` or marked with
+ ``noexcept(expr)`` where ``expr`` evaluates to ``false`` (but is not a
+ ``false`` literal itself).
-- Some of the `PPCallbacks` interface now deals in `MacroDefinition`
- objects instead of `MacroDirective` objects. This allows preserving
- full information on macros imported from modules.
+* misc-static-assert: replaces ``assert()`` with ``static_assert()`` if the
+ condition is evaluable at compile time.
-- `clang-c/Index.h` no longer `#include`\s `clang-c/Documentation.h`.
- You now need to explicitly `#include "clang-c/Documentation.h"` if
- you use the libclang documentation API.
+* readability-container-size-empty: checks whether a call to the ``size()``
+ method can be replaced with a call to ``empty()``.
-libclang
---------
+* readability-else-after-return: flags conditional statements that have an
+  ``else`` branch when the ``true`` branch ends with a ``return`` statement.
-...
+* readability-redundant-string-cstr: finds unnecessary calls to
+ ``std::string::c_str()``.
-Static Analyzer
----------------
+* readability-shrink-to-fit: replaces copy and swap tricks on shrinkable
+ containers with the ``shrink_to_fit()`` method call.
+
+* readability-simplify-boolean-expr: looks for boolean expressions involving
+ boolean constants and simplifies them to use the appropriate boolean
+ expression directly (``if (x == true) ... -> if (x)``, etc.)
-...
+SystemZ
+-------
-Core Analysis Improvements
-==========================
+* Clang will now always default to the z10 processor when compiling
+ without any ``-march=`` option. Previous releases used to automatically
+ detect the current host CPU when compiling natively. If you wish to
+ still have clang detect the current host CPU, you now need to use the
+ ``-march=native`` option.
-- ...
+* Clang now provides the ``<s390intrin.h>`` header file.
-New Issues Found
-================
+* Clang now supports the transactional-execution facility and
+ provides associated builtins and the ``<htmintrin.h>`` and
+ ``<htmxlintrin.h>`` header files. Support is enabled by default
+ on zEC12 and above, and can additionally be enabled or disabled
+ via the ``-mhtm`` / ``-mno-htm`` command line options.
-- ...
+* Clang now supports the vector facility. This includes a
+ change in the ABI to pass arguments and return values of
+ vector types in vector registers, as well as a change in
+ the default alignment of vector types. Support is enabled
+ by default on z13 and above, and can additionally be enabled
+ or disabled via the ``-mvx`` / ``-mno-vx`` command line options.
-Python Binding Changes
-----------------------
+* Clang now supports the System z vector language extension,
+ providing a "vector" keyword to define vector types, and a
+ set of builtins defined in the ``<vecintrin.h>`` header file.
+ This can be enabled via the ``-fzvector`` command line option.
+ For compatibility with GCC, Clang also supports the
+ ``-mzvector`` option as an alias.
+
+* Several cases of ABI incompatibility with GCC have been fixed.
-The following methods have been added:
-- ...
+Last release which will run on Windows XP and Windows Vista
+-----------------------------------------------------------
-Significant Known Problems
-==========================
+This is expected to be the last major release of Clang that will support
+running on Windows XP and Windows Vista. For the next major release the
+minimum Windows version requirement will be Windows 7.
Additional Information
======================
@@ -170,4 +267,4 @@ tree.
If you have any questions or comments about Clang, please feel free to
contact us via the `mailing
-list <http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev>`_.
+list <http://lists.llvm.org/mailman/listinfo/cfe-dev>`_.
diff --git a/docs/UsersManual.rst b/docs/UsersManual.rst
index 20ee5696e06a..78d8dcc11a9f 100644
--- a/docs/UsersManual.rst
+++ b/docs/UsersManual.rst
@@ -1985,7 +1985,7 @@ with a warning. For example:
::
- clang-cl.exe: warning: argument unused during compilation: '/Zi'
+ clang-cl.exe: warning: argument unused during compilation: '/AI'
To suppress warnings about unused arguments, use the ``-Qunused-arguments`` option.
@@ -2014,14 +2014,21 @@ Execute ``clang-cl /?`` to see a list of supported options:
/E Preprocess to stdout
/fallback Fall back to cl.exe if clang-cl fails to compile
/FA Output assembly code file during compilation
- /Fa<file or directory> Output assembly code to this file during compilation
+ /Fa<file or directory> Output assembly code to this file during compilation (with /FA)
/Fe<file or directory> Set output executable file or directory (ends in / or \)
/FI <value> Include file before parsing
- /Fi<file> Set preprocess output file name
- /Fo<file or directory> Set output object file, or directory (ends in / or \)
+ /Fi<file> Set preprocess output file name (with /P)
+ /Fo<file or directory> Set output object file, or directory (ends in / or \) (with /c)
+ /fp:except-
+ /fp:except
+ /fp:fast
+ /fp:precise
+ /fp:strict
+ /GA Assume thread-local variables are defined in the executable
/GF- Disable string pooling
/GR- Disable emission of RTTI data
/GR Enable emission of RTTI data
+ /Gs<value> Set stack probe size
/Gw- Don't put each data item in its own section
/Gw Put each data item in its own section
/Gy- Don't put each function in its own section
@@ -2046,7 +2053,10 @@ Execute ``clang-cl /?`` to see a list of supported options:
/Oy- Disable frame pointer omission
/Oy Enable frame pointer omission
/O<n> Optimization level
+ /o <file or directory> Set output file or directory (ends in / or \)
/P Preprocess to file
+ /Qvec- Disable the loop vectorization passes
+ /Qvec Enable the loop vectorization passes
/showIncludes Print info about included files to stderr
/TC Treat all source files as C
/Tc <filename> Specify a C source file
@@ -2059,6 +2069,8 @@ Execute ``clang-cl /?`` to see a list of supported options:
/vmm Set the default most-general representation to multiple inheritance
/vms Set the default most-general representation to single inheritance
/vmv Set the default most-general representation to virtual inheritance
+ /volatile:iso Volatile loads and stores have standard semantics
+ /volatile:ms Volatile loads and stores have acquire and release semantics
/W0 Disable all warnings
/W1 Enable -Wall
/W2 Enable -Wall
@@ -2068,29 +2080,52 @@ Execute ``clang-cl /?`` to see a list of supported options:
/WX- Do not treat warnings as errors
/WX Treat warnings as errors
/w Disable all warnings
+ /Zc:sizedDealloc- Disable C++14 sized global deallocation functions
+ /Zc:sizedDealloc Enable C++14 sized global deallocation functions
+ /Zc:strictStrings Treat string literals as const
+ /Zc:threadSafeInit- Disable thread-safe initialization of static variables
+ /Zc:threadSafeInit Enable thread-safe initialization of static variables
+ /Zc:trigraphs- Disable trigraphs (default)
+ /Zc:trigraphs Enable trigraphs
/Zi Enable debug information
/Zp Set the default maximum struct packing alignment to 1
/Zp<value> Specify the default maximum struct packing alignment
/Zs Syntax-check only
OPTIONS:
- -### Print (but do not run) the commands to run for this compilation
+ -### Print (but do not run) the commands to run for this compilation
+ --analyze Run the static analyzer
+ -fansi-escape-codes Use ANSI escape codes for diagnostics
+ -fcolor-diagnostics Use colors in diagnostics
+ -fdiagnostics-parseable-fixits
+ Print fix-its in machine parseable form
-fms-compatibility-version=<value>
- Dot-separated value representing the Microsoft compiler version
- number to report in _MSC_VER (0 = don't define it (default))
- -fmsc-version=<value> Microsoft compiler version number to report in _MSC_VER (0 = don't
- define it (default))
+ Dot-separated value representing the Microsoft compiler version
+ number to report in _MSC_VER (0 = don't define it (default))
+ -fmsc-version=<value> Microsoft compiler version number to report in _MSC_VER (0 = don't
+ define it (default))
+ -fno-sanitize-coverage=<value>
+ Disable specified features of coverage instrumentation for Sanitizers
+ -fno-sanitize-recover=<value>
+ Disable recovery for specified sanitizers
+ -fno-sanitize-trap=<value>
+ Disable trapping for specified sanitizers
-fsanitize-blacklist=<value>
- Path to blacklist file for sanitizers
- -fsanitize=<check> Enable runtime instrumentation for bug detection: address (memory
- errors) | thread (race detection) | undefined (miscellaneous
- undefined behavior)
- -mllvm <value> Additional arguments to forward to LLVM's option processing
- -Qunused-arguments Don't emit warning for unused driver arguments
- --target=<value> Generate code for the given target
- -v Show commands to run and use verbose output
- -W<warning> Enable the specified warning
- -Xclang <arg> Pass <arg> to the clang compiler
+ Path to blacklist file for sanitizers
+ -fsanitize-coverage=<value>
+ Specify the type of coverage instrumentation for Sanitizers
+ -fsanitize-recover=<value>
+ Enable recovery for specified sanitizers
+ -fsanitize-trap=<value> Enable trapping for specified sanitizers
+ -fsanitize=<check> Turn on runtime checks for various forms of undefined or suspicious
+ behavior. See user manual for available checks
+ -mllvm <value> Additional arguments to forward to LLVM's option processing
+ -Qunused-arguments Don't emit warning for unused driver arguments
+ -R<remark> Enable the specified remark
+ --target=<value> Generate code for the given target
+ -v Show commands to run and use verbose output
+ -W<warning> Enable the specified warning
+ -Xclang <arg> Pass <arg> to the clang compiler
The /fallback Option
^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/doxygen.cfg.in b/docs/doxygen.cfg.in
index 39c9aed57e01..b4cfbbdf7f42 100644
--- a/docs/doxygen.cfg.in
+++ b/docs/doxygen.cfg.in
@@ -2205,7 +2205,7 @@ DIRECTORY_GRAPH = YES
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_IMAGE_FORMAT = png
+DOT_IMAGE_FORMAT = @DOT_IMAGE_FORMAT@
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
diff --git a/include/clang/AST/ASTVector.h b/include/clang/AST/ASTVector.h
index 6ec054582e26..79453bf10871 100644
--- a/include/clang/AST/ASTVector.h
+++ b/include/clang/AST/ASTVector.h
@@ -384,14 +384,15 @@ void ASTVector<T>::grow(const ASTContext &C, size_t MinSize) {
T *NewElts = new (C, llvm::alignOf<T>()) T[NewCapacity];
// Copy the elements over.
- if (std::is_class<T>::value) {
- std::uninitialized_copy(Begin, End, NewElts);
- // Destroy the original elements.
- destroy_range(Begin, End);
- }
- else {
- // Use memcpy for PODs (std::uninitialized_copy optimizes to memmove).
- memcpy(NewElts, Begin, CurSize * sizeof(T));
+ if (Begin != End) {
+ if (std::is_class<T>::value) {
+ std::uninitialized_copy(Begin, End, NewElts);
+ // Destroy the original elements.
+ destroy_range(Begin, End);
+ } else {
+ // Use memcpy for PODs (std::uninitialized_copy optimizes to memmove).
+ memcpy(NewElts, Begin, CurSize * sizeof(T));
+ }
}
// ASTContext never frees any memory.
diff --git a/include/clang/AST/NSAPI.h b/include/clang/AST/NSAPI.h
index ce2c7cea9e02..583f9d9f1deb 100644
--- a/include/clang/AST/NSAPI.h
+++ b/include/clang/AST/NSAPI.h
@@ -16,6 +16,7 @@
namespace clang {
class ASTContext;
+ class ObjCInterfaceDecl;
class QualType;
class Expr;
@@ -35,11 +36,10 @@ public:
ClassId_NSMutableDictionary,
ClassId_NSNumber,
ClassId_NSMutableSet,
- ClassId_NSCountedSet,
ClassId_NSMutableOrderedSet,
ClassId_NSValue
};
- static const unsigned NumClassIds = 11;
+ static const unsigned NumClassIds = 10;
enum NSStringMethodKind {
NSStr_stringWithString,
@@ -220,6 +220,10 @@ public:
/// \brief Returns \c true if \p Id is currently defined as a macro.
bool isMacroDefined(StringRef Id) const;
+ /// \brief Returns \c true if \p InterfaceDecl is subclass of \p NSClassKind
+ bool isSubclassOfNSClass(ObjCInterfaceDecl *InterfaceDecl,
+ NSClassIdKindKind NSClassKind) const;
+
private:
bool isObjCTypedef(QualType T, StringRef name, IdentifierInfo *&II) const;
bool isObjCEnumerator(const Expr *E,
diff --git a/include/clang/AST/StmtOpenMP.h b/include/clang/AST/StmtOpenMP.h
index b412daaf2858..708b8667335e 100644
--- a/include/clang/AST/StmtOpenMP.h
+++ b/include/clang/AST/StmtOpenMP.h
@@ -312,18 +312,26 @@ class OMPLoopDirective : public OMPExecutableDirective {
}
/// \brief Get the updates storage.
- MutableArrayRef<Expr *> getUpdates() {
+ MutableArrayRef<Expr *> getInits() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
+ /// \brief Get the updates storage.
+ MutableArrayRef<Expr *> getUpdates() {
+ Expr **Storage = reinterpret_cast<Expr **>(
+ &*std::next(child_begin(),
+ getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
+ return MutableArrayRef<Expr *>(Storage, CollapsedNum);
+ }
+
/// \brief Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
- getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
+ getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
@@ -358,7 +366,7 @@ protected:
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) +
- 3 * CollapsedNum; // Counters, Updates and Finals
+ 4 * CollapsedNum; // Counters, Inits, Updates and Finals
}
void setIterationVariable(Expr *IV) {
@@ -414,6 +422,7 @@ protected:
*std::next(child_begin(), NextUpperBoundOffset) = NUB;
}
void setCounters(ArrayRef<Expr *> A);
+ void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
@@ -453,6 +462,8 @@ public:
Expr *NUB;
/// \brief Counters Loop counters.
SmallVector<Expr *, 4> Counters;
+ /// \brief Expressions for loop counters inits for CodeGen.
+ SmallVector<Expr *, 4> Inits;
/// \brief Expressions for loop counters update for CodeGen.
SmallVector<Expr *, 4> Updates;
/// \brief Final loop counter values for GodeGen.
@@ -484,10 +495,12 @@ public:
NLB = nullptr;
NUB = nullptr;
Counters.resize(Size);
+ Inits.resize(Size);
Updates.resize(Size);
Finals.resize(Size);
for (unsigned i = 0; i < Size; ++i) {
Counters[i] = nullptr;
+ Inits[i] = nullptr;
Updates[i] = nullptr;
Finals[i] = nullptr;
}
@@ -584,6 +597,12 @@ public:
return const_cast<OMPLoopDirective *>(this)->getCounters();
}
+ ArrayRef<Expr *> inits() { return getInits(); }
+
+ ArrayRef<Expr *> inits() const {
+ return const_cast<OMPLoopDirective *>(this)->getInits();
+ }
+
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
diff --git a/include/clang/Analysis/Support/BumpVector.h b/include/clang/Analysis/Support/BumpVector.h
index 841adf64557d..3abe32d79cce 100644
--- a/include/clang/Analysis/Support/BumpVector.h
+++ b/include/clang/Analysis/Support/BumpVector.h
@@ -223,14 +223,15 @@ void BumpVector<T>::grow(BumpVectorContext &C, size_t MinSize) {
T *NewElts = C.getAllocator().template Allocate<T>(NewCapacity);
// Copy the elements over.
- if (std::is_class<T>::value) {
- std::uninitialized_copy(Begin, End, NewElts);
- // Destroy the original elements.
- destroy_range(Begin, End);
- }
- else {
- // Use memcpy for PODs (std::uninitialized_copy optimizes to memmove).
- memcpy(NewElts, Begin, CurSize * sizeof(T));
+ if (Begin != End) {
+ if (std::is_class<T>::value) {
+ std::uninitialized_copy(Begin, End, NewElts);
+ // Destroy the original elements.
+ destroy_range(Begin, End);
+ } else {
+ // Use memcpy for PODs (std::uninitialized_copy optimizes to memmove).
+ memcpy(NewElts, Begin, CurSize * sizeof(T));
+ }
}
// For now, leak 'Begin'. We can add it back to a freelist in
diff --git a/include/clang/Basic/Attr.td b/include/clang/Basic/Attr.td
index fb1eb58dcc70..6187bcb2c4bf 100644
--- a/include/clang/Basic/Attr.td
+++ b/include/clang/Basic/Attr.td
@@ -1133,7 +1133,7 @@ def ObjCRuntimeName : Attr {
def ObjCBoxable : Attr {
let Spellings = [GNU<"objc_boxable">];
let Subjects = SubjectList<[Record], ErrorDiag, "ExpectedStructOrUnion">;
- let Documentation = [Undocumented];
+ let Documentation = [ObjCBoxableDocs];
}
def OptimizeNone : InheritableAttr {
diff --git a/include/clang/Basic/AttrDocs.td b/include/clang/Basic/AttrDocs.td
index e4ca0cb4778e..48660166f6c6 100644
--- a/include/clang/Basic/AttrDocs.td
+++ b/include/clang/Basic/AttrDocs.td
@@ -492,6 +492,34 @@ can only be placed before an @protocol or @interface declaration:
}];
}
+def ObjCBoxableDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+Structs and unions marked with the ``objc_boxable`` attribute can be used
+with the Objective-C boxed expression syntax, ``@(...)``.
+
+**Usage**: ``__attribute__((objc_boxable))``. This attribute
+can only be placed on a declaration of a trivially-copyable struct or union:
+
+.. code-block:: objc
+
+ struct __attribute__((objc_boxable)) some_struct {
+ int i;
+ };
+ union __attribute__((objc_boxable)) some_union {
+ int i;
+ float f;
+ };
+ typedef struct __attribute__((objc_boxable)) _some_struct some_struct;
+
+ // ...
+
+ some_struct ss;
+ NSValue *boxed = @(ss);
+
+ }];
+}
+
def AvailabilityDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
diff --git a/include/clang/Basic/DiagnosticCommonKinds.td b/include/clang/Basic/DiagnosticCommonKinds.td
index ff42683ecdde..fc3ca62113e5 100644
--- a/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/include/clang/Basic/DiagnosticCommonKinds.td
@@ -195,6 +195,8 @@ def err_unable_to_make_temp : Error<
// Modules
def err_module_file_conflict : Error<"module '%0' found in both '%1' and '%2'">;
+def err_module_format_unhandled : Error<
+ "no handler registered for module format '%0'">;
// TransformActions
// TODO: Use a custom category name to distinguish rewriter errors.
diff --git a/include/clang/Basic/DiagnosticParseKinds.td b/include/clang/Basic/DiagnosticParseKinds.td
index 1364b982ecf1..e4f859943266 100644
--- a/include/clang/Basic/DiagnosticParseKinds.td
+++ b/include/clang/Basic/DiagnosticParseKinds.td
@@ -358,6 +358,10 @@ def err_invalid_pixel_decl_spec_combination : Error<
"'%0' declaration specifier not allowed here">;
def err_invalid_vector_bool_decl_spec : Error<
"cannot use '%0' with '__vector bool'">;
+def err_invalid_vector_long_decl_spec : Error<
+ "cannot use 'long' with '__vector'">;
+def err_invalid_vector_float_decl_spec : Error<
+ "cannot use 'float' with '__vector'">;
def err_invalid_vector_double_decl_spec : Error <
"use of 'double' with '__vector' requires VSX support to be enabled "
"(available on POWER7 or later)">;
diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td
index fb1e3f1cb46e..82f512130885 100644
--- a/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/include/clang/Basic/DiagnosticSemaKinds.td
@@ -5358,7 +5358,7 @@ def err_objc_object_catch : Error<
def err_incomplete_type_objc_at_encode : Error<
"'@encode' of incomplete type %0">;
def warn_objc_circular_container : Warning<
- "adding '%0' to '%0' might cause circular dependency in container">,
+ "adding '%0' to '%1' might cause circular dependency in container">,
InGroup<DiagGroup<"objc-circular-container">>;
def note_objc_circular_container_declared_here : Note<"'%0' declared here">;
diff --git a/include/clang/Basic/LangOptions.def b/include/clang/Basic/LangOptions.def
index 8d606a12d046..c184df77c37e 100644
--- a/include/clang/Basic/LangOptions.def
+++ b/include/clang/Basic/LangOptions.def
@@ -104,6 +104,7 @@ LANGOPT(WritableStrings , 1, 0, "writable string support")
LANGOPT(ConstStrings , 1, 0, "const-qualified string support")
LANGOPT(LaxVectorConversions , 1, 1, "lax vector conversions")
LANGOPT(AltiVec , 1, 0, "AltiVec-style vector initializers")
+LANGOPT(ZVector , 1, 0, "System z vector extensions")
LANGOPT(Exceptions , 1, 0, "exception handling")
LANGOPT(ObjCExceptions , 1, 0, "Objective-C exceptions")
LANGOPT(CXXExceptions , 1, 0, "C++ exceptions")
diff --git a/include/clang/Basic/TokenKinds.def b/include/clang/Basic/TokenKinds.def
index 7a91c9f502b7..8333a4ccf8db 100644
--- a/include/clang/Basic/TokenKinds.def
+++ b/include/clang/Basic/TokenKinds.def
@@ -239,6 +239,8 @@ PUNCTUATOR(greatergreatergreater, ">>>")
// KEYOPENCL - This is a keyword in OpenCL
// KEYNOOPENCL - This is a keyword that is not supported in OpenCL
// KEYALTIVEC - This is a keyword in AltiVec
+// KEYZVECTOR - This is a keyword for the System z vector extensions,
+// which are heavily based on AltiVec
// KEYBORLAND - This is a keyword if Borland extensions are enabled
// BOOLSUPPORT - This is a keyword if 'bool' is a built-in type
// HALFSUPPORT - This is a keyword if 'half' is a built-in type
@@ -501,7 +503,7 @@ ALIAS("write_only", __write_only , KEYOPENCL)
ALIAS("read_write", __read_write , KEYOPENCL)
// OpenCL builtins
KEYWORD(__builtin_astype , KEYOPENCL)
-KEYWORD(vec_step , KEYOPENCL|KEYALTIVEC)
+KEYWORD(vec_step , KEYOPENCL|KEYALTIVEC|KEYZVECTOR)
// OpenMP Type Traits
KEYWORD(__builtin_omp_required_simd_align, KEYALL)
@@ -510,9 +512,9 @@ KEYWORD(__builtin_omp_required_simd_align, KEYALL)
KEYWORD(__pascal , KEYALL)
// Altivec Extension.
-KEYWORD(__vector , KEYALTIVEC)
+KEYWORD(__vector , KEYALTIVEC|KEYZVECTOR)
KEYWORD(__pixel , KEYALTIVEC)
-KEYWORD(__bool , KEYALTIVEC)
+KEYWORD(__bool , KEYALTIVEC|KEYZVECTOR)
// ARM NEON extensions.
ALIAS("__fp16", half , KEYALL)
diff --git a/include/clang/CodeGen/ObjectFilePCHContainerOperations.h b/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
index 4540efb29781..e82aab787515 100644
--- a/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
+++ b/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
@@ -14,30 +14,32 @@
namespace clang {
-/// \brief A PCHContainerOperations implementation that uses LLVM to
+/// A PCHContainerWriter implementation that uses LLVM to
/// wraps Clang modules inside a COFF, ELF, or Mach-O container.
-class ObjectFilePCHContainerOperations
- : public PCHContainerOperations {
- /// \brief Return an ASTConsumer that can be chained with a
+class ObjectFilePCHContainerWriter : public PCHContainerWriter {
+ StringRef getFormat() const override { return "obj"; }
+
+ /// Return an ASTConsumer that can be chained with a
/// PCHGenerator that produces a wrapper file format
/// that also contains full debug info for the module.
- std::unique_ptr<ASTConsumer>
- CreatePCHContainerGenerator(
+ std::unique_ptr<ASTConsumer> CreatePCHContainerGenerator(
DiagnosticsEngine &Diags, const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO, const TargetOptions &TO,
const LangOptions &LO, const std::string &MainFileName,
const std::string &OutputFileName, llvm::raw_pwrite_stream *OS,
std::shared_ptr<PCHBuffer> Buffer) const override;
+};
+
+/// A PCHContainerReader implementation that uses LLVM to
+/// wraps Clang modules inside a COFF, ELF, or Mach-O container.
+class ObjectFilePCHContainerReader : public PCHContainerReader {
+ StringRef getFormat() const override { return "obj"; }
- /// \brief Initialize an llvm::BitstreamReader with the serialized
+ /// Initialize an llvm::BitstreamReader with the serialized
/// AST inside the PCH container Buffer.
void ExtractPCH(llvm::MemoryBufferRef Buffer,
llvm::BitstreamReader &StreamFile) const override;
-
-
};
-
}
-
#endif
diff --git a/include/clang/Driver/CC1Options.td b/include/clang/Driver/CC1Options.td
index 60cc6ec11568..d2f0d05eedb8 100644
--- a/include/clang/Driver/CC1Options.td
+++ b/include/clang/Driver/CC1Options.td
@@ -369,6 +369,9 @@ def fmodules_local_submodule_visibility :
Flag<["-"], "fmodules-local-submodule-visibility">,
HelpText<"Enforce name visibility rules across submodules of the same "
"top-level module.">;
+def fmodule_format_EQ : Joined<["-"], "fmodule-format=">,
+ HelpText<"Select the container format for clang modules and PCH. "
+ "Supported options are 'raw' and 'obj'.">;
def fno_modules_hide_internal_linkage :
Flag<["-"], "fno-modules-hide-internal-linkage">,
HelpText<"Make all declarations visible to redeclaration lookup, "
diff --git a/include/clang/Driver/Options.td b/include/clang/Driver/Options.td
index 6e5dbf225bb6..9d3e2cfccdc7 100644
--- a/include/clang/Driver/Options.td
+++ b/include/clang/Driver/Options.td
@@ -1351,6 +1351,13 @@ def mno_altivec : Flag<["-"], "mno-altivec">, Alias<fno_altivec>;
def mvx : Flag<["-"], "mvx">, Group<m_Group>;
def mno_vx : Flag<["-"], "mno-vx">, Group<m_Group>;
+def fzvector : Flag<["-"], "fzvector">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable System z vector language extension">;
+def fno_zvector : Flag<["-"], "fno-zvector">, Group<f_Group>,
+ Flags<[CC1Option]>;
+def mzvector : Flag<["-"], "mzvector">, Alias<fzvector>;
+def mno_zvector : Flag<["-"], "mno-zvector">, Alias<fno_zvector>;
+
def mno_warn_nonportable_cfstrings : Flag<["-"], "mno-warn-nonportable-cfstrings">, Group<m_Group>;
def mno_omit_leaf_frame_pointer : Flag<["-"], "mno-omit-leaf-frame-pointer">, Group<m_Group>;
def momit_leaf_frame_pointer : Flag<["-"], "momit-leaf-frame-pointer">, Group<m_Group>,
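Driver-wise, -fzvector/-fno-zvector are the canonical spellings and -mzvector/-mno-zvector are GCC-compatible aliases; a hypothetical invocation on a SystemZ target would look like clang -target s390x-ibm-linux-gnu -march=z13 -fzvector -c foo.c (z13 being the first CPU level that provides the vector facility).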
diff --git a/include/clang/Frontend/ASTUnit.h b/include/clang/Frontend/ASTUnit.h
index 2d38352d22a8..fa4bcf2edd22 100644
--- a/include/clang/Frontend/ASTUnit.h
+++ b/include/clang/Frontend/ASTUnit.h
@@ -57,6 +57,7 @@ class FileManager;
class HeaderSearch;
class Preprocessor;
class PCHContainerOperations;
+class PCHContainerReader;
class SourceManager;
class TargetInfo;
class ASTFrontendAction;
@@ -725,8 +726,7 @@ public:
///
/// \returns - The initialized ASTUnit or null if the AST failed to load.
static std::unique_ptr<ASTUnit> LoadFromASTFile(
- const std::string &Filename,
- std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts, bool OnlyLocalDecls = false,
ArrayRef<RemappedFile> RemappedFiles = None,
diff --git a/include/clang/Frontend/CompilerInstance.h b/include/clang/Frontend/CompilerInstance.h
index 2f3e1b6cebb1..45e5ed12046f 100644
--- a/include/clang/Frontend/CompilerInstance.h
+++ b/include/clang/Frontend/CompilerInstance.h
@@ -183,7 +183,7 @@ class CompilerInstance : public ModuleLoader {
public:
explicit CompilerInstance(
std::shared_ptr<PCHContainerOperations> PCHContainerOps =
- std::make_shared<RawPCHContainerOperations>(),
+ std::make_shared<PCHContainerOperations>(),
bool BuildingModule = false);
~CompilerInstance() override;
@@ -508,6 +508,34 @@ public:
return ThePCHContainerOperations;
}
+ /// Return the appropriate PCHContainerWriter for the module format
+ /// selected by the current HeaderSearchOptions.
+ const PCHContainerWriter &getPCHContainerWriter() const {
+ assert(Invocation && "cannot determine module format without invocation");
+ StringRef Format = getHeaderSearchOpts().ModuleFormat;
+ auto *Writer = ThePCHContainerOperations->getWriterOrNull(Format);
+ if (!Writer) {
+ if (Diagnostics)
+ Diagnostics->Report(diag::err_module_format_unhandled) << Format;
+ llvm::report_fatal_error("unknown module format");
+ }
+ return *Writer;
+ }
+
+ /// Return the appropriate PCHContainerReader for the module format
+ /// selected by the current HeaderSearchOptions.
+ const PCHContainerReader &getPCHContainerReader() const {
+ assert(Invocation && "cannot determine module format without invocation");
+ StringRef Format = getHeaderSearchOpts().ModuleFormat;
+ auto *Reader = ThePCHContainerOperations->getReaderOrNull(Format);
+ if (!Reader) {
+ if (Diagnostics)
+ Diagnostics->Report(diag::err_module_format_unhandled) << Format;
+ llvm::report_fatal_error("unknown module format");
+ }
+ return *Reader;
+ }
+
/// }
/// @name Code Completion
/// {
@@ -621,7 +649,7 @@ public:
static IntrusiveRefCntPtr<ASTReader> createPCHExternalASTSource(
StringRef Path, StringRef Sysroot, bool DisablePCHValidation,
bool AllowPCHWithCompilerErrors, Preprocessor &PP, ASTContext &Context,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
void *DeserializationListener, bool OwnDeserializationListener,
bool Preamble, bool UseGlobalModuleIndex);
diff --git a/include/clang/Frontend/PCHContainerOperations.h b/include/clang/Frontend/PCHContainerOperations.h
index 949ee63c615d..868ea6866ad6 100644
--- a/include/clang/Frontend/PCHContainerOperations.h
+++ b/include/clang/Frontend/PCHContainerOperations.h
@@ -11,6 +11,7 @@
#define LLVM_CLANG_PCH_CONTAINER_OPERATIONS_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
@@ -19,6 +20,8 @@ class raw_pwrite_stream;
class BitstreamReader;
}
+using llvm::StringRef;
+
namespace clang {
class ASTConsumer;
@@ -33,14 +36,16 @@ struct PCHBuffer {
bool IsComplete;
llvm::SmallVector<char, 0> Data;
};
+
+/// This abstract interface provides operations for creating
+/// containers for serialized ASTs (precompiled headers and clang
+/// modules).
+class PCHContainerWriter {
+public:
+ virtual ~PCHContainerWriter() = 0;
+ virtual StringRef getFormat() const = 0;
-/// \brief This abstract interface provides operations for creating
-/// and unwrapping containers for serialized ASTs (precompiled headers
-/// and clang modules).
-class PCHContainerOperations {
-public:
- virtual ~PCHContainerOperations();
- /// \brief Return an ASTConsumer that can be chained with a
+ /// Return an ASTConsumer that can be chained with a
/// PCHGenerator that produces a wrapper file format containing a
/// serialized AST bitstream.
virtual std::unique_ptr<ASTConsumer> CreatePCHContainerGenerator(
@@ -49,16 +54,28 @@ public:
const LangOptions &LO, const std::string &MainFileName,
const std::string &OutputFileName, llvm::raw_pwrite_stream *OS,
std::shared_ptr<PCHBuffer> Buffer) const = 0;
+};
- /// \brief Initialize an llvm::BitstreamReader with the serialized AST inside
+/// This abstract interface provides operations for unwrapping
+/// containers for serialized ASTs (precompiled headers and clang
+/// modules).
+class PCHContainerReader {
+public:
+ virtual ~PCHContainerReader() = 0;
+ /// Equivalent to the format passed to -fmodule-format=
+ virtual StringRef getFormat() const = 0;
+
+ /// Initialize an llvm::BitstreamReader with the serialized AST inside
/// the PCH container Buffer.
virtual void ExtractPCH(llvm::MemoryBufferRef Buffer,
llvm::BitstreamReader &StreamFile) const = 0;
};
-/// \brief Implements a raw pass-through PCH container.
-class RawPCHContainerOperations : public PCHContainerOperations {
- /// \brief Return an ASTConsumer that can be chained with a
+/// Implements write operations for a raw pass-through PCH container.
+class RawPCHContainerWriter : public PCHContainerWriter {
+ StringRef getFormat() const override { return "raw"; }
+
+ /// Return an ASTConsumer that can be chained with a
/// PCHGenerator that writes the module to a flat file.
std::unique_ptr<ASTConsumer> CreatePCHContainerGenerator(
DiagnosticsEngine &Diags, const HeaderSearchOptions &HSO,
@@ -66,11 +83,42 @@ class RawPCHContainerOperations : public PCHContainerOperations {
const LangOptions &LO, const std::string &MainFileName,
const std::string &OutputFileName, llvm::raw_pwrite_stream *OS,
std::shared_ptr<PCHBuffer> Buffer) const override;
+};
- /// \brief Initialize an llvm::BitstreamReader with Buffer.
+/// Implements read operations for a raw pass-through PCH container.
+class RawPCHContainerReader : public PCHContainerReader {
+ StringRef getFormat() const override { return "raw"; }
+
+ /// Initialize an llvm::BitstreamReader with Buffer.
void ExtractPCH(llvm::MemoryBufferRef Buffer,
llvm::BitstreamReader &StreamFile) const override;
};
+
+/// A registry of PCHContainerWriter and -Reader objects for different formats.
+class PCHContainerOperations {
+ llvm::StringMap<std::unique_ptr<PCHContainerWriter>> Writers;
+ llvm::StringMap<std::unique_ptr<PCHContainerReader>> Readers;
+public:
+ /// Automatically registers a RawPCHContainerWriter and
+ /// RawPCHContainerReader.
+ PCHContainerOperations();
+ void registerWriter(std::unique_ptr<PCHContainerWriter> Writer) {
+ Writers[Writer->getFormat()] = std::move(Writer);
+ }
+ void registerReader(std::unique_ptr<PCHContainerReader> Reader) {
+ Readers[Reader->getFormat()] = std::move(Reader);
+ }
+ const PCHContainerWriter *getWriterOrNull(StringRef Format) {
+ return Writers[Format].get();
+ }
+ const PCHContainerReader *getReaderOrNull(StringRef Format) {
+ return Readers[Format].get();
+ }
+ const PCHContainerReader &getRawReader() {
+ return *getReaderOrNull("raw");
+ }
+};
+
}
#endif
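A minimal sketch, assuming the declarations above plus the ObjectFilePCHContainerWriter/Reader from include/clang/CodeGen, of how a client would populate the new registry; the default-constructed registry already knows "raw", and CompilerInstance::getPCHContainerWriter()/Reader() in the CompilerInstance hunk earlier then dispatch on HeaderSearchOptions::ModuleFormat:

    auto PCHOps = std::make_shared<clang::PCHContainerOperations>();  // pre-registers "raw"
    PCHOps->registerWriter(llvm::make_unique<clang::ObjectFilePCHContainerWriter>());
    PCHOps->registerReader(llvm::make_unique<clang::ObjectFilePCHContainerReader>());
    clang::CompilerInstance Clang(PCHOps);  // format then chosen via -fmodule-format=raw|obj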
diff --git a/include/clang/Frontend/Utils.h b/include/clang/Frontend/Utils.h
index 639965343c45..aa567b4c0279 100644
--- a/include/clang/Frontend/Utils.h
+++ b/include/clang/Frontend/Utils.h
@@ -45,7 +45,7 @@ class HeaderSearch;
class HeaderSearchOptions;
class IdentifierTable;
class LangOptions;
-class PCHContainerOperations;
+class PCHContainerReader;
class Preprocessor;
class PreprocessorOptions;
class PreprocessorOutputOptions;
@@ -63,7 +63,7 @@ void ApplyHeaderSearchOptions(HeaderSearch &HS,
/// InitializePreprocessor - Initialize the preprocessor getting it and the
/// environment ready to process a single file.
void InitializePreprocessor(Preprocessor &PP, const PreprocessorOptions &PPOpts,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
const FrontendOptions &FEOpts);
/// DoPrintPreprocessedInput - Implement -E mode.
diff --git a/include/clang/Lex/HeaderSearchOptions.h b/include/clang/Lex/HeaderSearchOptions.h
index c9c32609f19b..12f0447a1169 100644
--- a/include/clang/Lex/HeaderSearchOptions.h
+++ b/include/clang/Lex/HeaderSearchOptions.h
@@ -92,6 +92,9 @@ public:
/// \brief The directory used for a user build.
std::string ModuleUserBuildPath;
+ /// The module/pch container format.
+ std::string ModuleFormat;
+
/// \brief Whether we should disable the use of the hash string within the
/// module cache.
///
@@ -167,16 +170,14 @@ public:
public:
HeaderSearchOptions(StringRef _Sysroot = "/")
- : Sysroot(_Sysroot), DisableModuleHash(0), ImplicitModuleMaps(0),
- ModuleMapFileHomeIsCwd(0),
- ModuleCachePruneInterval(7*24*60*60),
- ModuleCachePruneAfter(31*24*60*60),
- BuildSessionTimestamp(0),
- UseBuiltinIncludes(true),
- UseStandardSystemIncludes(true), UseStandardCXXIncludes(true),
- UseLibcxx(false), Verbose(false),
- ModulesValidateOncePerBuildSession(false),
- ModulesValidateSystemHeaders(false) {}
+ : Sysroot(_Sysroot), ModuleFormat("raw"), DisableModuleHash(0),
+ ImplicitModuleMaps(0), ModuleMapFileHomeIsCwd(0),
+ ModuleCachePruneInterval(7 * 24 * 60 * 60),
+ ModuleCachePruneAfter(31 * 24 * 60 * 60), BuildSessionTimestamp(0),
+ UseBuiltinIncludes(true), UseStandardSystemIncludes(true),
+ UseStandardCXXIncludes(true), UseLibcxx(false), Verbose(false),
+ ModulesValidateOncePerBuildSession(false),
+ ModulesValidateSystemHeaders(false) {}
/// AddPath - Add the \p Path path to the specified \p Group list.
void AddPath(StringRef Path, frontend::IncludeDirGroup Group,
diff --git a/include/clang/Parse/Parser.h b/include/clang/Parse/Parser.h
index fb9eb8ff5af8..8719555be914 100644
--- a/include/clang/Parse/Parser.h
+++ b/include/clang/Parse/Parser.h
@@ -108,12 +108,13 @@ class Parser : public CodeCompletionHandler {
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
- /// Ident_vector, Ident_pixel, Ident_bool - cached IdentifierInfo's
- /// for "vector", "pixel", and "bool" fast comparison. Only present
- /// if AltiVec enabled.
+ /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
+ /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
- IdentifierInfo *Ident_pixel;
IdentifierInfo *Ident_bool;
+ /// Ident_pixel - cached IdentifierInfo for "pixel" fast comparison.
+ /// Only present if AltiVec is enabled.
+ IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
@@ -605,10 +606,12 @@ private:
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
- if (!getLangOpts().AltiVec ||
- (Tok.getIdentifierInfo() != Ident_vector &&
- Tok.getIdentifierInfo() != Ident_pixel &&
- Tok.getIdentifierInfo() != Ident_bool))
+ if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
+ return false;
+
+ if (Tok.getIdentifierInfo() != Ident_vector &&
+ Tok.getIdentifierInfo() != Ident_bool &&
+ (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
@@ -618,7 +621,7 @@ private:
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
- if (!getLangOpts().AltiVec ||
+ if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
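One reading of the two helpers above, as illustrative declarations only:

    // accepted with either -maltivec or -fzvector:
    vector signed int v;
    vector bool int b;
    // accepted only under -maltivec, since Ident_pixel stays AltiVec-gated:
    vector pixel p;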
diff --git a/include/clang/Sema/Sema.h b/include/clang/Sema/Sema.h
index db7b6f954c1b..72044336a83b 100644
--- a/include/clang/Sema/Sema.h
+++ b/include/clang/Sema/Sema.h
@@ -729,27 +729,12 @@ public:
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
- /// \brief Pointer to NSMutableArray type (NSMutableArray *).
- QualType NSMutableArrayPointer;
-
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
- /// \brief Pointer to NSMutableDictionary type (NSMutableDictionary *).
- QualType NSMutableDictionaryPointer;
-
- /// \brief Pointer to NSMutableSet type (NSMutableSet *).
- QualType NSMutableSetPointer;
-
- /// \brief Pointer to NSCountedSet type (NSCountedSet *).
- QualType NSCountedSetPointer;
-
- /// \brief Pointer to NSMutableOrderedSet type (NSMutableOrderedSet *).
- QualType NSMutableOrderedSetPointer;
-
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
@@ -8363,7 +8348,8 @@ public:
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
- SourceLocation Loc, bool IsCompAssign);
+ SourceLocation Loc, bool IsCompAssign,
+ bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
diff --git a/include/clang/Serialization/ASTReader.h b/include/clang/Serialization/ASTReader.h
index 9377dfac99cc..840655ea43e3 100644
--- a/include/clang/Serialization/ASTReader.h
+++ b/include/clang/Serialization/ASTReader.h
@@ -362,7 +362,7 @@ private:
SourceManager &SourceMgr;
FileManager &FileMgr;
- const PCHContainerOperations &PCHContainerOps;
+ const PCHContainerReader &PCHContainerRdr;
DiagnosticsEngine &Diags;
/// \brief The semantic analysis object that will be processing the
@@ -1289,7 +1289,7 @@ public:
/// \param ReadTimer If non-null, a timer used to track the time spent
/// deserializing.
ASTReader(Preprocessor &PP, ASTContext &Context,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
StringRef isysroot = "", bool DisableValidation = false,
bool AllowASTWithCompilerErrors = false,
bool AllowConfigurationMismatch = false,
@@ -1458,7 +1458,7 @@ public:
/// the AST file, without actually loading the AST file.
static std::string
getOriginalSourceFile(const std::string &ASTFileName, FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
DiagnosticsEngine &Diags);
/// \brief Read the control block for the named AST file.
@@ -1466,13 +1466,13 @@ public:
/// \returns true if an error occurred, false otherwise.
static bool
readASTFileControlBlock(StringRef Filename, FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
ASTReaderListener &Listener);
/// \brief Determine whether the given AST file is acceptable to load into a
/// translation unit with the given language and target options.
static bool isAcceptableASTFile(StringRef Filename, FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
const LangOptions &LangOpts,
const TargetOptions &TargetOpts,
const PreprocessorOptions &PPOpts,
diff --git a/include/clang/Serialization/GlobalModuleIndex.h b/include/clang/Serialization/GlobalModuleIndex.h
index 7e205106c4ee..ba4f7e216aec 100644
--- a/include/clang/Serialization/GlobalModuleIndex.h
+++ b/include/clang/Serialization/GlobalModuleIndex.h
@@ -198,10 +198,9 @@ public:
/// \param Path The path to the directory containing module files, into
/// which the global index will be written.
static ErrorCode writeIndex(FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
StringRef Path);
};
-
}
#endif
diff --git a/include/clang/Serialization/ModuleManager.h b/include/clang/Serialization/ModuleManager.h
index ea4b57fa3ace..ab39aefa940b 100644
--- a/include/clang/Serialization/ModuleManager.h
+++ b/include/clang/Serialization/ModuleManager.h
@@ -24,7 +24,7 @@ namespace clang {
class GlobalModuleIndex;
class ModuleMap;
-class PCHContainerOperations;
+class PCHContainerReader;
namespace serialization {
@@ -52,7 +52,7 @@ class ModuleManager {
FileManager &FileMgr;
/// \brief Knows how to unwrap module containers.
- const PCHContainerOperations &PCHContainerOps;
+ const PCHContainerReader &PCHContainerRdr;
/// \brief A lookup of in-memory (virtual file) buffers
llvm::DenseMap<const FileEntry *, std::unique_ptr<llvm::MemoryBuffer>>
@@ -118,9 +118,9 @@ public:
typedef std::pair<uint32_t, StringRef> ModuleOffset;
explicit ModuleManager(FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps);
+ const PCHContainerReader &PCHContainerRdr);
~ModuleManager();
-
+
/// \brief Forward iterator to traverse all loaded modules. This is reverse
/// source-order.
ModuleIterator begin() { return Chain.begin(); }
diff --git a/include/clang/Tooling/Refactoring.h b/include/clang/Tooling/Refactoring.h
index 944fd41f85c1..54deff6e3661 100644
--- a/include/clang/Tooling/Refactoring.h
+++ b/include/clang/Tooling/Refactoring.h
@@ -40,7 +40,7 @@ public:
RefactoringTool(const CompilationDatabase &Compilations,
ArrayRef<std::string> SourcePaths,
std::shared_ptr<PCHContainerOperations> PCHContainerOps =
- std::make_shared<RawPCHContainerOperations>());
+ std::make_shared<PCHContainerOperations>());
/// \brief Returns the set of replacements to which replacements should
/// be added during the run of the tool.
diff --git a/include/clang/Tooling/Tooling.h b/include/clang/Tooling/Tooling.h
index fb887e1df9e8..92e9065c6a8a 100644
--- a/include/clang/Tooling/Tooling.h
+++ b/include/clang/Tooling/Tooling.h
@@ -150,7 +150,7 @@ inline std::unique_ptr<FrontendActionFactory> newFrontendActionFactory(
bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
const Twine &FileName = "input.cc",
std::shared_ptr<PCHContainerOperations> PCHContainerOps =
- std::make_shared<RawPCHContainerOperations>());
+ std::make_shared<PCHContainerOperations>());
/// The first part of the pair is the filename, the second part the
/// file-content.
@@ -171,7 +171,7 @@ bool runToolOnCodeWithArgs(
clang::FrontendAction *ToolAction, const Twine &Code,
const std::vector<std::string> &Args, const Twine &FileName = "input.cc",
std::shared_ptr<PCHContainerOperations> PCHContainerOps =
- std::make_shared<RawPCHContainerOperations>(),
+ std::make_shared<PCHContainerOperations>(),
const FileContentMappings &VirtualMappedFiles = FileContentMappings());
/// \brief Builds an AST for 'Code'.
@@ -185,7 +185,7 @@ bool runToolOnCodeWithArgs(
std::unique_ptr<ASTUnit>
buildASTFromCode(const Twine &Code, const Twine &FileName = "input.cc",
std::shared_ptr<PCHContainerOperations> PCHContainerOps =
- std::make_shared<RawPCHContainerOperations>());
+ std::make_shared<PCHContainerOperations>());
/// \brief Builds an AST for 'Code' with additional flags.
///
@@ -200,7 +200,7 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
const Twine &Code, const std::vector<std::string> &Args,
const Twine &FileName = "input.cc",
std::shared_ptr<PCHContainerOperations> PCHContainerOps =
- std::make_shared<RawPCHContainerOperations>());
+ std::make_shared<PCHContainerOperations>());
/// \brief Utility to run a FrontendAction in a single clang invocation.
class ToolInvocation {
@@ -219,7 +219,7 @@ public:
ToolInvocation(std::vector<std::string> CommandLine, FrontendAction *FAction,
FileManager *Files,
std::shared_ptr<PCHContainerOperations> PCHContainerOps =
- std::make_shared<RawPCHContainerOperations>());
+ std::make_shared<PCHContainerOperations>());
/// \brief Create a tool invocation.
///
@@ -288,7 +288,7 @@ class ClangTool {
ClangTool(const CompilationDatabase &Compilations,
ArrayRef<std::string> SourcePaths,
std::shared_ptr<PCHContainerOperations> PCHContainerOps =
- std::make_shared<RawPCHContainerOperations>());
+ std::make_shared<PCHContainerOperations>());
~ClangTool();
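Because the defaults now construct a plain PCHContainerOperations, which pre-registers the raw reader and writer, existing tooling code keeps working unchanged; a minimal sketch using runToolOnCode from this header plus SyntaxOnlyAction from Frontend:

    clang::tooling::runToolOnCode(new clang::SyntaxOnlyAction, "int x;");

A tool wanting object-file containers would instead pass a registry populated as sketched under PCHContainerOperations.h above.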
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
index f33ad21cf029..e32884218597 100644
--- a/lib/ARCMigrate/ARCMT.cpp
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -167,7 +167,7 @@ static bool HasARCRuntime(CompilerInvocation &origCI) {
static CompilerInvocation *
createInvocationForMigration(CompilerInvocation &origCI,
- const PCHContainerOperations &PCHContainerOps) {
+ const PCHContainerReader &PCHContainerRdr) {
std::unique_ptr<CompilerInvocation> CInvok;
CInvok.reset(new CompilerInvocation(origCI));
PreprocessorOptions &PPOpts = CInvok->getPreprocessorOpts();
@@ -180,7 +180,7 @@ createInvocationForMigration(CompilerInvocation &origCI,
new DiagnosticsEngine(DiagID, &origCI.getDiagnosticOpts(),
new IgnoringDiagConsumer()));
std::string OriginalFile = ASTReader::getOriginalSourceFile(
- PPOpts.ImplicitPCHInclude, FileMgr, PCHContainerOps, *Diags);
+ PPOpts.ImplicitPCHInclude, FileMgr, PCHContainerRdr, *Diags);
if (!OriginalFile.empty())
PPOpts.Includes.insert(PPOpts.Includes.begin(), OriginalFile);
PPOpts.ImplicitPCHInclude.clear();
@@ -247,7 +247,8 @@ bool arcmt::checkForManualIssues(
assert(!transforms.empty());
std::unique_ptr<CompilerInvocation> CInvok;
- CInvok.reset(createInvocationForMigration(origCI, *PCHContainerOps));
+ CInvok.reset(
+ createInvocationForMigration(origCI, PCHContainerOps->getRawReader()));
CInvok->getFrontendOpts().Inputs.clear();
CInvok->getFrontendOpts().Inputs.push_back(Input);
@@ -517,7 +518,8 @@ MigrationProcess::MigrationProcess(
bool MigrationProcess::applyTransform(TransformFn trans,
RewriteListener *listener) {
std::unique_ptr<CompilerInvocation> CInvok;
- CInvok.reset(createInvocationForMigration(OrigCI, *PCHContainerOps));
+ CInvok.reset(
+ createInvocationForMigration(OrigCI, PCHContainerOps->getRawReader()));
CInvok->getDiagnosticOpts().IgnoreWarnings = true;
Remapper.applyMappings(CInvok->getPreprocessorOpts());
diff --git a/lib/AST/NSAPI.cpp b/lib/AST/NSAPI.cpp
index a9b10ed451c0..c9264d59aae3 100644
--- a/lib/AST/NSAPI.cpp
+++ b/lib/AST/NSAPI.cpp
@@ -9,6 +9,7 @@
#include "clang/AST/NSAPI.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/StringSwitch.h"
@@ -29,7 +30,6 @@ IdentifierInfo *NSAPI::getNSClassId(NSClassIdKindKind K) const {
"NSMutableDictionary",
"NSNumber",
"NSMutableSet",
- "NSCountedSet",
"NSMutableOrderedSet",
"NSValue"
};
@@ -511,6 +511,26 @@ bool NSAPI::isMacroDefined(StringRef Id) const {
return Ctx.Idents.get(Id).hasMacroDefinition();
}
+bool NSAPI::isSubclassOfNSClass(ObjCInterfaceDecl *InterfaceDecl,
+ NSClassIdKindKind NSClassKind) const {
+ if (!InterfaceDecl) {
+ return false;
+ }
+
+ IdentifierInfo *NSClassID = getNSClassId(NSClassKind);
+
+ bool IsSubclass = false;
+ do {
+ IsSubclass = NSClassID == InterfaceDecl->getIdentifier();
+
+ if (IsSubclass) {
+ break;
+ }
+ } while ((InterfaceDecl = InterfaceDecl->getSuperClass()));
+
+ return IsSubclass;
+}
+
bool NSAPI::isObjCTypedef(QualType T,
StringRef name, IdentifierInfo *&II) const {
if (!Ctx.getLangOpts().ObjC1)
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index 50a00502ca9f..97425d001de0 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -435,17 +435,19 @@ TypeLoc NestedNameSpecifierLoc::getTypeLoc() const {
namespace {
void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
unsigned &BufferCapacity) {
+ if (Start == End)
+ return;
+
if (BufferSize + (End - Start) > BufferCapacity) {
// Reallocate the buffer.
- unsigned NewCapacity
- = std::max((unsigned)(BufferCapacity? BufferCapacity * 2
- : sizeof(void*) * 2),
- (unsigned)(BufferSize + (End - Start)));
+ unsigned NewCapacity = std::max(
+ (unsigned)(BufferCapacity ? BufferCapacity * 2 : sizeof(void *) * 2),
+ (unsigned)(BufferSize + (End - Start)));
char *NewBuffer = static_cast<char *>(malloc(NewCapacity));
- memcpy(NewBuffer, Buffer, BufferSize);
-
- if (BufferCapacity)
+ if (BufferCapacity) {
+ memcpy(NewBuffer, Buffer, BufferSize);
free(Buffer);
+ }
Buffer = NewBuffer;
BufferCapacity = NewCapacity;
}
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index 388c91c9cbc0..de7bcb826ebb 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -2014,6 +2014,12 @@ static const CXXMethodDecl *computeKeyFunction(ASTContext &Context,
continue;
}
+ // If the key function is dllimport but the class isn't, then the class has
+ // no key function. The DLL that exports the key function won't export the
+ // vtable in this case.
+ if (MD->hasAttr<DLLImportAttr>() && !RD->hasAttr<DLLImportAttr>())
+ return nullptr;
+
// We found it.
return MD;
}
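An illustrative case for the new bail-out (hypothetical class names):

    struct __declspec(dllimport) Imported { virtual void key(); };  // class and key function imported
    struct Partial { __declspec(dllimport) virtual void key(); };   // only the method is imported

For Partial the imported method no longer counts as the key function, so its vtable is emitted locally where required instead of being assumed to come from the DLL that exports key().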
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index c0aab4c4db7d..e6292b495365 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -724,6 +724,8 @@ MSAsmStmt::MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
}
static StringRef copyIntoContext(const ASTContext &C, StringRef str) {
+ if (str.empty())
+ return StringRef();
size_t size = str.size();
char *buffer = new (C) char[size];
memcpy(buffer, str.data(), size);
@@ -1499,6 +1501,12 @@ void OMPLoopDirective::setCounters(ArrayRef<Expr *> A) {
std::copy(A.begin(), A.end(), getCounters().begin());
}
+void OMPLoopDirective::setInits(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of counter inits is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getInits().begin());
+}
+
void OMPLoopDirective::setUpdates(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of counter updates is not the same as the collapsed number");
@@ -1664,6 +1672,7 @@ OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setInit(Exprs.Init);
Dir->setInc(Exprs.Inc);
Dir->setCounters(Exprs.Counters);
+ Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
@@ -1710,6 +1719,7 @@ OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setCounters(Exprs.Counters);
+ Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
@@ -1756,6 +1766,7 @@ OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setCounters(Exprs.Counters);
+ Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
@@ -1911,6 +1922,7 @@ OMPParallelForDirective *OMPParallelForDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setCounters(Exprs.Counters);
+ Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
@@ -1955,6 +1967,7 @@ OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setCounters(Exprs.Counters);
+ Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index 1a636aefa639..d4927443aa9c 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -587,4 +587,6 @@ void FileManager::PrintStats() const {
//llvm::errs() << PagesMapped << BytesOfPagesMapped << FSLookups;
}
-PCHContainerOperations::~PCHContainerOperations() {}
+// Virtual destructors for abstract base classes that need to live in Basic.
+PCHContainerWriter::~PCHContainerWriter() {}
+PCHContainerReader::~PCHContainerReader() {}
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index dcb7603bf5ab..7705834d91a0 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -110,7 +110,8 @@ namespace {
HALFSUPPORT = 0x08000,
KEYCONCEPTS = 0x10000,
KEYOBJC2 = 0x20000,
- KEYALL = (0x3ffff & ~KEYNOMS18 &
+ KEYZVECTOR = 0x40000,
+ KEYALL = (0x7ffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
};
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index 3846fecebf5d..4314b41eb340 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -67,6 +67,7 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
.Case("objc_arc", LangOpts.ObjCAutoRefCount)
.Case("opencl", LangOpts.OpenCL)
.Case("tls", Target.isTLSSupported())
+ .Case("zvector", LangOpts.ZVector)
.Default(Target.hasFeature(Feature));
if (!HasFeature)
HasFeature = std::find(LangOpts.ModuleFeatures.begin(),
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 3cf74bc84926..e4db004b4948 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -4978,7 +4978,6 @@ public:
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
MaxVectorAlign = 128;
- RegParmMax = 8;
MaxAtomicInlineWidth = 128;
MaxAtomicPromoteWidth = 128;
@@ -5726,6 +5725,8 @@ public:
Builder.defineMacro("__LONG_DOUBLE_128__");
if (HasTransactionalExecution)
Builder.defineMacro("__HTM__");
+ if (Opts.ZVector)
+ Builder.defineMacro("__VEC__", "10301");
}
void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const override {
@@ -6967,6 +6968,10 @@ public:
: LinuxTargetInfo<X86_64TargetInfo>(Triple) {
LongDoubleFormat = &llvm::APFloat::IEEEquad;
}
+
+ bool useFloat128ManglingForLongDouble() const override {
+ return true;
+ }
};
} // end anonymous namespace
@@ -7037,11 +7042,10 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new NaClTargetInfo<ARMleTargetInfo>(Triple);
case llvm::Triple::Win32:
switch (Triple.getEnvironment()) {
- default:
- return new ARMleTargetInfo(Triple);
case llvm::Triple::Itanium:
return new ItaniumWindowsARMleTargetInfo(Triple);
case llvm::Triple::MSVC:
+ default: // Assume MSVC for unknown environments
return new MicrosoftARMleTargetInfo(Triple);
}
default:
@@ -7296,14 +7300,13 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new SolarisTargetInfo<X86_32TargetInfo>(Triple);
case llvm::Triple::Win32: {
switch (Triple.getEnvironment()) {
- default:
- return new X86_32TargetInfo(Triple);
case llvm::Triple::Cygnus:
return new CygwinX86_32TargetInfo(Triple);
case llvm::Triple::GNU:
return new MinGWX86_32TargetInfo(Triple);
case llvm::Triple::Itanium:
case llvm::Triple::MSVC:
+ default: // Assume MSVC for unknown environments
return new MicrosoftX86_32TargetInfo(Triple);
}
}
@@ -7348,11 +7351,10 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new SolarisTargetInfo<X86_64TargetInfo>(Triple);
case llvm::Triple::Win32: {
switch (Triple.getEnvironment()) {
- default:
- return new X86_64TargetInfo(Triple);
case llvm::Triple::GNU:
return new MinGWX86_64TargetInfo(Triple);
case llvm::Triple::MSVC:
+ default: // Assume MSVC for unknown environments
return new MicrosoftX86_64TargetInfo(Triple);
}
}
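The __VEC__ definition gives portable code a feature test; a minimal guard, assuming only the macro defined in the SystemZ hunk above:

    #ifdef __VEC__
    vector signed int v;   /* only reached when -fzvector is in effect */
    #endif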
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index a1a67c2bc144..725b7fe8c4f5 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -36,7 +36,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
+ StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_370/final/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 9e538703177d..9b8694f9c5f2 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -866,14 +866,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
- Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
+ Value *Depth =
+ CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_frame_address: {
- Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
+ Value *Depth =
+ CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
@@ -6238,6 +6238,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// but less than two lanes, convert to shifting in zeroes.
if (ShiftVal > NumLaneElts) {
ShiftVal -= NumLaneElts;
+ Ops[1] = Ops[0];
Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 4c77a8d28974..82680a84d328 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -484,8 +484,10 @@ private:
/// are concatenated.
StringRef internString(StringRef A, StringRef B = StringRef()) {
char *Data = DebugInfoNames.Allocate<char>(A.size() + B.size());
- std::memcpy(Data, A.data(), A.size());
- std::memcpy(Data + A.size(), B.data(), B.size());
+ if (!A.empty())
+ std::memcpy(Data, A.data(), A.size());
+ if (!B.empty())
+ std::memcpy(Data + A.size(), B.data(), B.size());
return StringRef(Data, A.size() + B.size());
}
};
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index c73f1189314e..74f6019b1a2c 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -1166,6 +1166,16 @@ static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
return llvm::ConstantInt::get(I32Ty, Off+MV);
}
+static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
+ if (C->getBitWidth() != 32) {
+ assert(llvm::ConstantInt::isValueValidForType(I32Ty,
+ C->getZExtValue()) &&
+ "Index operand too large for shufflevector mask!");
+ return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
+ }
+ return C;
+}
+
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
bool Ignore = TestAndClearIgnoreResultAssign();
(void)Ignore;
@@ -1216,7 +1226,8 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Value *LHS = nullptr, *RHS = nullptr;
if (CurIdx == 0) {
// insert into undef -> shuffle (src, undef)
- Args.push_back(C);
+ // shufflemask must use an i32
+ Args.push_back(getAsInt32(C, CGF.Int32Ty));
Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
LHS = EI->getVectorOperand();
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index a1f093a8d866..e5f507aa41bd 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -227,10 +227,22 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
QualType Type = VD->getType();
if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
- // Get the address of the master variable.
- auto *MasterAddr = VD->isStaticLocal()
- ? CGM.getStaticLocalDeclAddress(VD)
- : CGM.GetAddrOfGlobal(VD);
+
+ // Get the address of the master variable. If we are emitting code with
+ // TLS support, the address is passed from the master as a field in the
+ // captured declaration.
+ llvm::Value *MasterAddr;
+ if (getLangOpts().OpenMPUseTLS &&
+ getContext().getTargetInfo().isTLSSupported()) {
+ assert(CapturedStmtInfo->lookup(VD) &&
+ "Copyin threadprivates should have been captured!");
+ DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
+ VK_LValue, (*IRef)->getExprLoc());
+ MasterAddr = EmitLValue(&DRE).getAddress();
+ } else {
+ MasterAddr = VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
+ : CGM.GetAddrOfGlobal(VD);
+ }
// Get the address of the threadprivate variable.
auto *PrivateAddr = EmitLValue(*IRef).getAddress();
if (CopiedVars.size() == 1) {
@@ -686,27 +698,9 @@ static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
{
CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
emitPrivateLoopCounters(CGF, PreCondScope, S.counters());
- const VarDecl *IVDecl =
- cast<VarDecl>(cast<DeclRefExpr>(S.getIterationVariable())->getDecl());
- bool IsRegistered = PreCondScope.addPrivate(IVDecl, [&]() -> llvm::Value *{
- // Emit var without initialization.
- auto VarEmission = CGF.EmitAutoVarAlloca(*IVDecl);
- CGF.EmitAutoVarCleanups(VarEmission);
- return VarEmission.getAllocatedAddress();
- });
- assert(IsRegistered && "counter already registered as private");
- // Silence the warning about unused variable.
- (void)IsRegistered;
(void)PreCondScope.Privatize();
- // Initialize internal counter to 0 to calculate initial values of real
- // counters.
- LValue IV = CGF.EmitLValue(S.getIterationVariable());
- CGF.EmitStoreOfScalar(
- llvm::ConstantInt::getNullValue(
- IV.getAddress()->getType()->getPointerElementType()),
- CGF.EmitLValue(S.getIterationVariable()), /*isInit=*/true);
// Get initial values of real counters.
- for (auto I : S.updates()) {
+ for (auto I : S.inits()) {
CGF.EmitIgnoredExpr(I);
}
}
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index e36051c2053b..1b7f1d76042d 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -56,6 +56,21 @@ static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
CGM.setGlobalVisibility(Fn, MD);
}
+static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk,
+ llvm::Function *ThunkFn, bool ForVTable,
+ GlobalDecl GD) {
+ CGM.setFunctionLinkage(GD, ThunkFn);
+ CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
+ !Thunk.Return.isEmpty());
+
+ // Set the right visibility.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ setThunkVisibility(CGM, MD, Thunk, ThunkFn);
+
+ if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker())
+ ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
+}
+
#ifndef NDEBUG
static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
const ABIArgInfo &infoR, CanQualType typeR) {
@@ -429,8 +444,7 @@ void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
return;
}
- // Change the linkage.
- CGM.setFunctionLinkage(GD, ThunkFn);
+ setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
return;
}
@@ -451,16 +465,7 @@ void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
CodeGenFunction(CGM).generateThunk(ThunkFn, FnInfo, GD, Thunk);
}
- CGM.setFunctionLinkage(GD, ThunkFn);
- CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
- !Thunk.Return.isEmpty());
-
- // Set the right visibility.
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- setThunkVisibility(CGM, MD, Thunk, ThunkFn);
-
- if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker())
- ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
+ setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
}
void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 70af69b76457..2be9ceb1d637 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -2420,10 +2420,13 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
// FIXME: this may need to be reconsidered if the key function
// changes.
+ // N.B. We must always emit the RTTI data ourselves if there exists a key
+ // function.
+ bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
if (CGM.getVTables().isVTableExternal(RD))
- return true;
+ return IsDLLImport ? false : true;
- if (RD->hasAttr<DLLImportAttr>())
+ if (IsDLLImport)
return true;
}
@@ -2653,8 +2656,15 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
if (RD->hasAttr<WeakAttr>())
return llvm::GlobalValue::WeakODRLinkage;
- if (RD->isDynamicClass())
- return CGM.getVTableLinkage(RD);
+ if (RD->isDynamicClass()) {
+ llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD);
+ // MinGW won't export the RTTI information when there is a key function.
+ // Make sure we emit our own copy instead of attempting to dllimport it.
+ if (RD->hasAttr<DLLImportAttr>() &&
+ llvm::GlobalValue::isAvailableExternallyLinkage(LT))
+ LT = llvm::GlobalValue::LinkOnceODRLinkage;
+ return LT;
+ }
}
return llvm::GlobalValue::LinkOnceODRLinkage;
diff --git a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index 8f04e06988d8..9c9b1234a66f 100644
--- a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -156,7 +156,7 @@ public:
} // namespace
std::unique_ptr<ASTConsumer>
-ObjectFilePCHContainerOperations::CreatePCHContainerGenerator(
+ObjectFilePCHContainerWriter::CreatePCHContainerGenerator(
DiagnosticsEngine &Diags, const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO, const TargetOptions &TO,
const LangOptions &LO, const std::string &MainFileName,
@@ -166,7 +166,7 @@ ObjectFilePCHContainerOperations::CreatePCHContainerGenerator(
Diags, HSO, PPO, TO, LO, MainFileName, OutputFileName, OS, Buffer);
}
-void ObjectFilePCHContainerOperations::ExtractPCH(
+void ObjectFilePCHContainerReader::ExtractPCH(
llvm::MemoryBufferRef Buffer, llvm::BitstreamReader &StreamFile) const {
if (auto OF = llvm::object::ObjectFile::createObjectFile(Buffer)) {
auto *Obj = OF.get().get();
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 6226d213b620..48a8b37e8dbc 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -1858,13 +1858,20 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Hi = Integer;
} else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
Current = Integer;
- } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
- (k == BuiltinType::LongDouble &&
- getTarget().getTriple().isOSNaCl())) {
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
Current = SSE;
} else if (k == BuiltinType::LongDouble) {
- Lo = X87;
- Hi = X87Up;
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::IEEEquad) {
+ Lo = SSE;
+ Hi = SSEUp;
+ } else if (LDF == &llvm::APFloat::x87DoubleExtended) {
+ Lo = X87;
+ Hi = X87Up;
+ } else if (LDF == &llvm::APFloat::IEEEdouble) {
+ Current = SSE;
+ } else
+ llvm_unreachable("unexpected long double representation!");
}
// FIXME: _Decimal32 and _Decimal64 are SSE.
// FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
@@ -1967,14 +1974,21 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Current = Integer;
else if (Size <= 128)
Lo = Hi = Integer;
- } else if (ET == getContext().FloatTy)
+ } else if (ET == getContext().FloatTy) {
Current = SSE;
- else if (ET == getContext().DoubleTy ||
- (ET == getContext().LongDoubleTy &&
- getTarget().getTriple().isOSNaCl()))
+ } else if (ET == getContext().DoubleTy) {
Lo = Hi = SSE;
- else if (ET == getContext().LongDoubleTy)
- Current = ComplexX87;
+ } else if (ET == getContext().LongDoubleTy) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::IEEEquad)
+ Current = Memory;
+ else if (LDF == &llvm::APFloat::x87DoubleExtended)
+ Current = ComplexX87;
+ else if (LDF == &llvm::APFloat::IEEEdouble)
+ Lo = Hi = SSE;
+ else
+ llvm_unreachable("unexpected long double representation!");
+ }
// If this complex type crosses an eightbyte boundary then it
// should be split.
@@ -2243,7 +2257,8 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
Ty = QualType(InnerTy, 0);
llvm::Type *IRType = CGT.ConvertType(Ty);
- if(isa<llvm::VectorType>(IRType))
+ if (isa<llvm::VectorType>(IRType) ||
+ IRType->getTypeID() == llvm::Type::FP128TyID)
return IRType;
// We couldn't find the preferred IR vector type for 'Ty'.
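Reading the two classify() hunks above together: the x86-64 handling of long double now keys off the floating-point format instead of the OS (previously only NaCl took the SSE path). In summary form:

    long double format       scalar               _Complex
    x87DoubleExtended        Lo=X87, Hi=X87Up     ComplexX87
    IEEEquad                 Lo=SSE, Hi=SSEUp     Memory
    IEEEdouble               SSE                  Lo=Hi=SSE

This is also why GetByteVectorType now accepts FP128 directly.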
diff --git a/lib/Driver/MinGWToolChain.cpp b/lib/Driver/MinGWToolChain.cpp
index 197c19e91413..938440b08f60 100644
--- a/lib/Driver/MinGWToolChain.cpp
+++ b/lib/Driver/MinGWToolChain.cpp
@@ -20,12 +20,54 @@ using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
+namespace {
+// Simplified from Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple.
+bool findGccVersion(StringRef LibDir, std::string &GccLibDir,
+ std::string &Ver) {
+ Generic_GCC::GCCVersion Version = Generic_GCC::GCCVersion::Parse("0.0.0");
+ std::error_code EC;
+ for (llvm::sys::fs::directory_iterator LI(LibDir, EC), LE; !EC && LI != LE;
+ LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ Generic_GCC::GCCVersion CandidateVersion =
+ Generic_GCC::GCCVersion::Parse(VersionText);
+ if (CandidateVersion.Major == -1)
+ continue;
+ if (CandidateVersion <= Version)
+ continue;
+ Ver = VersionText;
+ GccLibDir = LI->path();
+ }
+ return Ver.size();
+}
+}
+
+void MinGW::findGccLibDir() {
+ llvm::SmallVector<llvm::SmallString<32>, 2> Archs;
+ Archs.emplace_back(getTriple().getArchName());
+ Archs[0] += "-w64-mingw32";
+ Archs.emplace_back("mingw32");
+ Arch = Archs[0].str();
+ // lib: Arch Linux, Ubuntu, Windows
+ // lib64: openSUSE Linux
+ for (StringRef CandidateLib : {"lib", "lib64"}) {
+ for (StringRef CandidateArch : Archs) {
+ llvm::SmallString<1024> LibDir(Base);
+ llvm::sys::path::append(LibDir, CandidateLib, "gcc", CandidateArch);
+ if (findGccVersion(LibDir, GccLibDir, Ver)) {
+ Arch = CandidateArch;
+ return;
+ }
+ }
+ }
+}
+
MinGW::MinGW(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: ToolChain(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
- llvm::SmallString<1024> LibDir;
-
+// In Windows there aren't any standard install locations, so we search
+// for gcc on the PATH. In Linux the base is always /usr.
#ifdef LLVM_ON_WIN32
if (getDriver().SysRoot.size())
Base = getDriver().SysRoot;
@@ -35,45 +77,23 @@ MinGW::MinGW(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
llvm::sys::path::parent_path(GPPName.get()));
else
Base = llvm::sys::path::parent_path(getDriver().getInstalledDir());
- Base += llvm::sys::path::get_separator();
- LibDir = Base;
- llvm::sys::path::append(LibDir, "lib", "gcc");
#else
if (getDriver().SysRoot.size())
Base = getDriver().SysRoot;
else
- Base = "/usr/";
- LibDir = Base;
- llvm::sys::path::append(LibDir, "lib64", "gcc");
+ Base = "/usr";
#endif
- LibDir += llvm::sys::path::get_separator();
-
- // First look for mingw-w64.
- Arch = getTriple().getArchName();
- Arch += "-w64-mingw32";
- std::error_code EC;
- llvm::sys::fs::directory_iterator MingW64Entry(LibDir + Arch, EC);
- if (!EC) {
- GccLibDir = MingW64Entry->path();
- Ver = llvm::sys::path::filename(GccLibDir);
- } else {
- // If mingw-w64 not found, try looking for mingw.org.
- Arch = "mingw32";
- llvm::sys::fs::directory_iterator MingwOrgEntry(LibDir + Arch, EC);
- if (!EC)
- GccLibDir = MingwOrgEntry->path();
- }
- Arch += llvm::sys::path::get_separator();
+ Base += llvm::sys::path::get_separator();
+ findGccLibDir();
// GccLibDir must precede Base/lib so that the
// correct crtbegin.o, crtend.o would be found.
getFilePaths().push_back(GccLibDir);
+ getFilePaths().push_back(
+ (Base + Arch + llvm::sys::path::get_separator() + "lib").str());
getFilePaths().push_back(Base + "lib");
- getFilePaths().push_back(Base + Arch + "lib");
-#ifdef LLVM_ON_UNIX
- // For openSUSE.
- getFilePaths().push_back(Base + Arch + "sys-root/mingw/lib");
-#endif
+ // openSUSE
+ getFilePaths().push_back(Base + Arch + "/sys-root/mingw/lib");
}
bool MinGW::IsIntegratedAssemblerDefault() const { return true; }
@@ -115,6 +135,58 @@ bool MinGW::UseSEHExceptions() const {
return getArch() == llvm::Triple::x86_64;
}
+// Include directories for various hosts:
+
+// Windows, mingw.org
+// c:\mingw\lib\gcc\mingw32\4.8.1\include\c++
+// c:\mingw\lib\gcc\mingw32\4.8.1\include\c++\mingw32
+// c:\mingw\lib\gcc\mingw32\4.8.1\include\c++\backward
+// c:\mingw\lib\gcc\mingw32\4.8.1\include
+// c:\mingw\include
+// c:\mingw\lib\gcc\mingw32\4.8.1\include-fixed
+// c:\mingw\mingw32\include
+
+// Windows, mingw-w64 mingw-builds
+// c:\mingw32\lib\gcc\i686-w64-mingw32\4.9.1\include
+// c:\mingw32\lib\gcc\i686-w64-mingw32\4.9.1\include-fixed
+// c:\mingw32\i686-w64-mingw32\include
+// c:\mingw32\i686-w64-mingw32\include\c++
+// c:\mingw32\i686-w64-mingw32\include\c++\i686-w64-mingw32
+// c:\mingw32\i686-w64-mingw32\include\c++\backward
+
+// Windows, mingw-w64 msys2
+// c:\msys64\mingw32\lib\gcc\i686-w64-mingw32\4.9.2\include
+// c:\msys64\mingw32\include
+// c:\msys64\mingw32\lib\gcc\i686-w64-mingw32\4.9.2\include-fixed
+// c:\msys64\mingw32\i686-w64-mingw32\include
+// c:\msys64\mingw32\include\c++\4.9.2
+// c:\msys64\mingw32\include\c++\4.9.2\i686-w64-mingw32
+// c:\msys64\mingw32\include\c++\4.9.2\backward
+
+// openSUSE
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed
+// /usr/x86_64-w64-mingw32/sys-root/mingw/include
+
+// Arch Linux
+// /usr/i686-w64-mingw32/include/c++/5.1.0
+// /usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32
+// /usr/i686-w64-mingw32/include/c++/5.1.0/backward
+// /usr/lib/gcc/i686-w64-mingw32/5.1.0/include
+// /usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed
+// /usr/i686-w64-mingw32/include
+
+// Ubuntu
+// /usr/include/c++/4.8
+// /usr/include/c++/4.8/x86_64-w64-mingw32
+// /usr/include/c++/4.8/backward
+// /usr/lib/gcc/x86_64-w64-mingw32/4.8/include
+// /usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed
+// /usr/x86_64-w64-mingw32/include
+
void MinGW::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdinc))
@@ -129,17 +201,18 @@ void MinGW::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
- llvm::SmallString<1024> IncludeDir(GccLibDir);
- llvm::sys::path::append(IncludeDir, "include");
- addSystemInclude(DriverArgs, CC1Args, IncludeDir.c_str());
- IncludeDir += "-fixed";
-#ifdef LLVM_ON_UNIX
- // For openSUSE.
+ if (GetRuntimeLibType(DriverArgs) == ToolChain::RLT_Libgcc) {
+ llvm::SmallString<1024> IncludeDir(GccLibDir);
+ llvm::sys::path::append(IncludeDir, "include");
+ addSystemInclude(DriverArgs, CC1Args, IncludeDir.c_str());
+ IncludeDir += "-fixed";
+ // openSUSE
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + Arch + "/sys-root/mingw/include");
+ addSystemInclude(DriverArgs, CC1Args, IncludeDir.c_str());
+ }
addSystemInclude(DriverArgs, CC1Args,
- "/usr/x86_64-w64-mingw32/sys-root/mingw/include");
-#endif
- addSystemInclude(DriverArgs, CC1Args, IncludeDir.c_str());
- addSystemInclude(DriverArgs, CC1Args, Base + Arch + "include");
+ Base + Arch + llvm::sys::path::get_separator() + "include");
addSystemInclude(DriverArgs, CC1Args, Base + "include");
}
@@ -149,27 +222,29 @@ void MinGW::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
DriverArgs.hasArg(options::OPT_nostdincxx))
return;
- // C++ includes may be found in several locations depending on distribution.
- // Windows
- // -------
- // mingw-w64 mingw-builds: $sysroot/i686-w64-mingw32/include/c++.
- // mingw-w64 msys2: $sysroot/include/c++/4.9.2
- // mingw.org: GccLibDir/include/c++
- //
- // Linux
- // -----
- // openSUSE: GccLibDir/include/c++
- llvm::SmallVector<llvm::SmallString<1024>, 3> CppIncludeBases;
- CppIncludeBases.emplace_back(Base);
- llvm::sys::path::append(CppIncludeBases[0], Arch, "include", "c++");
- CppIncludeBases.emplace_back(Base);
- llvm::sys::path::append(CppIncludeBases[1], "include", "c++", Ver);
- CppIncludeBases.emplace_back(GccLibDir);
- llvm::sys::path::append(CppIncludeBases[2], "include", "c++");
- for (auto &CppIncludeBase : CppIncludeBases) {
- CppIncludeBase += llvm::sys::path::get_separator();
- addSystemInclude(DriverArgs, CC1Args, CppIncludeBase);
- addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + Arch);
- addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + "backward");
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + "include" + llvm::sys::path::get_separator() +
+ "c++" + llvm::sys::path::get_separator() + "v1");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ llvm::SmallVector<llvm::SmallString<1024>, 4> CppIncludeBases;
+ CppIncludeBases.emplace_back(Base);
+ llvm::sys::path::append(CppIncludeBases[0], Arch, "include", "c++");
+ CppIncludeBases.emplace_back(Base);
+ llvm::sys::path::append(CppIncludeBases[1], Arch, "include", "c++", Ver);
+ CppIncludeBases.emplace_back(Base);
+ llvm::sys::path::append(CppIncludeBases[2], "include", "c++", Ver);
+ CppIncludeBases.emplace_back(GccLibDir);
+ llvm::sys::path::append(CppIncludeBases[3], "include", "c++");
+ for (auto &CppIncludeBase : CppIncludeBases) {
+ addSystemInclude(DriverArgs, CC1Args, CppIncludeBase);
+ CppIncludeBase += llvm::sys::path::get_separator();
+ addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + Arch);
+ addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + "backward");
+ }
+ break;
}
}
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index e6a1bc9685de..d40bb951e50a 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -305,12 +305,17 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
// Thumb2 is the default for V7 on Darwin.
//
// FIXME: Thumb should just be another -target-feaure, not in the triple.
+ StringRef MCPU, MArch;
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ MCPU = A->getValue();
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ MArch = A->getValue();
std::string CPU = Triple.isOSBinFormatMachO()
- ? tools::arm::getARMCPUForMArch(Args, Triple)
- : tools::arm::getARMTargetCPU(Args, Triple);
+ ? tools::arm::getARMCPUForMArch(MArch, Triple)
+ : tools::arm::getARMTargetCPU(MCPU, MArch, Triple);
StringRef Suffix =
tools::arm::getLLVMArchSuffixForARM(CPU,
- tools::arm::getARMArch(Args, Triple));
+ tools::arm::getARMArch(MArch, Triple));
bool ThumbDefault = Suffix.startswith("v6m") || Suffix.startswith("v7m") ||
Suffix.startswith("v7em") ||
(Suffix.startswith("v7") && getTriple().isOSBinFormatMachO());
diff --git a/lib/Driver/ToolChains.h b/lib/Driver/ToolChains.h
index 327ff9b13b19..59eaade6b51c 100644
--- a/lib/Driver/ToolChains.h
+++ b/lib/Driver/ToolChains.h
@@ -29,7 +29,7 @@ namespace toolchains {
/// all subcommands; this relies on gcc translating the majority of
/// command line options.
class LLVM_LIBRARY_VISIBILITY Generic_GCC : public ToolChain {
-protected:
+public:
/// \brief Struct to store and manipulate GCC versions.
///
/// We rely on assumptions about the form and structure of GCC version
@@ -147,6 +147,7 @@ protected:
bool NeedsBiarchSuffix = false);
};
+protected:
GCCInstallationDetector GCCInstallation;
public:
@@ -556,6 +557,7 @@ private:
std::string Arch;
mutable std::unique_ptr<tools::gcc::Preprocessor> Preprocessor;
mutable std::unique_ptr<tools::gcc::Compiler> Compiler;
+ void findGccLibDir();
};
class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic_ELF {
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index 8c11992f1f82..19ebbb91ffc7 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -502,11 +502,46 @@ static bool isNoCommonDefault(const llvm::Triple &Triple) {
}
}
+// ARM tools start.
+
+// Get SubArch (vN).
+static int getARMSubArchVersionNumber(const llvm::Triple &Triple) {
+ llvm::StringRef Arch = Triple.getArchName();
+ return llvm::ARMTargetParser::parseArchVersion(Arch);
+}
+
+// True if M-profile.
+static bool isARMMProfile(const llvm::Triple &Triple) {
+ llvm::StringRef Arch = Triple.getArchName();
+ unsigned Profile = llvm::ARMTargetParser::parseArchProfile(Arch);
+ return Profile == llvm::ARM::PK_M;
+}
+
+// Get Arch/CPU from args.
+static void getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
+ llvm::StringRef &CPU, bool FromAs = false) {
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ CPU = A->getValue();
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ Arch = A->getValue();
+ if (!FromAs)
+ return;
+
+ for (const Arg *A :
+ Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
+ StringRef Value = A->getValue();
+ if (Value.startswith("-mcpu="))
+ CPU = Value.substr(6);
+ if (Value.startswith("-march="))
+ Arch = Value.substr(7);
+ }
+}
+
// Handle -mhwdiv=.
+// FIXME: Use ARMTargetParser.
static void getARMHWDivFeatures(const Driver &D, const Arg *A,
- const ArgList &Args,
+ const ArgList &Args, StringRef HWDiv,
std::vector<const char *> &Features) {
- StringRef HWDiv = A->getValue();
if (HWDiv == "arm") {
Features.push_back("+hwdiv-arm");
Features.push_back("-hwdiv");
@@ -525,23 +560,32 @@ static void getARMHWDivFeatures(const Driver &D, const Arg *A,
// Handle -mfpu=.
static void getARMFPUFeatures(const Driver &D, const Arg *A,
- const ArgList &Args,
+ const ArgList &Args, StringRef FPU,
std::vector<const char *> &Features) {
- StringRef FPU = A->getValue();
unsigned FPUID = llvm::ARMTargetParser::parseFPU(FPU);
if (!llvm::ARMTargetParser::getFPUFeatures(FPUID, Features))
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
-static int getARMSubArchVersionNumber(const llvm::Triple &Triple) {
- llvm::StringRef Arch = Triple.getArchName();
- return llvm::ARMTargetParser::parseArchVersion(Arch);
+// Check if -march is valid by checking if it can be canonicalised and parsed.
+// getARMArch is used here instead of just checking the -march value in order
+// to handle -march=native correctly.
+static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
+ llvm::StringRef ArchName,
+ const llvm::Triple &Triple) {
+ std::string MArch = arm::getARMArch(ArchName, Triple);
+ if (llvm::ARMTargetParser::parseArch(MArch) == llvm::ARM::AK_INVALID)
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
-static bool isARMMProfile(const llvm::Triple &Triple) {
- llvm::StringRef Arch = Triple.getArchName();
- unsigned Profile = llvm::ARMTargetParser::parseArchProfile(Arch);
- return Profile == llvm::ARM::PK_M;
+// Check -mcpu=. Needs ArchName to handle -mcpu=generic.
+static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
+ llvm::StringRef CPUName, llvm::StringRef ArchName,
+ const llvm::Triple &Triple) {
+ std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
+ std::string Arch = arm::getARMArch(ArchName, Triple);
+ if (strcmp(arm::getLLVMArchSuffixForARM(CPU, Arch), "") == 0)
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
// Select the float ABI as determined by -msoft-float, -mhard-float, and
@@ -641,6 +685,9 @@ static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
StringRef FloatABI = tools::arm::getARMFloatABI(D, Args, Triple);
+ const Arg *WaCPU = nullptr, *WaFPU = nullptr;
+ const Arg *WaHDiv = nullptr, *WaArch = nullptr;
+
if (!ForAS) {
// FIXME: Note, this is a hack, the LLVM backend doesn't actually use these
// yet (it uses the -mfloat-abi and -msoft-float options), and it is
@@ -661,31 +708,75 @@ static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
// Use software floating point argument passing?
if (FloatABI != "hard")
Features.push_back("+soft-float-abi");
+ } else {
+ // Here, we make sure that -Wa,-mfpu/cpu/arch/hwdiv will be passed down
+ // to the assembler correctly.
+ for (const Arg *A :
+ Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
+ StringRef Value = A->getValue();
+ if (Value.startswith("-mfpu=")) {
+ WaFPU = A;
+ } else if (Value.startswith("-mcpu=")) {
+ WaCPU = A;
+ } else if (Value.startswith("-mhwdiv=")) {
+ WaHDiv = A;
+ } else if (Value.startswith("-march=")) {
+ WaArch = A;
+ }
+ }
}
- // Honor -mfpu=.
- if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
- getARMFPUFeatures(D, A, Args, Features);
- if (const Arg *A = Args.getLastArg(options::OPT_mhwdiv_EQ))
- getARMHWDivFeatures(D, A, Args, Features);
-
- // Check if -march is valid by checking if it can be canonicalised and parsed.
- // getARMArch is used here instead of just checking the -march value in order
- // to handle -march=native correctly.
- if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
- std::string Arch = arm::getARMArch(Args, Triple);
- if (llvm::ARMTargetParser::parseArch(Arch) == llvm::ARM::AK_INVALID)
- D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
- }
-
- // We do a similar thing with -mcpu, but here things are complicated because
- // the only function we have to check if a cpu is valid is
- // getLLVMArchSuffixForARM which also needs an architecture.
- if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
- std::string CPU = arm::getARMTargetCPU(Args, Triple);
- std::string Arch = arm::getARMArch(Args, Triple);
- if (strcmp(arm::getLLVMArchSuffixForARM(CPU, Arch), "") == 0)
- D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ // Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
+ const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
+ if (WaFPU) {
+ if (FPUArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << FPUArg->getAsString(Args);
+ getARMFPUFeatures(D, WaFPU, Args, StringRef(WaFPU->getValue()).substr(6),
+ Features);
+ } else if (FPUArg) {
+ getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
+ }
+
+ // Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
+ const Arg *HDivArg = Args.getLastArg(options::OPT_mhwdiv_EQ);
+ if (WaHDiv) {
+ if (HDivArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << HDivArg->getAsString(Args);
+ getARMHWDivFeatures(D, WaHDiv, Args,
+ StringRef(WaHDiv->getValue()).substr(8), Features);
+ } else if (HDivArg)
+ getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);
+
+ // Check -march. ClangAs gives preference to -Wa,-march=.
+ const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
+ StringRef ArchName;
+ if (WaArch) {
+ if (ArchArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << ArchArg->getAsString(Args);
+ ArchName = StringRef(WaArch->getValue()).substr(7);
+ checkARMArchName(D, WaArch, Args, ArchName, Triple);
+ // FIXME: Set Arch.
+ D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args);
+ } else if (ArchArg) {
+ ArchName = ArchArg->getValue();
+ checkARMArchName(D, ArchArg, Args, ArchName, Triple);
+ }
+
+ // Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
+ const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
+ StringRef CPUName;
+ if (WaCPU) {
+ if (CPUArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << CPUArg->getAsString(Args);
+ CPUName = StringRef(WaCPU->getValue()).substr(6);
+ checkARMCPUName(D, WaCPU, Args, CPUName, ArchName, Triple);
+ } else if (CPUArg) {
+ CPUName = CPUArg->getValue();
+ checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, Triple);
}
// Setting -msoft-float effectively disables NEON because of the GCC
@@ -836,6 +927,7 @@ void Clang::AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs,
CmdArgs.push_back("-arm-reserve-r9");
}
}
+// ARM tools end.
/// getAArch64TargetCPU - Get the (LLVM) name of the AArch64 cpu we are
/// targeting.
@@ -1463,7 +1555,8 @@ static const char *getX86TargetCPU(const ArgList &Args,
}
}
-static std::string getCPUName(const ArgList &Args, const llvm::Triple &T) {
+static std::string getCPUName(const ArgList &Args, const llvm::Triple &T,
+ bool FromAs = false) {
switch (T.getArch()) {
default:
return "";
@@ -1475,9 +1568,11 @@ static std::string getCPUName(const ArgList &Args, const llvm::Triple &T) {
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- return arm::getARMTargetCPU(Args, T);
-
+ case llvm::Triple::thumbeb: {
+ StringRef MArch, MCPU;
+ getARMArchCPUFromArgs(Args, MArch, MCPU, FromAs);
+ return arm::getARMTargetCPU(MCPU, MArch, T);
+ }
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@@ -2198,12 +2293,12 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
A->claim();
- for (const StringRef Value : A->getValues()) {
- if (TakeNextArg) {
- CmdArgs.push_back(Value.data());
- TakeNextArg = false;
- continue;
- }
+ for (const StringRef Value : A->getValues()) {
+ if (TakeNextArg) {
+ CmdArgs.push_back(Value.data());
+ TakeNextArg = false;
+ continue;
+ }
if (Value == "-force_cpusubtype_ALL") {
// Do nothing, this is the default and we don't support anything else.
@@ -2227,6 +2322,9 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
TakeNextArg = true;
} else if (Value.startswith("-gdwarf-")) {
CmdArgs.push_back(Value.data());
+ } else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
+ Value.startswith("-mhwdiv") || Value.startswith("-march")) {
+ // Do nothing, we'll validate it later.
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
@@ -2243,8 +2341,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
// Until ARM libraries are built separately, we have them all in one library
static StringRef getArchNameForCompilerRTLib(const ToolChain &TC) {
- if (TC.getTriple().isOSWindows() &&
- !TC.getTriple().isWindowsItaniumEnvironment() &&
+ if (TC.getTriple().isWindowsMSVCEnvironment() &&
TC.getArch() == llvm::Triple::x86)
return "i386";
if (TC.getArch() == llvm::Triple::arm || TC.getArch() == llvm::Triple::armeb)
@@ -2270,10 +2367,12 @@ SmallString<128> tools::getCompilerRT(const ToolChain &TC, StringRef Component,
: "";
bool IsOSWindows = TC.getTriple().isOSWindows();
+ bool IsITANMSVCWindows = TC.getTriple().isWindowsMSVCEnvironment() ||
+ TC.getTriple().isWindowsItaniumEnvironment();
StringRef Arch = getArchNameForCompilerRTLib(TC);
- const char *Prefix = IsOSWindows ? "" : "lib";
+ const char *Prefix = IsITANMSVCWindows ? "" : "lib";
const char *Suffix =
- Shared ? (IsOSWindows ? ".dll" : ".so") : (IsOSWindows ? ".lib" : ".a");
+ Shared ? (IsOSWindows ? ".dll" : ".so") : (IsITANMSVCWindows ? ".lib" : ".a");
SmallString<128> Path = getCompilerRTLibDir(TC);
llvm::sys::path::append(Path, Prefix + Twine("clang_rt.") + Component + "-" +
@@ -2750,8 +2849,7 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
if (ProfileGenerateArg && ProfileUseArg)
D.Diag(diag::err_drv_argument_not_allowed_with)
- << ProfileGenerateArg->getSpelling()
- << ProfileUseArg->getSpelling();
+ << ProfileGenerateArg->getSpelling() << ProfileUseArg->getSpelling();
if (ProfileGenerateArg &&
ProfileGenerateArg->getOption().matches(
@@ -2907,8 +3005,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
assert((isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) &&
"Invalid action for clang tool.");
- if (JA.getType() == types::TY_LTO_IR ||
- JA.getType() == types::TY_LTO_BC) {
+ if (JA.getType() == types::TY_LTO_IR || JA.getType() == types::TY_LTO_BC) {
CmdArgs.push_back("-flto");
}
if (JA.getType() == types::TY_Nothing) {
@@ -3434,7 +3531,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Add the target cpu
- std::string CPU = getCPUName(Args, Triple);
+ std::string CPU = getCPUName(Args, Triple, /*FromAs*/ false);
if (!CPU.empty()) {
CmdArgs.push_back("-target-cpu");
CmdArgs.push_back(Args.MakeArgString(CPU));
@@ -3952,9 +4049,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fstandalone_debug);
Args.AddLastArg(CmdArgs, options::OPT_fno_standalone_debug);
Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
- // AltiVec language extensions aren't relevant for assembling.
- if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm)
+ // AltiVec-like language extensions aren't relevant for assembling.
+ if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm) {
Args.AddLastArg(CmdArgs, options::OPT_faltivec);
+ Args.AddLastArg(CmdArgs, options::OPT_fzvector);
+ }
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_show_template_tree);
Args.AddLastArg(CmdArgs, options::OPT_fno_elide_type);
@@ -3999,6 +4098,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< "ppc/ppc64/ppc64le";
}
+ // -fzvector is incompatible with -faltivec.
+ if (Arg *A = Args.getLastArg(options::OPT_fzvector))
+ if (Args.hasArg(options::OPT_faltivec))
+ D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args)
+ << "-faltivec";
+
if (getToolChain().SupportsProfiling())
Args.AddLastArg(CmdArgs, options::OPT_pg);
@@ -4234,8 +4339,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
llvm::sys::path::append(Path, "modules");
} else if (Path.empty()) {
// No module path was provided: use the default.
- llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false,
- Path);
+ llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false, Path);
llvm::sys::path::append(Path, "org.llvm.clang.");
appendUserToPath(Path);
llvm::sys::path::append(Path, "ModuleCache");
@@ -5270,7 +5374,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// Add the target cpu
const llvm::Triple Triple(TripleStr);
- std::string CPU = getCPUName(Args, Triple);
+ std::string CPU = getCPUName(Args, Triple, /*FromAs*/ true);
if (!CPU.empty()) {
CmdArgs.push_back("-target-cpu");
CmdArgs.push_back(Args.MakeArgString(CPU));
@@ -5775,16 +5879,12 @@ void hexagon::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
// Hexagon tools end.
-const std::string arm::getARMArch(const ArgList &Args,
- const llvm::Triple &Triple) {
+const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
std::string MArch;
- if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
- // Otherwise, if we have -march= choose the base CPU for that arch.
- MArch = A->getValue();
- } else {
- // Otherwise, use the Arch from the triple.
+ if (!Arch.empty())
+ MArch = Arch;
+ else
MArch = Triple.getArchName();
- }
MArch = StringRef(MArch).lower();
// Handle -march=native.
@@ -5805,9 +5905,8 @@ const std::string arm::getARMArch(const ArgList &Args,
return MArch;
}
/// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
-const char *arm::getARMCPUForMArch(const ArgList &Args,
- const llvm::Triple &Triple) {
- std::string MArch = getARMArch(Args, Triple);
+const char *arm::getARMCPUForMArch(StringRef Arch, const llvm::Triple &Triple) {
+ std::string MArch = getARMArch(Arch, Triple);
// getARMCPUForArch defaults to the triple if MArch is empty, but empty MArch
// here means an -march=native that we can't handle, so instead return no CPU.
if (MArch.empty())
@@ -5823,12 +5922,12 @@ const char *arm::getARMCPUForMArch(const ArgList &Args,
}
/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
-std::string arm::getARMTargetCPU(const ArgList &Args,
+std::string arm::getARMTargetCPU(StringRef CPU, StringRef Arch,
const llvm::Triple &Triple) {
// FIXME: Warn on inconsistent use of -mcpu and -march.
// If we have -mcpu=, use that.
- if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
- std::string MCPU = StringRef(A->getValue()).lower();
+ if (!CPU.empty()) {
+ std::string MCPU = StringRef(CPU).lower();
// Handle -mcpu=native.
if (MCPU == "native")
return llvm::sys::getHostCPUName();
@@ -5836,7 +5935,7 @@ std::string arm::getARMTargetCPU(const ArgList &Args,
return MCPU;
}
- return getARMCPUForMArch(Args, Triple);
+ return getARMCPUForMArch(Arch, Triple);
}
/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
@@ -7335,8 +7434,11 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb: {
- std::string MArch = arm::getARMTargetCPU(Args, getToolChain().getTriple());
- CmdArgs.push_back(Args.MakeArgString("-mcpu=" + MArch));
+ StringRef MArch, MCPU;
+ getARMArchCPUFromArgs(Args, MArch, MCPU, /*FromAs*/ true);
+ std::string Arch =
+ arm::getARMTargetCPU(MCPU, MArch, getToolChain().getTriple());
+ CmdArgs.push_back(Args.MakeArgString("-mcpu=" + Arch));
break;
}
@@ -7816,6 +7918,7 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
ArgStringList &CmdArgs, const ArgList &Args) {
bool isAndroid = Triple.getEnvironment() == llvm::Triple::Android;
+ bool isCygMing = Triple.isOSCygMing();
bool StaticLibgcc = Args.hasArg(options::OPT_static_libgcc) ||
Args.hasArg(options::OPT_static);
if (!D.CCCIsCXX())
@@ -7825,10 +7928,10 @@ static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
if (D.CCCIsCXX())
CmdArgs.push_back("-lgcc");
} else {
- if (!D.CCCIsCXX())
+ if (!D.CCCIsCXX() && !isCygMing)
CmdArgs.push_back("--as-needed");
CmdArgs.push_back("-lgcc_s");
- if (!D.CCCIsCXX())
+ if (!D.CCCIsCXX() && !isCygMing)
CmdArgs.push_back("--no-as-needed");
}
@@ -7863,13 +7966,15 @@ static std::string getLinuxDynamicLinker(const ArgList &Args,
else if (Arch == llvm::Triple::aarch64_be)
return "/lib/ld-linux-aarch64_be.so.1";
else if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb) {
- if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
+ if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF ||
+ tools::arm::getARMFloatABI(ToolChain.getDriver(), Args, ToolChain.getTriple()) == "hard")
return "/lib/ld-linux-armhf.so.3";
else
return "/lib/ld-linux.so.3";
} else if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb) {
// TODO: check which dynamic linker name to use.
- if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
+ if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF ||
+ tools::arm::getARMFloatABI(ToolChain.getDriver(), Args, ToolChain.getTriple()) == "hard")
return "/lib/ld-linux-armhf.so.3";
else
return "/lib/ld-linux.so.3";
@@ -8910,15 +9015,10 @@ void MinGW::Linker::AddLibGCC(const ArgList &Args,
if (Args.hasArg(options::OPT_mthreads))
CmdArgs.push_back("-lmingwthrd");
CmdArgs.push_back("-lmingw32");
- if (Args.hasArg(options::OPT_shared) ||
- Args.hasArg(options::OPT_shared_libgcc) ||
- !Args.hasArg(options::OPT_static_libgcc)) {
- CmdArgs.push_back("-lgcc_s");
- CmdArgs.push_back("-lgcc");
- } else {
- CmdArgs.push_back("-lgcc");
- CmdArgs.push_back("-lgcc_eh");
- }
+
+ // Add libgcc or compiler-rt.
+ AddRunTimeLibs(getToolChain(), getToolChain().getDriver(), CmdArgs, Args);
+
CmdArgs.push_back("-lmoldname");
CmdArgs.push_back("-lmingwex");
CmdArgs.push_back("-lmsvcrt");
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
index 52ab731cbc46..651ddc8ff572 100644
--- a/lib/Driver/Tools.h
+++ b/lib/Driver/Tools.h
@@ -231,11 +231,11 @@ public:
} // end namespace hexagon.
namespace arm {
- std::string getARMTargetCPU(const llvm::opt::ArgList &Args,
+ std::string getARMTargetCPU(StringRef CPU, StringRef Arch,
const llvm::Triple &Triple);
- const std::string getARMArch(const llvm::opt::ArgList &Args,
+ const std::string getARMArch(StringRef Arch,
const llvm::Triple &Triple);
- const char* getARMCPUForMArch(const llvm::opt::ArgList &Args,
+ const char* getARMCPUForMArch(StringRef Arch,
const llvm::Triple &Triple);
const char* getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch);
diff --git a/lib/Frontend/ASTMerge.cpp b/lib/Frontend/ASTMerge.cpp
index f6f1551b4235..762c7a5da5e6 100644
--- a/lib/Frontend/ASTMerge.cpp
+++ b/lib/Frontend/ASTMerge.cpp
@@ -46,7 +46,7 @@ void ASTMergeAction::ExecuteAction() {
*CI.getDiagnostics().getClient()),
/*ShouldOwnClient=*/true));
std::unique_ptr<ASTUnit> Unit =
- ASTUnit::LoadFromASTFile(ASTFiles[I], CI.getPCHContainerOperations(),
+ ASTUnit::LoadFromASTFile(ASTFiles[I], CI.getPCHContainerReader(),
Diags, CI.getFileSystemOpts(), false);
if (!Unit)
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index 57c97d0c312f..1bb5c3ff279d 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -650,7 +650,7 @@ void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
const std::string &Filename,
- std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts, bool OnlyLocalDecls,
ArrayRef<RemappedFile> RemappedFiles, bool CaptureDiagnostics,
@@ -676,7 +676,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->getFileManager(),
UserFilesAreVolatile);
AST->HSOpts = new HeaderSearchOptions();
-
+ AST->HSOpts->ModuleFormat = PCHContainerRdr.getFormat();
AST->HeaderInfo.reset(new HeaderSearch(AST->HSOpts,
AST->getSourceManager(),
AST->getDiagnostics(),
@@ -708,7 +708,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
bool disableValid = false;
if (::getenv("LIBCLANG_DISABLE_PCH_VALIDATION"))
disableValid = true;
- AST->Reader = new ASTReader(PP, Context, *PCHContainerOps,
+ AST->Reader = new ASTReader(PP, Context, PCHContainerRdr,
/*isysroot=*/"",
/*DisableValidation=*/disableValid,
AllowPCHWithCompilerErrors);
diff --git a/lib/Frontend/ChainedIncludesSource.cpp b/lib/Frontend/ChainedIncludesSource.cpp
index be30d43a843e..cc0504ba8b29 100644
--- a/lib/Frontend/ChainedIncludesSource.cpp
+++ b/lib/Frontend/ChainedIncludesSource.cpp
@@ -81,7 +81,7 @@ createASTReader(CompilerInstance &CI, StringRef pchFile,
Preprocessor &PP = CI.getPreprocessor();
std::unique_ptr<ASTReader> Reader;
Reader.reset(new ASTReader(PP, CI.getASTContext(),
- *CI.getPCHContainerOperations(),
+ CI.getPCHContainerReader(),
/*isysroot=*/"", /*DisableValidation=*/true));
for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
StringRef sr(bufNames[ti]);
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index ff041a8ec431..c33b150e3047 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -322,7 +322,7 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
PP->getFileManager(), PPOpts);
// Predefine macros and configure the preprocessor.
- InitializePreprocessor(*PP, PPOpts, *getPCHContainerOperations(),
+ InitializePreprocessor(*PP, PPOpts, getPCHContainerReader(),
getFrontendOpts());
// Initialize the header search object.
@@ -399,7 +399,7 @@ void CompilerInstance::createPCHExternalASTSource(
ModuleManager = createPCHExternalASTSource(
Path, getHeaderSearchOpts().Sysroot, DisablePCHValidation,
AllowPCHWithCompilerErrors, getPreprocessor(), getASTContext(),
- *getPCHContainerOperations(), DeserializationListener,
+ getPCHContainerReader(), DeserializationListener,
OwnDeserializationListener, Preamble,
getFrontendOpts().UseGlobalModuleIndex);
}
@@ -407,13 +407,13 @@ void CompilerInstance::createPCHExternalASTSource(
IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
StringRef Path, StringRef Sysroot, bool DisablePCHValidation,
bool AllowPCHWithCompilerErrors, Preprocessor &PP, ASTContext &Context,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
void *DeserializationListener, bool OwnDeserializationListener,
bool Preamble, bool UseGlobalModuleIndex) {
HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
IntrusiveRefCntPtr<ASTReader> Reader(new ASTReader(
- PP, Context, PCHContainerOps, Sysroot.empty() ? "" : Sysroot.data(),
+ PP, Context, PCHContainerRdr, Sysroot.empty() ? "" : Sysroot.data(),
DisablePCHValidation, AllowPCHWithCompilerErrors,
/*AllowConfigurationMismatch*/ false, HSOpts.ModulesValidateSystemHeaders,
UseGlobalModuleIndex));
@@ -1244,7 +1244,7 @@ void CompilerInstance::createModuleManager() {
ReadTimer = llvm::make_unique<llvm::Timer>("Reading modules",
*FrontendTimerGroup);
ModuleManager = new ASTReader(
- getPreprocessor(), *Context, *getPCHContainerOperations(),
+ getPreprocessor(), *Context, getPCHContainerReader(),
Sysroot.empty() ? "" : Sysroot.c_str(), PPOpts.DisablePCHValidation,
/*AllowASTWithCompilerErrors=*/false,
/*AllowConfigurationMismatch=*/false,
@@ -1296,7 +1296,7 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) {
ModuleFileStack.push_back(FileName);
ModuleNameStack.push_back(StringRef());
if (ASTReader::readASTFileControlBlock(FileName, CI.getFileManager(),
- *CI.getPCHContainerOperations(),
+ CI.getPCHContainerReader(),
*this)) {
CI.getDiagnostics().Report(
SourceLocation(), CI.getFileManager().getBufferForFile(FileName)
@@ -1667,7 +1667,7 @@ GlobalModuleIndex *CompilerInstance::loadGlobalModuleIndex(
llvm::sys::fs::create_directories(
getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
GlobalModuleIndex::writeIndex(
- getFileManager(), *getPCHContainerOperations(),
+ getFileManager(), getPCHContainerReader(),
getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
ModuleManager->resetForReload();
ModuleManager->loadGlobalIndex();
@@ -1695,7 +1695,7 @@ GlobalModuleIndex *CompilerInstance::loadGlobalModuleIndex(
}
if (RecreateIndex) {
GlobalModuleIndex::writeIndex(
- getFileManager(), *getPCHContainerOperations(),
+ getFileManager(), getPCHContainerReader(),
getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
ModuleManager->resetForReload();
ModuleManager->loadGlobalIndex();
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 6f13faf573be..fbeba09e1cf1 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -1112,6 +1112,8 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
getLastArgUInt64Value(Args, OPT_fbuild_session_timestamp, 0);
Opts.ModulesValidateSystemHeaders =
Args.hasArg(OPT_fmodules_validate_system_headers);
+ if (const Arg *A = Args.getLastArg(OPT_fmodule_format_EQ))
+ Opts.ModuleFormat = A->getValue();
for (const Arg *A : Args.filtered(OPT_fmodules_ignore_macro)) {
StringRef MacroDef = A->getValue();
@@ -1258,6 +1260,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
// OpenCL has some additional defaults.
if (Opts.OpenCL) {
Opts.AltiVec = 0;
+ Opts.ZVector = 0;
Opts.CXXOperatorNames = 1;
Opts.LaxVectorConversions = 0;
Opts.DefaultFPContract = 1;
@@ -1446,6 +1449,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_faltivec))
Opts.AltiVec = 1;
+ if (Args.hasArg(OPT_fzvector))
+ Opts.ZVector = 1;
+
if (Args.hasArg(OPT_pthread))
Opts.POSIXThreads = 1;
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index db9dd3b09887..3e0f7a12c3b3 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -191,7 +191,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
std::unique_ptr<ASTUnit> AST =
- ASTUnit::LoadFromASTFile(InputFile, CI.getPCHContainerOperations(),
+ ASTUnit::LoadFromASTFile(InputFile, CI.getPCHContainerReader(),
Diags, CI.getFileSystemOpts());
if (!AST)
@@ -273,7 +273,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
Dir != DirEnd && !EC; Dir.increment(EC)) {
// Check whether this is an acceptable AST file.
if (ASTReader::isAcceptableASTFile(
- Dir->path(), FileMgr, *CI.getPCHContainerOperations(),
+ Dir->path(), FileMgr, CI.getPCHContainerReader(),
CI.getLangOpts(), CI.getTargetOpts(), CI.getPreprocessorOpts(),
SpecificModuleCachePath)) {
PPOpts.ImplicitPCHInclude = Dir->path();
@@ -443,7 +443,7 @@ bool FrontendAction::Execute() {
if (CI.shouldBuildGlobalModuleIndex() && CI.hasFileManager() &&
CI.hasPreprocessor()) {
GlobalModuleIndex::writeIndex(
- CI.getFileManager(), *CI.getPCHContainerOperations(),
+ CI.getFileManager(), CI.getPCHContainerReader(),
CI.getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
}
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index 499776468818..40277bdaa52d 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -93,7 +93,7 @@ GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
Consumers.push_back(llvm::make_unique<PCHGenerator>(
CI.getPreprocessor(), OutputFile, nullptr, Sysroot, Buffer));
Consumers.push_back(
- CI.getPCHContainerOperations()->CreatePCHContainerGenerator(
+ CI.getPCHContainerWriter().CreatePCHContainerGenerator(
CI.getDiagnostics(), CI.getHeaderSearchOpts(),
CI.getPreprocessorOpts(), CI.getTargetOpts(), CI.getLangOpts(),
InFile, OutputFile, OS, Buffer));
@@ -139,7 +139,7 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
Consumers.push_back(llvm::make_unique<PCHGenerator>(
CI.getPreprocessor(), OutputFile, Module, Sysroot, Buffer));
Consumers.push_back(
- CI.getPCHContainerOperations()->CreatePCHContainerGenerator(
+ CI.getPCHContainerWriter().CreatePCHContainerGenerator(
CI.getDiagnostics(), CI.getHeaderSearchOpts(),
CI.getPreprocessorOpts(), CI.getTargetOpts(), CI.getLangOpts(),
InFile, OutputFile, OS, Buffer));
@@ -415,7 +415,7 @@ void VerifyPCHAction::ExecuteAction() {
bool Preamble = CI.getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
const std::string &Sysroot = CI.getHeaderSearchOpts().Sysroot;
std::unique_ptr<ASTReader> Reader(new ASTReader(
- CI.getPreprocessor(), CI.getASTContext(), *CI.getPCHContainerOperations(),
+ CI.getPreprocessor(), CI.getASTContext(), CI.getPCHContainerReader(),
Sysroot.empty() ? "" : Sysroot.c_str(),
/*DisableValidation*/ false,
/*AllowPCHWithCompilerErrors*/ false,
@@ -578,7 +578,7 @@ void DumpModuleInfoAction::ExecuteAction() {
DumpModuleInfoListener Listener(Out);
ASTReader::readASTFileControlBlock(
getCurrentFile(), getCompilerInstance().getFileManager(),
- *getCompilerInstance().getPCHContainerOperations(), Listener);
+ getCompilerInstance().getPCHContainerReader(), Listener);
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index 025c8b9d43a9..0791494f7919 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -97,11 +97,11 @@ static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
/// \brief Add an implicit \#include using the original file used to generate
/// a PCH file.
static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
StringRef ImplicitIncludePCH) {
std::string OriginalFile =
ASTReader::getOriginalSourceFile(ImplicitIncludePCH, PP.getFileManager(),
- PCHContainerOps, PP.getDiagnostics());
+ PCHContainerRdr, PP.getDiagnostics());
if (OriginalFile.empty())
return;
@@ -902,7 +902,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
///
void clang::InitializePreprocessor(
Preprocessor &PP, const PreprocessorOptions &InitOpts,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
const FrontendOptions &FEOpts) {
const LangOptions &LangOpts = PP.getLangOpts();
std::string PredefineBuffer;
@@ -962,7 +962,7 @@ void clang::InitializePreprocessor(
// Process -include-pch/-include-pth directives.
if (!InitOpts.ImplicitPCHInclude.empty())
- AddImplicitIncludePCH(Builder, PP, PCHContainerOps,
+ AddImplicitIncludePCH(Builder, PP, PCHContainerRdr,
InitOpts.ImplicitPCHInclude);
if (!InitOpts.ImplicitPTHInclude.empty())
AddImplicitIncludePTH(Builder, PP, InitOpts.ImplicitPTHInclude);
diff --git a/lib/Frontend/PCHContainerOperations.cpp b/lib/Frontend/PCHContainerOperations.cpp
index c749bb5c8db4..cde3ba139bcf 100644
--- a/lib/Frontend/PCHContainerOperations.cpp
+++ b/lib/Frontend/PCHContainerOperations.cpp
@@ -21,21 +21,22 @@ using namespace clang;
namespace {
/// \brief A PCHContainerGenerator that writes out the PCH to a flat file.
-class PCHContainerGenerator : public ASTConsumer {
+class RawPCHContainerGenerator : public ASTConsumer {
std::shared_ptr<PCHBuffer> Buffer;
raw_pwrite_stream *OS;
public:
- PCHContainerGenerator(DiagnosticsEngine &Diags,
- const HeaderSearchOptions &HSO,
- const PreprocessorOptions &PPO, const TargetOptions &TO,
- const LangOptions &LO, const std::string &MainFileName,
- const std::string &OutputFileName,
- llvm::raw_pwrite_stream *OS,
- std::shared_ptr<PCHBuffer> Buffer)
+ RawPCHContainerGenerator(DiagnosticsEngine &Diags,
+ const HeaderSearchOptions &HSO,
+ const PreprocessorOptions &PPO,
+ const TargetOptions &TO, const LangOptions &LO,
+ const std::string &MainFileName,
+ const std::string &OutputFileName,
+ llvm::raw_pwrite_stream *OS,
+ std::shared_ptr<PCHBuffer> Buffer)
: Buffer(Buffer), OS(OS) {}
- virtual ~PCHContainerGenerator() {}
+ virtual ~RawPCHContainerGenerator() {}
void HandleTranslationUnit(ASTContext &Ctx) override {
if (Buffer->IsComplete) {
@@ -50,19 +51,23 @@ public:
};
}
-std::unique_ptr<ASTConsumer>
-RawPCHContainerOperations::CreatePCHContainerGenerator(
+std::unique_ptr<ASTConsumer> RawPCHContainerWriter::CreatePCHContainerGenerator(
DiagnosticsEngine &Diags, const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO, const TargetOptions &TO,
const LangOptions &LO, const std::string &MainFileName,
const std::string &OutputFileName, llvm::raw_pwrite_stream *OS,
std::shared_ptr<PCHBuffer> Buffer) const {
- return llvm::make_unique<PCHContainerGenerator>(
+ return llvm::make_unique<RawPCHContainerGenerator>(
Diags, HSO, PPO, TO, LO, MainFileName, OutputFileName, OS, Buffer);
}
-void RawPCHContainerOperations::ExtractPCH(
+void RawPCHContainerReader::ExtractPCH(
llvm::MemoryBufferRef Buffer, llvm::BitstreamReader &StreamFile) const {
StreamFile.init((const unsigned char *)Buffer.getBufferStart(),
(const unsigned char *)Buffer.getBufferEnd());
}
+
+PCHContainerOperations::PCHContainerOperations() {
+ registerWriter(llvm::make_unique<RawPCHContainerWriter>());
+ registerReader(llvm::make_unique<RawPCHContainerReader>());
+}
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index 87afc60de996..7de5fbe8be0e 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -59,6 +59,7 @@ set(files
unwind.h
vadefs.h
varargs.h
+ vecintrin.h
__wmmintrin_aes.h
wmmintrin.h
__wmmintrin_pclmul.h
diff --git a/lib/Headers/__wmmintrin_aes.h b/lib/Headers/__wmmintrin_aes.h
index 81b2b8d0b0a4..9f594ee56092 100644
--- a/lib/Headers/__wmmintrin_aes.h
+++ b/lib/Headers/__wmmintrin_aes.h
@@ -25,8 +25,12 @@
#include <emmintrin.h>
+#if !defined (__AES__)
+# error "AES instructions not enabled"
+#else
+
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_aesenc_si128(__m128i __V, __m128i __R)
@@ -63,4 +67,6 @@ _mm_aesimc_si128(__m128i __V)
#undef __DEFAULT_FN_ATTRS
+#endif
+
#endif /* _WMMINTRIN_AES_H */
diff --git a/lib/Headers/__wmmintrin_pclmul.h b/lib/Headers/__wmmintrin_pclmul.h
index 48a85d24ee1b..8d1f1b7c0868 100644
--- a/lib/Headers/__wmmintrin_pclmul.h
+++ b/lib/Headers/__wmmintrin_pclmul.h
@@ -23,8 +23,12 @@
#ifndef _WMMINTRIN_PCLMUL_H
#define _WMMINTRIN_PCLMUL_H
+#if !defined (__PCLMUL__)
+# error "PCLMUL instruction is not enabled"
+#else
#define _mm_clmulepi64_si128(__X, __Y, __I) \
((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
(__v2di)(__m128i)(__Y), (char)(__I)))
+#endif
#endif /* _WMMINTRIN_PCLMUL_H */
diff --git a/lib/Headers/adxintrin.h b/lib/Headers/adxintrin.h
index ee347284178e..b8eb9cbf6ebb 100644
--- a/lib/Headers/adxintrin.h
+++ b/lib/Headers/adxintrin.h
@@ -32,7 +32,8 @@
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* Intrinsics that are available only if __ADX__ defined */
-static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+#ifdef __ADX__
+static __inline unsigned char __DEFAULT_FN_ATTRS
_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
unsigned int *__p)
{
@@ -40,13 +41,14 @@ _addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
}
#ifdef __x86_64__
-static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+static __inline unsigned char __DEFAULT_FN_ATTRS
_addcarryx_u64(unsigned char __cf, unsigned long long __x,
unsigned long long __y, unsigned long long *__p)
{
return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}
#endif
+#endif
/* Intrinsics that are also available if __ADX__ undefined */
static __inline unsigned char __DEFAULT_FN_ATTRS
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index f52bcbc5b4a8..5c8eb5640546 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -6563,119 +6563,218 @@ static vector signed char __ATTRS_o_ai vec_sld(vector signed char __a,
vector signed char __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector unsigned char __ATTRS_o_ai vec_sld(vector unsigned char __a,
vector unsigned char __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector bool char __ATTRS_o_ai vec_sld(vector bool char __a,
vector bool char __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector signed short __ATTRS_o_ai vec_sld(vector signed short __a,
vector signed short __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector unsigned short __ATTRS_o_ai vec_sld(vector unsigned short __a,
vector unsigned short __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector bool short __ATTRS_o_ai vec_sld(vector bool short __a,
vector bool short __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector pixel __ATTRS_o_ai vec_sld(vector pixel __a, vector pixel __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector signed int __ATTRS_o_ai vec_sld(vector signed int __a,
vector signed int __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector unsigned int __ATTRS_o_ai vec_sld(vector unsigned int __a,
vector unsigned int __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
vector bool int __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector float __ATTRS_o_ai vec_sld(vector float __a, vector float __b,
unsigned const int __c) {
unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
(vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
__d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
__d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
/* vec_vsldoi */
@@ -6683,77 +6782,157 @@ static vector float __ATTRS_o_ai vec_sld(vector float __a, vector float __b,
static vector signed char __ATTRS_o_ai vec_vsldoi(vector signed char __a,
vector signed char __b,
unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
- (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
- __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
- __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector unsigned char __ATTRS_o_ai vec_vsldoi(vector unsigned char __a,
vector unsigned char __b,
unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
- (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
- __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
- __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector short __ATTRS_o_ai vec_vsldoi(vector short __a, vector short __b,
unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
- (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
- __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
- __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector unsigned short __ATTRS_o_ai vec_vsldoi(vector unsigned short __a,
vector unsigned short __b,
unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
- (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
- __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
- __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a, vector pixel __b,
unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
- (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
- __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
- __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector int __ATTRS_o_ai vec_vsldoi(vector int __a, vector int __b,
unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
- (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
- __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
- __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector unsigned int __ATTRS_o_ai vec_vsldoi(vector unsigned int __a,
vector unsigned int __b,
unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
- (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
- __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
- __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
static vector float __ATTRS_o_ai vec_vsldoi(vector float __a, vector float __b,
unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
return vec_perm(
__a, __b,
- (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
- __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
- __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
}
/* vec_sll */
diff --git a/lib/Headers/ammintrin.h b/lib/Headers/ammintrin.h
index 91c633305b8b..4d0e770ff9e4 100644
--- a/lib/Headers/ammintrin.h
+++ b/lib/Headers/ammintrin.h
@@ -24,10 +24,14 @@
#ifndef __AMMINTRIN_H
#define __AMMINTRIN_H
+#ifndef __SSE4A__
+#error "SSE4A instruction set not enabled"
+#else
+
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
/// integer vector operand at the index idx and of the length len.
@@ -206,4 +210,6 @@ _mm_stream_ss(float *__p, __m128 __a)
#undef __DEFAULT_FN_ATTRS
+#endif /* __SSE4A__ */
+
#endif /* __AMMINTRIN_H */
diff --git a/lib/Headers/avx2intrin.h b/lib/Headers/avx2intrin.h
index cfa91410a338..d8b6b0aa4d23 100644
--- a/lib/Headers/avx2intrin.h
+++ b/lib/Headers/avx2intrin.h
@@ -29,7 +29,7 @@
#define __AVX2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx2")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* SSE4 Multiple Packed Sums of Absolute Difference. */
#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M))
diff --git a/lib/Headers/avx512bwintrin.h b/lib/Headers/avx512bwintrin.h
index b0d3462d4db6..9e8297a9c9a5 100644
--- a/lib/Headers/avx512bwintrin.h
+++ b/lib/Headers/avx512bwintrin.h
@@ -34,7 +34,7 @@ typedef char __v64qi __attribute__ ((__vector_size__ (64)));
typedef short __v32hi __attribute__ ((__vector_size__ (64)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline __v64qi __DEFAULT_FN_ATTRS
_mm512_setzero_qi (void) {
diff --git a/lib/Headers/avx512dqintrin.h b/lib/Headers/avx512dqintrin.h
index 8a69f7ffbf34..c946de286742 100644
--- a/lib/Headers/avx512dqintrin.h
+++ b/lib/Headers/avx512dqintrin.h
@@ -29,7 +29,7 @@
#define __AVX512DQINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index 099114453646..4f7cba0b1507 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -47,7 +47,7 @@ typedef unsigned short __mmask16;
#define _MM_FROUND_CUR_DIRECTION 0x04
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* Create vectors with repeated elements */
diff --git a/lib/Headers/avx512vlbwintrin.h b/lib/Headers/avx512vlbwintrin.h
index eb198a5ade6f..74ec17583096 100644
--- a/lib/Headers/avx512vlbwintrin.h
+++ b/lib/Headers/avx512vlbwintrin.h
@@ -29,7 +29,7 @@
#define __AVX512VLBWINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* Integer compare */
diff --git a/lib/Headers/avx512vldqintrin.h b/lib/Headers/avx512vldqintrin.h
index 032070bfb902..1edf29d128ee 100644
--- a/lib/Headers/avx512vldqintrin.h
+++ b/lib/Headers/avx512vldqintrin.h
@@ -29,7 +29,7 @@
#define __AVX512VLDQINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
index 73f46d101a55..fc1b9d6e7a23 100644
--- a/lib/Headers/avx512vlintrin.h
+++ b/lib/Headers/avx512vlintrin.h
@@ -29,7 +29,7 @@
#define __AVX512VLINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* Integer compare */
diff --git a/lib/Headers/avxintrin.h b/lib/Headers/avxintrin.h
index 6a9972b65bb0..c1bc85b39e82 100644
--- a/lib/Headers/avxintrin.h
+++ b/lib/Headers/avxintrin.h
@@ -40,7 +40,7 @@ typedef double __m256d __attribute__((__vector_size__(32)));
typedef long long __m256i __attribute__((__vector_size__(32)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* Arithmetic */
static __inline __m256d __DEFAULT_FN_ATTRS
diff --git a/lib/Headers/bmi2intrin.h b/lib/Headers/bmi2intrin.h
index fdae82cf2ba7..c63397c96ebe 100644
--- a/lib/Headers/bmi2intrin.h
+++ b/lib/Headers/bmi2intrin.h
@@ -25,11 +25,15 @@
#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
#endif
+#ifndef __BMI2__
+# error "BMI2 instruction set not enabled"
+#endif /* __BMI2__ */
+
#ifndef __BMI2INTRIN_H
#define __BMI2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_bzhi_u32(unsigned int __X, unsigned int __Y)
diff --git a/lib/Headers/bmiintrin.h b/lib/Headers/bmiintrin.h
index dc2f83f3e2d1..0e93d575cb8b 100644
--- a/lib/Headers/bmiintrin.h
+++ b/lib/Headers/bmiintrin.h
@@ -25,6 +25,10 @@
#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
#endif
+#ifndef __BMI__
+# error "BMI instruction set not enabled"
+#endif /* __BMI__ */
+
#ifndef __BMIINTRIN_H
#define __BMIINTRIN_H
@@ -37,7 +41,7 @@
#define _tzcnt_u32(a) (__tzcnt_u32((a)))
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned short __DEFAULT_FN_ATTRS
__tzcnt_u16(unsigned short __X)
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index e22ffaf19240..656bc19d3dea 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -24,6 +24,10 @@
#ifndef __EMMINTRIN_H
#define __EMMINTRIN_H
+#ifndef __SSE2__
+#error "SSE2 instruction set not enabled"
+#else
+
#include <xmmintrin.h>
typedef double __m128d __attribute__((__vector_size__(16)));
@@ -36,7 +40,7 @@ typedef short __v8hi __attribute__((__vector_size__(16)));
typedef char __v16qi __attribute__((__vector_size__(16)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_add_sd(__m128d __a, __m128d __b)
@@ -1471,4 +1475,6 @@ _mm_pause(void)
#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
+#endif /* __SSE2__ */
+
#endif /* __EMMINTRIN_H */
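[Editor's note: illustrative sketch, not part of the patch.] With the guard above, a translation unit that includes <emmintrin.h> must be built with SSE2 enabled, otherwise the #error fires. A minimal example (hypothetical file name sse2_demo.c):

    /* sse2_demo.c */
    #include <emmintrin.h>

    __m128d add_low(__m128d a, __m128d b) {
      return _mm_add_sd(a, b);              /* declared in emmintrin.h */
    }

    /* clang -msse2 -c sse2_demo.c     compiles                                  */
    /* clang -mno-sse2 -c sse2_demo.c   fails: "SSE2 instruction set not enabled" */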
diff --git a/lib/Headers/f16cintrin.h b/lib/Headers/f16cintrin.h
index 9349b78fe84c..3730ae0d3eeb 100644
--- a/lib/Headers/f16cintrin.h
+++ b/lib/Headers/f16cintrin.h
@@ -25,6 +25,10 @@
#error "Never use <f16cintrin.h> directly; include <x86intrin.h> instead."
#endif
+#ifndef __F16C__
+# error "F16C instruction is not enabled"
+#endif /* __F16C__ */
+
#ifndef __F16CINTRIN_H
#define __F16CINTRIN_H
@@ -32,7 +36,7 @@ typedef float __v8sf __attribute__ ((__vector_size__ (32)));
typedef float __m256 __attribute__ ((__vector_size__ (32)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
#define _mm_cvtps_ph(a, imm) __extension__ ({ \
__m128 __a = (a); \
diff --git a/lib/Headers/fma4intrin.h b/lib/Headers/fma4intrin.h
index f1178877b252..d6405cf02922 100644
--- a/lib/Headers/fma4intrin.h
+++ b/lib/Headers/fma4intrin.h
@@ -28,10 +28,14 @@
#ifndef __FMA4INTRIN_H
#define __FMA4INTRIN_H
+#ifndef __FMA4__
+# error "FMA4 instruction set is not enabled"
+#else
+
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma4")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
@@ -227,4 +231,6 @@ _mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
#undef __DEFAULT_FN_ATTRS
+#endif /* __FMA4__ */
+
#endif /* __FMA4INTRIN_H */
diff --git a/lib/Headers/fmaintrin.h b/lib/Headers/fmaintrin.h
index 114a14380ea0..ad693fed0bfd 100644
--- a/lib/Headers/fmaintrin.h
+++ b/lib/Headers/fmaintrin.h
@@ -28,8 +28,12 @@
#ifndef __FMAINTRIN_H
#define __FMAINTRIN_H
+#ifndef __FMA__
+# error "FMA instruction set is not enabled"
+#else
+
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
@@ -225,4 +229,6 @@ _mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
#undef __DEFAULT_FN_ATTRS
+#endif /* __FMA__ */
+
#endif /* __FMAINTRIN_H */
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index 604bc8ca5b39..21ad3281f850 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -24,123 +24,175 @@
#ifndef __IMMINTRIN_H
#define __IMMINTRIN_H
+#ifdef __MMX__
#include <mmintrin.h>
+#endif
+#ifdef __SSE__
#include <xmmintrin.h>
+#endif
+#ifdef __SSE2__
#include <emmintrin.h>
+#endif
+#ifdef __SSE3__
#include <pmmintrin.h>
+#endif
+#ifdef __SSSE3__
#include <tmmintrin.h>
+#endif
+#if defined (__SSE4_2__) || defined (__SSE4_1__)
#include <smmintrin.h>
+#endif
+#if defined (__AES__) || defined (__PCLMUL__)
#include <wmmintrin.h>
+#endif
+#ifdef __AVX__
#include <avxintrin.h>
+#endif
+#ifdef __AVX2__
#include <avx2intrin.h>
+#endif
+#ifdef __BMI__
#include <bmiintrin.h>
+#endif
+#ifdef __BMI2__
#include <bmi2intrin.h>
+#endif
+#ifdef __LZCNT__
#include <lzcntintrin.h>
+#endif
+#ifdef __FMA__
#include <fmaintrin.h>
+#endif
+#ifdef __AVX512F__
#include <avx512fintrin.h>
+#endif
+#ifdef __AVX512VL__
#include <avx512vlintrin.h>
+#endif
+#ifdef __AVX512BW__
#include <avx512bwintrin.h>
+#endif
+#ifdef __AVX512CD__
#include <avx512cdintrin.h>
+#endif
+#ifdef __AVX512DQ__
#include <avx512dqintrin.h>
+#endif
+#if defined (__AVX512VL__) && defined (__AVX512BW__)
#include <avx512vlbwintrin.h>
+#endif
+#if defined (__AVX512VL__) && defined (__AVX512DQ__)
#include <avx512vldqintrin.h>
+#endif
+#ifdef __AVX512ER__
#include <avx512erintrin.h>
+#endif
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+#ifdef __RDRND__
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdrand16_step(unsigned short *__p)
{
return __builtin_ia32_rdrand16_step(__p);
}
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdrand32_step(unsigned int *__p)
{
return __builtin_ia32_rdrand32_step(__p);
}
#ifdef __x86_64__
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_rdrand64_step(unsigned long long *__p)
{
return __builtin_ia32_rdrand64_step(__p);
}
#endif
+#endif /* __RDRND__ */
+#ifdef __FSGSBASE__
#ifdef __x86_64__
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_readfsbase_u32(void)
{
return __builtin_ia32_rdfsbase32();
}
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_readfsbase_u64(void)
{
return __builtin_ia32_rdfsbase64();
}
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_readgsbase_u32(void)
{
return __builtin_ia32_rdgsbase32();
}
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_readgsbase_u64(void)
{
return __builtin_ia32_rdgsbase64();
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_writefsbase_u32(unsigned int __V)
{
return __builtin_ia32_wrfsbase32(__V);
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_writefsbase_u64(unsigned long long __V)
{
return __builtin_ia32_wrfsbase64(__V);
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_writegsbase_u32(unsigned int __V)
{
return __builtin_ia32_wrgsbase32(__V);
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_writegsbase_u64(unsigned long long __V)
{
return __builtin_ia32_wrgsbase64(__V);
}
#endif
+#endif /* __FSGSBASE__ */
+#ifdef __RTM__
#include <rtmintrin.h>
+#endif
+#ifdef __RTM__
#include <xtestintrin.h>
+#endif
+#ifdef __SHA__
#include <shaintrin.h>
+#endif
#include <fxsrintrin.h>
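[Editor's note: illustrative sketch, not part of the patch.] With the conditional includes above, <immintrin.h> only pulls in the sub-headers whose feature macros are defined, so client code typically tests the same macros before touching the intrinsics:

    #include <immintrin.h>

    #ifdef __AVX2__
    __m256i double_each(__m256i v) {
      return _mm256_add_epi32(v, v);        /* provided by avx2intrin.h */
    }
    #endif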
diff --git a/lib/Headers/lzcntintrin.h b/lib/Headers/lzcntintrin.h
index 4c00e42ac3a9..8ee29975c2eb 100644
--- a/lib/Headers/lzcntintrin.h
+++ b/lib/Headers/lzcntintrin.h
@@ -25,11 +25,15 @@
#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
#endif
+#ifndef __LZCNT__
+# error "LZCNT instruction is not enabled"
+#endif /* __LZCNT__ */
+
#ifndef __LZCNTINTRIN_H
#define __LZCNTINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned short __DEFAULT_FN_ATTRS
__lzcnt16(unsigned short __X)
diff --git a/lib/Headers/mm3dnow.h b/lib/Headers/mm3dnow.h
index 3218df827177..ac8e0f4af1bf 100644
--- a/lib/Headers/mm3dnow.h
+++ b/lib/Headers/mm3dnow.h
@@ -30,7 +30,7 @@
typedef float __v2sf __attribute__((__vector_size__(8)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ void __DEFAULT_FN_ATTRS
_m_femms() {
diff --git a/lib/Headers/mmintrin.h b/lib/Headers/mmintrin.h
index 484b499dd7fc..0be5f32c7d02 100644
--- a/lib/Headers/mmintrin.h
+++ b/lib/Headers/mmintrin.h
@@ -24,6 +24,10 @@
#ifndef __MMINTRIN_H
#define __MMINTRIN_H
+#ifndef __MMX__
+#error "MMX instruction set not enabled"
+#else
+
typedef long long __m64 __attribute__((__vector_size__(8)));
typedef int __v2si __attribute__((__vector_size__(8)));
@@ -31,7 +35,7 @@ typedef short __v4hi __attribute__((__vector_size__(8)));
typedef char __v8qi __attribute__((__vector_size__(8)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ void __DEFAULT_FN_ATTRS
_mm_empty(void)
@@ -497,5 +501,7 @@ _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
#define _m_pcmpgtw _mm_cmpgt_pi16
#define _m_pcmpgtd _mm_cmpgt_pi32
+#endif /* __MMX__ */
+
#endif /* __MMINTRIN_H */
diff --git a/lib/Headers/module.modulemap b/lib/Headers/module.modulemap
index 0fc70a8c9b06..b861fdd8c2a1 100644
--- a/lib/Headers/module.modulemap
+++ b/lib/Headers/module.modulemap
@@ -32,117 +32,142 @@ module _Builtin_intrinsics [system] [extern_c] {
}
explicit module cpuid {
+ requires x86
header "cpuid.h"
}
explicit module mmx {
+ requires mmx
header "mmintrin.h"
}
explicit module f16c {
+ requires f16c
header "f16cintrin.h"
}
explicit module sse {
+ requires sse
export mmx
export sse2 // note: for hackish <emmintrin.h> dependency
header "xmmintrin.h"
}
explicit module sse2 {
+ requires sse2
export sse
header "emmintrin.h"
}
explicit module sse3 {
+ requires sse3
export sse2
header "pmmintrin.h"
}
explicit module ssse3 {
+ requires ssse3
export sse3
header "tmmintrin.h"
}
explicit module sse4_1 {
+ requires sse41
export ssse3
header "smmintrin.h"
}
explicit module sse4_2 {
+ requires sse42
export sse4_1
header "nmmintrin.h"
}
explicit module sse4a {
+ requires sse4a
export sse3
header "ammintrin.h"
}
explicit module avx {
+ requires avx
export sse4_2
header "avxintrin.h"
}
explicit module avx2 {
+ requires avx2
export avx
header "avx2intrin.h"
}
explicit module avx512f {
+ requires avx512f
export avx2
header "avx512fintrin.h"
}
explicit module avx512er {
+ requires avx512er
header "avx512erintrin.h"
}
explicit module bmi {
+ requires bmi
header "bmiintrin.h"
}
explicit module bmi2 {
+ requires bmi2
header "bmi2intrin.h"
}
explicit module fma {
+ requires fma
header "fmaintrin.h"
}
explicit module fma4 {
+ requires fma4
export sse3
header "fma4intrin.h"
}
explicit module lzcnt {
+ requires lzcnt
header "lzcntintrin.h"
}
explicit module popcnt {
+ requires popcnt
header "popcntintrin.h"
}
explicit module mm3dnow {
+ requires mm3dnow
header "mm3dnow.h"
}
explicit module xop {
+ requires xop
export fma4
header "xopintrin.h"
}
explicit module aes_pclmul {
+ requires aes, pclmul
header "wmmintrin.h"
export aes
export pclmul
}
explicit module aes {
+ requires aes
header "__wmmintrin_aes.h"
}
explicit module pclmul {
+ requires pclmul
header "__wmmintrin_pclmul.h"
}
}
@@ -158,6 +183,11 @@ module _Builtin_intrinsics [system] [extern_c] {
header "htmintrin.h"
header "htmxlintrin.h"
}
+
+ explicit module zvector {
+ requires zvector, vx
+ header "vecintrin.h"
+ }
}
}
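[Editor's note: illustrative sketch, not part of the patch.] A "requires" declaration in a module map names the language or target features that must be available before the submodule can be imported. The same pattern in a minimal standalone form (hypothetical module name):

    module MyVendorIntrin [system] [extern_c] {
      explicit module avx2 {
        requires avx2          // only importable when the avx2 feature is enabled
        header "avx2intrin.h"
      }
    }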
diff --git a/lib/Headers/nmmintrin.h b/lib/Headers/nmmintrin.h
index 57fec15963d1..f12622d7be68 100644
--- a/lib/Headers/nmmintrin.h
+++ b/lib/Headers/nmmintrin.h
@@ -24,7 +24,12 @@
#ifndef _NMMINTRIN_H
#define _NMMINTRIN_H
+#ifndef __SSE4_2__
+#error "SSE4.2 instruction set not enabled"
+#else
+
/* To match expectations of gcc we put the sse4.2 definitions into smmintrin.h,
just include it now then. */
#include <smmintrin.h>
+#endif /* __SSE4_2__ */
#endif /* _NMMINTRIN_H */
diff --git a/lib/Headers/pmmintrin.h b/lib/Headers/pmmintrin.h
index e888b6fb7067..e1b8d9b603d1 100644
--- a/lib/Headers/pmmintrin.h
+++ b/lib/Headers/pmmintrin.h
@@ -24,10 +24,14 @@
#ifndef __PMMINTRIN_H
#define __PMMINTRIN_H
+#ifndef __SSE3__
+#error "SSE3 instruction set not enabled"
+#else
+
#include <emmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse3")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_lddqu_si128(__m128i const *__p)
@@ -113,4 +117,6 @@ _mm_mwait(unsigned __extensions, unsigned __hints)
#undef __DEFAULT_FN_ATTRS
+#endif /* __SSE3__ */
+
#endif /* __PMMINTRIN_H */
diff --git a/lib/Headers/popcntintrin.h b/lib/Headers/popcntintrin.h
index 29c074b61d1c..1a4e9000aeb6 100644
--- a/lib/Headers/popcntintrin.h
+++ b/lib/Headers/popcntintrin.h
@@ -21,11 +21,15 @@
*===-----------------------------------------------------------------------===
*/
+#ifndef __POPCNT__
+#error "POPCNT instruction set not enabled"
+#endif
+
#ifndef _POPCNTINTRIN_H
#define _POPCNTINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ int __DEFAULT_FN_ATTRS
_mm_popcnt_u32(unsigned int __A)
diff --git a/lib/Headers/rdseedintrin.h b/lib/Headers/rdseedintrin.h
index 421f4ea48702..fdf7e18afa95 100644
--- a/lib/Headers/rdseedintrin.h
+++ b/lib/Headers/rdseedintrin.h
@@ -28,8 +28,10 @@
#ifndef __RDSEEDINTRIN_H
#define __RDSEEDINTRIN_H
+#ifdef __RDSEED__
+
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed16_step(unsigned short *__p)
@@ -53,4 +55,5 @@ _rdseed64_step(unsigned long long *__p)
#undef __DEFAULT_FN_ATTRS
+#endif /* __RDSEED__ */
#endif /* __RDSEEDINTRIN_H */
diff --git a/lib/Headers/rtmintrin.h b/lib/Headers/rtmintrin.h
index e6a58d743bc9..17256815fb8d 100644
--- a/lib/Headers/rtmintrin.h
+++ b/lib/Headers/rtmintrin.h
@@ -38,7 +38,7 @@
#define _XABORT_CODE(x) (((x) >> 24) & 0xFF)
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rtm")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_xbegin(void)
diff --git a/lib/Headers/s390intrin.h b/lib/Headers/s390intrin.h
index b20989552d4d..d51274c07df5 100644
--- a/lib/Headers/s390intrin.h
+++ b/lib/Headers/s390intrin.h
@@ -32,4 +32,8 @@
#include <htmintrin.h>
#endif
+#ifdef __VEC__
+#include <vecintrin.h>
+#endif
+
#endif /* __S390INTRIN_H*/
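[Editor's note: illustrative sketch, not part of the patch; the exact driver flags are an assumption.] __VEC__ is defined when the z/Architecture vector facility is enabled (for example with -march=z13 -mzvector), so the new include makes the vecintrin.h intrinsics reachable through <s390intrin.h>:

    #include <s390intrin.h>

    #ifdef __VEC__
    vector signed int twice(vector signed int v) {
      return v + v;                         /* elementwise 32-bit add */
    }
    #endif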
diff --git a/lib/Headers/shaintrin.h b/lib/Headers/shaintrin.h
index 8602d0249d48..960cced7a55c 100644
--- a/lib/Headers/shaintrin.h
+++ b/lib/Headers/shaintrin.h
@@ -28,8 +28,12 @@
#ifndef __SHAINTRIN_H
#define __SHAINTRIN_H
+#if !defined (__SHA__)
+# error "SHA instructions not enabled"
+#endif
+
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
#define _mm_sha1rnds4_epu32(V1, V2, M) __extension__ ({ \
__builtin_ia32_sha1rnds4((V1), (V2), (M)); })
diff --git a/lib/Headers/smmintrin.h b/lib/Headers/smmintrin.h
index e197590a38e2..04bd0722b11f 100644
--- a/lib/Headers/smmintrin.h
+++ b/lib/Headers/smmintrin.h
@@ -24,10 +24,14 @@
#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H
+#ifndef __SSE4_1__
+#error "SSE4.1 instruction set not enabled"
+#else
+
#include <tmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
@@ -375,13 +379,9 @@ _mm_minpos_epu16(__m128i __V)
return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
}
-/* Handle the sse4.2 definitions here. */
-
/* These definitions are normally in nmmintrin.h, but gcc puts them in here
so we'll do the same. */
-
-#undef __DEFAULT_FN_ATTRS
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+#ifdef __SSE4_2__
/* These specify the type of data that we're comparing. */
#define _SIDD_UBYTE_OPS 0x00
@@ -481,4 +481,7 @@ _mm_crc32_u64(unsigned long long __C, unsigned long long __D)
#include <popcntintrin.h>
#endif
+#endif /* __SSE4_2__ */
+#endif /* __SSE4_1__ */
+
#endif /* _SMMINTRIN_H */
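[Editor's note: illustrative sketch, not part of the patch.] After this change the SSE4.2 block of smmintrin.h is only compiled when __SSE4_2__ is defined, while the SSE4.1 intrinsics still work with SSE4.1 alone:

    /* sse41_demo.c (hypothetical); build with: clang -msse4.1 -c sse41_demo.c */
    #include <smmintrin.h>

    __m128i minpos(__m128i v) {
      return _mm_minpos_epu16(v);           /* SSE4.1 intrinsic shown above */
    }

    /* The string-compare and CRC32 intrinsics guarded by __SSE4_2__ are not
       declared in this configuration. */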
diff --git a/lib/Headers/tbmintrin.h b/lib/Headers/tbmintrin.h
index 62f613f9ee5c..48c0b07f423f 100644
--- a/lib/Headers/tbmintrin.h
+++ b/lib/Headers/tbmintrin.h
@@ -21,6 +21,10 @@
*===-----------------------------------------------------------------------===
*/
+#ifndef __TBM__
+#error "TBM instruction set is not enabled"
+#endif
+
#ifndef __X86INTRIN_H
#error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead."
#endif
@@ -29,7 +33,7 @@
#define __TBMINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("tbm")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
#define __bextri_u32(a, b) (__builtin_ia32_bextri_u32((a), (b)))
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index 120d73c34f57..2ecc730e90e9 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -24,10 +24,14 @@
#ifndef __TMMINTRIN_H
#define __TMMINTRIN_H
+#ifndef __SSSE3__
+#error "SSSE3 instruction set not enabled"
+#else
+
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_abs_pi8(__m64 __a)
@@ -221,4 +225,6 @@ _mm_sign_pi32(__m64 __a, __m64 __b)
#undef __DEFAULT_FN_ATTRS
+#endif /* __SSSE3__ */
+
#endif /* __TMMINTRIN_H */
diff --git a/lib/Headers/vecintrin.h b/lib/Headers/vecintrin.h
new file mode 100644
index 000000000000..ca7acb4731f9
--- /dev/null
+++ b/lib/Headers/vecintrin.h
@@ -0,0 +1,8946 @@
+/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if defined(__s390x__) && defined(__VEC__)
+
+#define __ATTRS_ai __attribute__((__always_inline__))
+#define __ATTRS_o __attribute__((__overloadable__))
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+#define __constant(PARM) \
+ __attribute__((__enable_if__ ((PARM) == (PARM), \
+ "argument must be a constant integer")))
+#define __constant_range(PARM, LOW, HIGH) \
+ __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \
+ "argument must be a constant integer from " #LOW " to " #HIGH)))
+#define __constant_pow2_range(PARM, LOW, HIGH) \
+ __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \
+ ((PARM) & ((PARM) - 1)) == 0, \
+ "argument must be a constant power of 2 from " #LOW " to " #HIGH)))
+
+/*-- __lcbb -----------------------------------------------------------------*/
+
+extern __ATTRS_o unsigned int
+__lcbb(const void *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \
+ __builtin_s390_lcbb((X), __builtin_constant_p((Y))? \
+ ((Y) == 64 ? 0 : \
+ (Y) == 128 ? 1 : \
+ (Y) == 256 ? 2 : \
+ (Y) == 512 ? 3 : \
+ (Y) == 1024 ? 4 : \
+ (Y) == 2048 ? 5 : \
+ (Y) == 4096 ? 6 : 0) : 0))
+
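/* Editor's note: illustrative sketch, not part of the patch.  The macro above
 * folds a constant power-of-two boundary into the encoding expected by
 * __builtin_s390_lcbb; for example, __lcbb(p, 512) lowers to
 * __builtin_s390_lcbb(p, 3).  A hypothetical caller:
 */
static inline unsigned int bytes_to_512_boundary(const void *p) {
  return __lcbb(p, 512);
}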
+/*-- vec_extract ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai signed char
+vec_extract(vector signed char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(vector bool char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(vector unsigned char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai signed short
+vec_extract(vector signed short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(vector bool short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(vector unsigned short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai signed int
+vec_extract(vector signed int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(vector bool int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(vector unsigned int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai signed long long
+vec_extract(vector signed long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(vector bool long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(vector unsigned long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai double
+vec_extract(vector double __vec, int __index) {
+ return __vec[__index & 1];
+}
+
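/* Editor's note: illustrative sketch, not part of the patch.  The index is
 * masked by the element count, so out-of-range indices wrap; with a
 * 16-element char vector, index 18 reads element 18 & 15 == 2:
 */
static inline signed char third_byte(vector signed char v) {
  return vec_extract(v, 18);
}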
+/*-- vec_insert -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_insert(signed char __scalar, vector signed char __vec, int __index) {
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert(unsigned char __scalar, vector bool char __vec, int __index) {
+ vector unsigned char __newvec = (vector unsigned char)__vec;
+ __newvec[__index & 15] = (unsigned char)__scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert(unsigned char __scalar, vector unsigned char __vec, int __index) {
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_insert(signed short __scalar, vector signed short __vec, int __index) {
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert(unsigned short __scalar, vector bool short __vec, int __index) {
+ vector unsigned short __newvec = (vector unsigned short)__vec;
+ __newvec[__index & 7] = (unsigned short)__scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert(unsigned short __scalar, vector unsigned short __vec, int __index) {
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_insert(signed int __scalar, vector signed int __vec, int __index) {
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert(unsigned int __scalar, vector bool int __vec, int __index) {
+ vector unsigned int __newvec = (vector unsigned int)__vec;
+ __newvec[__index & 3] = __scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert(unsigned int __scalar, vector unsigned int __vec, int __index) {
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_insert(signed long long __scalar, vector signed long long __vec,
+ int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert(unsigned long long __scalar, vector bool long long __vec,
+ int __index) {
+ vector unsigned long long __newvec = (vector unsigned long long)__vec;
+ __newvec[__index & 1] = __scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert(unsigned long long __scalar, vector unsigned long long __vec,
+ int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_insert(double __scalar, vector double __vec, int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+/*-- vec_promote ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_promote(signed char __scalar, int __index) {
+ const vector signed char __zero = (vector signed char)0;
+ vector signed char __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_promote(unsigned char __scalar, int __index) {
+ const vector unsigned char __zero = (vector unsigned char)0;
+ vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_promote(signed short __scalar, int __index) {
+ const vector signed short __zero = (vector signed short)0;
+ vector signed short __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_promote(unsigned short __scalar, int __index) {
+ const vector unsigned short __zero = (vector unsigned short)0;
+ vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_promote(signed int __scalar, int __index) {
+ const vector signed int __zero = (vector signed int)0;
+ vector signed int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_promote(unsigned int __scalar, int __index) {
+ const vector unsigned int __zero = (vector unsigned int)0;
+ vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_promote(signed long long __scalar, int __index) {
+ const vector signed long long __zero = (vector signed long long)0;
+ vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_promote(unsigned long long __scalar, int __index) {
+ const vector unsigned long long __zero = (vector unsigned long long)0;
+ vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_promote(double __scalar, int __index) {
+ const vector double __zero = (vector double)0;
+ vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+/*-- vec_insert_and_zero ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_insert_and_zero(const signed char *__ptr) {
+ vector signed char __vec = (vector signed char)0;
+ __vec[7] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert_and_zero(const unsigned char *__ptr) {
+ vector unsigned char __vec = (vector unsigned char)0;
+ __vec[7] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_insert_and_zero(const signed short *__ptr) {
+ vector signed short __vec = (vector signed short)0;
+ __vec[3] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert_and_zero(const unsigned short *__ptr) {
+ vector unsigned short __vec = (vector unsigned short)0;
+ __vec[3] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_insert_and_zero(const signed int *__ptr) {
+ vector signed int __vec = (vector signed int)0;
+ __vec[1] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert_and_zero(const unsigned int *__ptr) {
+ vector unsigned int __vec = (vector unsigned int)0;
+ __vec[1] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_insert_and_zero(const signed long long *__ptr) {
+ vector signed long long __vec = (vector signed long long)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert_and_zero(const unsigned long long *__ptr) {
+ vector unsigned long long __vec = (vector unsigned long long)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_insert_and_zero(const double *__ptr) {
+ vector double __vec = (vector double)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
+
+/*-- vec_perm ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_perm(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return (vector signed char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_perm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector unsigned char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_perm(vector bool char __a, vector bool char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_perm(vector signed short __a, vector signed short __b,
+ vector unsigned char __c) {
+ return (vector signed short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_perm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned char __c) {
+ return (vector unsigned short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_perm(vector bool short __a, vector bool short __b,
+ vector unsigned char __c) {
+ return (vector bool short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_perm(vector signed int __a, vector signed int __b,
+ vector unsigned char __c) {
+ return (vector signed int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_perm(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned char __c) {
+ return (vector unsigned int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_perm(vector bool int __a, vector bool int __b,
+ vector unsigned char __c) {
+ return (vector bool int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_perm(vector signed long long __a, vector signed long long __b,
+ vector unsigned char __c) {
+ return (vector signed long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+ return (vector unsigned long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c) {
+ return (vector bool long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_perm(vector double __a, vector double __b,
+ vector unsigned char __c) {
+ return (vector double)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+/*-- vec_permi --------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed long long
+vec_permi(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned long long
+vec_permi(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector bool long long
+vec_permi(vector bool long long __a, vector bool long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector double
+vec_permi(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 3);
+
+#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
+ __builtin_s390_vpdi((vector unsigned long long)(X), \
+ (vector unsigned long long)(Y), \
+ (((Z) & 2) << 1) | ((Z) & 1)))
+
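/* Editor's note: illustrative sketch, not part of the patch.  The macro above
 * remaps the selector __c = 0,1,2,3 to the vpdi immediates 0,1,4,5; for
 * example, vec_permi(a, b, 2) lowers to __builtin_s390_vpdi(a, b, 4), pairing
 * doubleword 1 of a with doubleword 0 of b:
 */
static inline vector double cross_halves(vector double a, vector double b) {
  return vec_permi(a, b, 2);
}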
+/*-- vec_sel ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_sel(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
+ return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
+ return ((vector bool char)__c & __b) | (~(vector bool char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sel(vector bool char __a, vector bool char __b, vector bool char __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+ vector bool char __c) {
+ return ((vector unsigned char)__c & __b) | (~(vector unsigned char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sel(vector signed short __a, vector signed short __b,
+ vector unsigned short __c) {
+ return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sel(vector signed short __a, vector signed short __b,
+ vector bool short __c) {
+ return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sel(vector bool short __a, vector bool short __b,
+ vector unsigned short __c) {
+ return ((vector bool short)__c & __b) | (~(vector bool short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector bool short __c) {
+ return (((vector unsigned short)__c & __b) |
+ (~(vector unsigned short)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sel(vector signed int __a, vector signed int __b,
+ vector unsigned int __c) {
+ return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sel(vector signed int __a, vector signed int __b, vector bool int __c) {
+ return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
+ return ((vector bool int)__c & __b) | (~(vector bool int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sel(vector bool int __a, vector bool int __b, vector bool int __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sel(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
+ return ((vector unsigned int)__c & __b) | (~(vector unsigned int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector unsigned long long __c) {
+ return (((vector signed long long)__c & __b) |
+ (~(vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector bool long long __c) {
+ return (((vector signed long long)__c & __b) |
+ (~(vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector unsigned long long __c) {
+ return (((vector bool long long)__c & __b) |
+ (~(vector bool long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector bool long long __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector bool long long __c) {
+ return (((vector unsigned long long)__c & __b) |
+ (~(vector unsigned long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
+ return (vector double)((__c & (vector unsigned long long)__b) |
+ (~__c & (vector unsigned long long)__a));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_sel(vector double __a, vector double __b, vector bool long long __c) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ vector unsigned long long __cc = (vector unsigned long long)__c;
+ return (vector double)((__cc & __bc) | (~__cc & __ac));
+}
+
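/* Editor's note: illustrative sketch, not part of the patch.  vec_sel is a
 * pure bitwise select: each result bit comes from __b where the mask bit is 1
 * and from __a where it is 0, exactly (__c & __b) | (~__c & __a) as above:
 */
static inline vector unsigned int blend_by_mask(vector unsigned int a,
                                                vector unsigned int b,
                                                vector unsigned int mask) {
  return vec_sel(a, b, mask);
}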
+/*-- vec_gather_element -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed int
+vec_gather_element(vector signed int __vec, vector unsigned int __offset,
+ const signed int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const signed int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_gather_element(vector bool int __vec, vector unsigned int __offset,
+ const unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const unsigned int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gather_element(vector unsigned int __vec, vector unsigned int __offset,
+ const unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const unsigned int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_gather_element(vector signed long long __vec,
+ vector unsigned long long __offset,
+ const signed long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const signed long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_gather_element(vector bool long long __vec,
+ vector unsigned long long __offset,
+ const unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const unsigned long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gather_element(vector unsigned long long __vec,
+ vector unsigned long long __offset,
+ const unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const unsigned long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_gather_element(vector double __vec, vector unsigned long long __offset,
+ const double *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const double *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+/*-- vec_scatter_element ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector signed int __vec, vector unsigned int __offset,
+ signed int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(signed int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector bool int __vec, vector unsigned int __offset,
+ unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector unsigned int __vec, vector unsigned int __offset,
+ unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector signed long long __vec,
+ vector unsigned long long __offset,
+ signed long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(signed long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector bool long long __vec,
+ vector unsigned long long __offset,
+ unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector unsigned long long __vec,
+ vector unsigned long long __offset,
+ unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector double __vec, vector unsigned long long __offset,
+ double *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(double *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+/*-- vec_xld2 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_xld2(long __offset, const signed char *__ptr) {
+ return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xld2(long __offset, const unsigned char *__ptr) {
+ return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_xld2(long __offset, const signed short *__ptr) {
+ return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xld2(long __offset, const unsigned short *__ptr) {
+ return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_xld2(long __offset, const signed int *__ptr) {
+ return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_xld2(long __offset, const unsigned int *__ptr) {
+ return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_xld2(long __offset, const signed long long *__ptr) {
+ return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_xld2(long __offset, const unsigned long long *__ptr) {
+ return *(const vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_xld2(long __offset, const double *__ptr) {
+ return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+/*-- vec_xlw4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_xlw4(long __offset, const signed char *__ptr) {
+ return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xlw4(long __offset, const unsigned char *__ptr) {
+ return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_xlw4(long __offset, const signed short *__ptr) {
+ return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xlw4(long __offset, const unsigned short *__ptr) {
+ return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_xlw4(long __offset, const signed int *__ptr) {
+ return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_xlw4(long __offset, const unsigned int *__ptr) {
+ return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+/*-- vec_xstd2 --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed char __vec, long __offset, signed char *__ptr) {
+ *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed short __vec, long __offset, signed short *__ptr) {
+ *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed int __vec, long __offset, signed int *__ptr) {
+ *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed long long __vec, long __offset,
+ signed long long *__ptr) {
+ *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned long long __vec, long __offset,
+ unsigned long long *__ptr) {
+ *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
+ __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector double __vec, long __offset, double *__ptr) {
+ *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+/*-- vec_xstw4 --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed char __vec, long __offset, signed char *__ptr) {
+ *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed short __vec, long __offset, signed short *__ptr) {
+ *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed int __vec, long __offset, signed int *__ptr) {
+ *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+/*-- vec_load_bndry ---------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_load_bndry(const signed char *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned char
+vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed short
+vec_load_bndry(const signed short *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned short
+vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed int
+vec_load_bndry(const signed int *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned int
+vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed long long
+vec_load_bndry(const signed long long *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned long long
+vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector double
+vec_load_bndry(const double *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \
+ __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \
+ (Y) == 128 ? 1 : \
+ (Y) == 256 ? 2 : \
+ (Y) == 512 ? 3 : \
+ (Y) == 1024 ? 4 : \
+ (Y) == 2048 ? 5 : \
+ (Y) == 4096 ? 6 : -1)))
+
+/*-- vec_load_len -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_load_len(const signed char *__ptr, unsigned int __len) {
+ return (vector signed char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_load_len(const unsigned char *__ptr, unsigned int __len) {
+ return (vector unsigned char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_load_len(const signed short *__ptr, unsigned int __len) {
+ return (vector signed short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_load_len(const unsigned short *__ptr, unsigned int __len) {
+ return (vector unsigned short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_load_len(const signed int *__ptr, unsigned int __len) {
+ return (vector signed int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_load_len(const unsigned int *__ptr, unsigned int __len) {
+ return (vector unsigned int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_load_len(const signed long long *__ptr, unsigned int __len) {
+ return (vector signed long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
+ return (vector unsigned long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_load_len(const double *__ptr, unsigned int __len) {
+ return (vector double)__builtin_s390_vll(__len, __ptr);
+}
+
+/*-- vec_store_len ----------------------------------------------------------*/
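+/* Length-controlled store: __builtin_s390_vstl writes at most __len + 1 bytes
+   (capped at 16) of __vec to __ptr; since only raw bytes are stored, every
+   overload simply casts to vector signed char. */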
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed char __vec, signed char *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned char __vec, unsigned char *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed short __vec, signed short *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned short __vec, unsigned short *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed int __vec, signed int *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned int __vec, unsigned int *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed long long __vec, signed long long *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned long long __vec, unsigned long long *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector double __vec, double *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+/*-- vec_load_pair ----------------------------------------------------------*/
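+/* Construct a two-element vector with __a in element 0 and __b in element 1. */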
+
+static inline __ATTRS_o_ai vector signed long long
+vec_load_pair(signed long long __a, signed long long __b) {
+ return (vector signed long long)(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_load_pair(unsigned long long __a, unsigned long long __b) {
+ return (vector unsigned long long)(__a, __b);
+}
+
+/*-- vec_genmask ------------------------------------------------------------*/
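+/* Expand a 16-bit mask into a byte vector: byte i is 0xff if bit i of __mask
+   (counting from the most significant bit) is set, else 0. For example,
+   vec_genmask(0x8001) sets bytes 0 and 15 to 0xff and clears the rest. */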
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_genmask(unsigned short __mask)
+ __constant(__mask) {
+ return (vector unsigned char)(
+ __mask & 0x8000 ? 0xff : 0,
+ __mask & 0x4000 ? 0xff : 0,
+ __mask & 0x2000 ? 0xff : 0,
+ __mask & 0x1000 ? 0xff : 0,
+ __mask & 0x0800 ? 0xff : 0,
+ __mask & 0x0400 ? 0xff : 0,
+ __mask & 0x0200 ? 0xff : 0,
+ __mask & 0x0100 ? 0xff : 0,
+ __mask & 0x0080 ? 0xff : 0,
+ __mask & 0x0040 ? 0xff : 0,
+ __mask & 0x0020 ? 0xff : 0,
+ __mask & 0x0010 ? 0xff : 0,
+ __mask & 0x0008 ? 0xff : 0,
+ __mask & 0x0004 ? 0xff : 0,
+ __mask & 0x0002 ? 0xff : 0,
+ __mask & 0x0001 ? 0xff : 0);
+}
+
+/*-- vec_genmasks_* ---------------------------------------------------------*/
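+/* Build a bit-range mask in every element: bits __first through __last
+   (numbered from the most significant bit) are set; if __first > __last, the
+   range wraps around and the complementary bits are set instead. For example,
+   vec_genmasks_32(2, 5) yields 0x3c000000 in each element. */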
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_genmasks_8(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 7;
+ unsigned char __bit2 = __last & 7;
+ unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1;
+ unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1;
+ unsigned char __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned char)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_genmasks_16(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 15;
+ unsigned char __bit2 = __last & 15;
+ unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1;
+ unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1;
+ unsigned short __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned short)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_genmasks_32(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 31;
+ unsigned char __bit2 = __last & 31;
+ unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1;
+ unsigned int __mask2 = (1U << (31 - __bit2)) - 1;
+ unsigned int __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned int)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_genmasks_64(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 63;
+ unsigned char __bit2 = __last & 63;
+ unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1;
+ unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1;
+ unsigned long long __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned long long)__value;
+}
+
+/*-- vec_splat --------------------------------------------------------------*/
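+/* Replicate element __index of __vec into all elements of the result; the
+   index must be a constant within the valid element range for the type. */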
+
+static inline __ATTRS_o_ai vector signed char
+vec_splat(vector signed char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector signed char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_splat(vector bool char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector bool char)(vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_splat(vector unsigned char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_splat(vector signed short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector signed short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_splat(vector bool short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector bool short)(vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_splat(vector unsigned short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_splat(vector signed int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector signed int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_splat(vector bool int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector bool int)(vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_splat(vector unsigned int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_splat(vector signed long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector signed long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_splat(vector bool long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector bool long long)(vector unsigned long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_splat(vector unsigned long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector unsigned long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector double
+vec_splat(vector double __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector double)__vec[__index];
+}
+
+/*-- vec_splat_s* -----------------------------------------------------------*/
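+/* Splat a constant into every element of a signed vector; the 32- and 64-bit
+   variants take a signed short, restricting the constant to a 16-bit signed
+   value. The vec_splat_u* functions below are the unsigned counterparts. */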
+
+static inline __ATTRS_ai vector signed char
+vec_splat_s8(signed char __scalar)
+ __constant(__scalar) {
+ return (vector signed char)__scalar;
+}
+
+static inline __ATTRS_ai vector signed short
+vec_splat_s16(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed short)__scalar;
+}
+
+static inline __ATTRS_ai vector signed int
+vec_splat_s32(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai vector signed long long
+vec_splat_s64(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed long long)(signed long)__scalar;
+}
+
+/*-- vec_splat_u* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_splat_u8(unsigned char __scalar)
+ __constant(__scalar) {
+ return (vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned short
+vec_splat_u16(unsigned short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned int
+vec_splat_u32(signed short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned long long
+vec_splat_u64(signed short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned long long)(signed long long)__scalar;
+}
+
+/*-- vec_splats -------------------------------------------------------------*/
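+/* Replicate an arbitrary (not necessarily constant) scalar into every element
+   of the corresponding vector type. */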
+
+static inline __ATTRS_o_ai vector signed char
+vec_splats(signed char __scalar) {
+ return (vector signed char)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_splats(unsigned char __scalar) {
+ return (vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_splats(signed short __scalar) {
+ return (vector signed short)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_splats(unsigned short __scalar) {
+ return (vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_splats(signed int __scalar) {
+ return (vector signed int)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_splats(unsigned int __scalar) {
+ return (vector unsigned int)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_splats(signed long long __scalar) {
+ return (vector signed long long)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_splats(unsigned long long __scalar) {
+ return (vector unsigned long long)__scalar;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_splats(double __scalar) {
+ return (vector double)__scalar;
+}
+
+/*-- vec_extend_s64 ---------------------------------------------------------*/
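+/* Sign-extend the last element of each doubleword half of __a into a
+   two-element vector of signed long long. */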
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed char __a) {
+ return (vector signed long long)(__a[7], __a[15]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed short __a) {
+ return (vector signed long long)(__a[3], __a[7]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed int __a) {
+ return (vector signed long long)(__a[1], __a[3]);
+}
+
+/*-- vec_mergeh -------------------------------------------------------------*/
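+/* Interleave the elements of the high (leftmost) halves of __a and __b:
+   (__a[0], __b[0], __a[1], __b[1], ...). */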
+
+static inline __ATTRS_o_ai vector signed char
+vec_mergeh(vector signed char __a, vector signed char __b) {
+ return (vector signed char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_mergeh(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mergeh(vector signed short __a, vector signed short __b) {
+ return (vector signed short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_mergeh(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mergeh(vector signed int __a, vector signed int __b) {
+ return (vector signed int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_mergeh(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mergeh(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_mergeh(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_mergeh(vector double __a, vector double __b) {
+ return (vector double)(__a[0], __b[0]);
+}
+
+/*-- vec_mergel -------------------------------------------------------------*/
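+/* Interleave the elements of the low (rightmost) halves of __a and __b. */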
+
+static inline __ATTRS_o_ai vector signed char
+vec_mergel(vector signed char __a, vector signed char __b) {
+ return (vector signed char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_mergel(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mergel(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mergel(vector signed short __a, vector signed short __b) {
+ return (vector signed short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_mergel(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mergel(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mergel(vector signed int __a, vector signed int __b) {
+ return (vector signed int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_mergel(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mergel(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mergel(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_mergel(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_mergel(vector double __a, vector double __b) {
+ return (vector double)(__a[1], __b[1]);
+}
+
+/*-- vec_pack ---------------------------------------------------------------*/
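+/* Truncating pack: keep the low half of each element of __a and __b and pack
+   the results into a vector with elements of half the width; values are
+   truncated, not saturated. */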
+
+static inline __ATTRS_o_ai vector signed char
+vec_pack(vector signed short __a, vector signed short __b) {
+ vector signed char __ac = (vector signed char)__a;
+ vector signed char __bc = (vector signed char)__b;
+ return (vector signed char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_pack(vector bool short __a, vector bool short __b) {
+ vector bool char __ac = (vector bool char)__a;
+ vector bool char __bc = (vector bool char)__b;
+ return (vector bool char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_pack(vector unsigned short __a, vector unsigned short __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return (vector unsigned char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_pack(vector signed int __a, vector signed int __b) {
+ vector signed short __ac = (vector signed short)__a;
+ vector signed short __bc = (vector signed short)__b;
+ return (vector signed short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_pack(vector bool int __a, vector bool int __b) {
+ vector bool short __ac = (vector bool short)__a;
+ vector bool short __bc = (vector bool short)__b;
+ return (vector bool short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_pack(vector unsigned int __a, vector unsigned int __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return (vector unsigned short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_pack(vector signed long long __a, vector signed long long __b) {
+ vector signed int __ac = (vector signed int)__a;
+ vector signed int __bc = (vector signed int)__b;
+ return (vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_pack(vector bool long long __a, vector bool long long __b) {
+ vector bool int __ac = (vector bool int)__a;
+ vector bool int __bc = (vector bool int)__b;
+ return (vector bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return (vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+/*-- vec_packs --------------------------------------------------------------*/
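+/* Saturating pack: like vec_pack, but values that do not fit the narrower
+   element type saturate to its minimum or maximum (the vpks/vpkls family of
+   builtins). */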
+
+static inline __ATTRS_o_ai vector signed char
+vec_packs(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vpksh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_packs(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vpksf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packs(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_packs(vector signed long long __a, vector signed long long __b) {
+ return __builtin_s390_vpksg(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vpklsg(__a, __b);
+}
+
+/*-- vec_packs_cc -----------------------------------------------------------*/
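+/* Same as vec_packs, but also returns the condition code produced by the
+   underlying builtin through *__cc. */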
+
+static inline __ATTRS_o_ai vector signed char
+vec_packs_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return __builtin_s390_vpkshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packs_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_packs_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return __builtin_s390_vpksfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packs_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_packs_cc(vector signed long long __a, vector signed long long __b,
+ int *__cc) {
+ return __builtin_s390_vpksgs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packs_cc(vector unsigned long long __a, vector unsigned long long __b,
+ int *__cc) {
+ return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
+/*-- vec_packsu -------------------------------------------------------------*/
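+/* Saturating pack to an unsigned result; for signed inputs, negative elements
+   are first forced to zero via the (__a >= __zero) mask before the unsigned
+   pack builtin is applied. */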
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu(vector signed short __a, vector signed short __b) {
+ const vector signed short __zero = (vector signed short)0;
+ return __builtin_s390_vpklsh(
+ (vector unsigned short)(__a >= __zero) & (vector unsigned short)__a,
+ (vector unsigned short)(__b >= __zero) & (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu(vector signed int __a, vector signed int __b) {
+ const vector signed int __zero = (vector signed int)0;
+ return __builtin_s390_vpklsf(
+ (vector unsigned int)(__a >= __zero) & (vector unsigned int)__a,
+ (vector unsigned int)(__b >= __zero) & (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu(vector signed long long __a, vector signed long long __b) {
+ const vector signed long long __zero = (vector signed long long)0;
+ return __builtin_s390_vpklsg(
+ (vector unsigned long long)(__a >= __zero) &
+ (vector unsigned long long)__a,
+ (vector unsigned long long)(__b >= __zero) &
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vpklsg(__a, __b);
+}
+
+/*-- vec_packsu_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu_cc(vector unsigned long long __a, vector unsigned long long __b,
+ int *__cc) {
+ return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
+/*-- vec_unpackh ------------------------------------------------------------*/
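+/* Widen the elements of the high (leftmost) half of __a to twice their width,
+   sign-extending signed elements and zero-extending unsigned ones. */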
+
+static inline __ATTRS_o_ai vector signed short
+vec_unpackh(vector signed char __a) {
+ return __builtin_s390_vuphb(__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_unpackh(vector bool char __a) {
+ return (vector bool short)__builtin_s390_vuphb((vector signed char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_unpackh(vector unsigned char __a) {
+ return __builtin_s390_vuplhb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_unpackh(vector signed short __a) {
+ return __builtin_s390_vuphh(__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_unpackh(vector bool short __a) {
+ return (vector bool int)__builtin_s390_vuphh((vector signed short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_unpackh(vector unsigned short __a) {
+ return __builtin_s390_vuplhh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_unpackh(vector signed int __a) {
+ return __builtin_s390_vuphf(__a);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_unpackh(vector bool int __a) {
+ return (vector bool long long)__builtin_s390_vuphf((vector signed int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_unpackh(vector unsigned int __a) {
+ return __builtin_s390_vuplhf(__a);
+}
+
+/*-- vec_unpackl ------------------------------------------------------------*/
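+/* Widen the elements of the low (rightmost) half of __a to twice their width,
+   sign-extending signed elements and zero-extending unsigned ones. */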
+
+static inline __ATTRS_o_ai vector signed short
+vec_unpackl(vector signed char __a) {
+ return __builtin_s390_vuplb(__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_unpackl(vector bool char __a) {
+ return (vector bool short)__builtin_s390_vuplb((vector signed char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_unpackl(vector unsigned char __a) {
+ return __builtin_s390_vupllb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_unpackl(vector signed short __a) {
+ return __builtin_s390_vuplhw(__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_unpackl(vector bool short __a) {
+ return (vector bool int)__builtin_s390_vuplhw((vector signed short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_unpackl(vector unsigned short __a) {
+ return __builtin_s390_vupllh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_unpackl(vector signed int __a) {
+ return __builtin_s390_vuplf(__a);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_unpackl(vector bool int __a) {
+ return (vector bool long long)__builtin_s390_vuplf((vector signed int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_unpackl(vector unsigned int __a) {
+ return __builtin_s390_vupllf(__a);
+}
+
+/*-- vec_cmpeq --------------------------------------------------------------*/
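+/* Element-wise comparisons: vec_cmpeq and the vec_cmpge/cmpgt/cmple/cmplt
+   functions below return a boolean vector whose elements are all ones where
+   the comparison holds and all zeros where it does not. */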
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector double __a, vector double __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+/*-- vec_cmpge --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpge(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpge(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpge(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+/*-- vec_cmpgt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpgt(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpgt(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpgt(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector double __a, vector double __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+/*-- vec_cmple --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmple(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmple(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmple(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmple(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmple(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmple(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector double __a, vector double __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+/*-- vec_cmplt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmplt(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmplt(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmplt(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector double __a, vector double __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+/*-- vec_all_eq -------------------------------------------------------------*/
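+/* Whole-vector predicates: vec_all_eq and the vec_all_ne/ge/gt/le functions
+   below use the condition-code-setting compare builtins, where cc == 0 means
+   the comparison was true for every element and cc == 3 means it was true for
+   none. Hence vec_all_ne tests cc == 3 on an equality compare, and the integer
+   vec_all_ge overloads test cc == 3 on a greater-than compare with the
+   operands swapped. */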
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_ne -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_ge -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_le -------------------------------------------------------------*/
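+/* The "all low-or-equal" predicates are expressed through the "compare high"
+ * builtins: condition code 3 means no element of __a compared greater than
+ * __b, i.e. every element satisfies __a <= __b.  A minimal usage sketch
+ * (variable names are illustrative):
+ *   vector signed int x, y;
+ *   if (vec_all_le(x, y))   // true iff x[i] <= y[i] for every lane i
+ *     ...;
+ */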
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_lt -------------------------------------------------------------*/
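+/* vec_all_lt reuses the "compare high" builtins with the operands swapped:
+ * __a < __b is evaluated as __b > __a, and condition code 0 means the
+ * comparison held in every element.
+ */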
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_nge ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_ngt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_ngt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nle ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nle(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nlt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nlt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nan ------------------------------------------------------------*/
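+/* The class mask 15 passed to the test-data-class builtin selects the NaN
+ * classes (quiet and signaling, of either sign); condition code 0 means every
+ * element matched, i.e. every element is a NaN.
+ */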
+
+static inline __ATTRS_ai int
+vec_all_nan(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_numeric --------------------------------------------------------*/
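+/* Same builtin and class mask as vec_all_nan, but condition code 3 (no
+ * element matched a NaN class) means every element is numeric.
+ */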
+
+static inline __ATTRS_ai int
+vec_all_numeric(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_any_eq -------------------------------------------------------------*/
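+/* The vec_any_* predicates use the same compare builtins as the vec_all_*
+ * predicates but test for condition code 0 or 1, i.e. the comparison held in
+ * at least one element.  A usage sketch (names are illustrative):
+ *   if (vec_any_eq(x, y))   // true iff x[i] == y[i] for some lane i
+ *     ...;
+ */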
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_ne -------------------------------------------------------------*/
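+/* Condition code != 0 after the "compare equal" builtin means that not all
+ * elements compared equal, i.e. at least one element differs.
+ */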
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_ge -------------------------------------------------------------*/
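+/* The >= forms swap the operands of the "compare high" builtin: condition
+ * code != 0 means __b > __a did not hold in every element, so __a >= __b
+ * holds in at least one element.
+ */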
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_le -------------------------------------------------------------*/
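+/* Condition code != 0 after the "compare high" builtin means __a > __b did
+ * not hold in every element, so at least one element satisfies __a <= __b.
+ */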
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_nge ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_ngt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_ngt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nle ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nle(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nlt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nlt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nan ------------------------------------------------------------*/
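+/* Condition code != 3 means at least one element matched a NaN class under
+ * mask 15, i.e. at least one element is a NaN.
+ */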
+
+static inline __ATTRS_ai int
+vec_any_nan(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc != 3;
+}
+
+/*-- vec_any_numeric --------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_numeric(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_andc ---------------------------------------------------------------*/
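+/* vec_andc computes the bitwise AND of __a with the complement of __b, which
+ * is typically used to clear the bits selected by a mask.  A sketch (names
+ * are illustrative):
+ *   vector unsigned int bits, mask;
+ *   bits = vec_andc(bits, mask);   // clear the bits that are set in mask
+ */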
+
+static inline __ATTRS_o_ai vector bool char
+vec_andc(vector bool char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector signed char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector bool char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector signed char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector unsigned char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector bool char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector unsigned char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_andc(vector bool short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector signed short __a, vector signed short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector bool short __a, vector signed short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector signed short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector unsigned short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector bool short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector unsigned short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_andc(vector bool int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector signed int __a, vector signed int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector bool int __a, vector signed int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector signed int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector unsigned int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector bool int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector unsigned int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_andc(vector bool long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector bool long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector bool long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector double __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector bool long long __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector double __a, vector bool long long __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
+/*-- vec_nor ----------------------------------------------------------------*/
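+/* vec_nor computes the bitwise complement of (__a | __b); vec_nor(x, x) can
+ * serve as a plain bitwise NOT of x.
+ */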
+
+static inline __ATTRS_o_ai vector bool char
+vec_nor(vector bool char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector signed char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector bool char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector signed char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector unsigned char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector bool char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector unsigned char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_nor(vector bool short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector signed short __a, vector signed short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector bool short __a, vector signed short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector signed short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector unsigned short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector bool short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector unsigned short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_nor(vector bool int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector signed int __a, vector signed int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector bool int __a, vector signed int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector signed int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector unsigned int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector bool int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector unsigned int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_nor(vector bool long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector signed long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector bool long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector signed long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector bool long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector unsigned long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector double __a, vector double __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector bool long long __a, vector double __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector double __a, vector bool long long __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
+/*-- vec_cntlz --------------------------------------------------------------*/
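+/* Count the leading zero bits of each element; the result is an unsigned
+   vector with the same element width as the argument. */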
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cntlz(vector signed char __a) {
+ return __builtin_s390_vclzb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cntlz(vector unsigned char __a) {
+ return __builtin_s390_vclzb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cntlz(vector signed short __a) {
+ return __builtin_s390_vclzh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cntlz(vector unsigned short __a) {
+ return __builtin_s390_vclzh(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cntlz(vector signed int __a) {
+ return __builtin_s390_vclzf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cntlz(vector unsigned int __a) {
+ return __builtin_s390_vclzf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cntlz(vector signed long long __a) {
+ return __builtin_s390_vclzg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cntlz(vector unsigned long long __a) {
+ return __builtin_s390_vclzg(__a);
+}
+
+/*-- vec_cnttz --------------------------------------------------------------*/
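+/* Count the trailing zero bits of each element; the result is an unsigned
+   vector with the same element width as the argument. */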
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cnttz(vector signed char __a) {
+ return __builtin_s390_vctzb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cnttz(vector unsigned char __a) {
+ return __builtin_s390_vctzb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cnttz(vector signed short __a) {
+ return __builtin_s390_vctzh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cnttz(vector unsigned short __a) {
+ return __builtin_s390_vctzh(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cnttz(vector signed int __a) {
+ return __builtin_s390_vctzf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cnttz(vector unsigned int __a) {
+ return __builtin_s390_vctzf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cnttz(vector signed long long __a) {
+ return __builtin_s390_vctzg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cnttz(vector unsigned long long __a) {
+ return __builtin_s390_vctzg(__a);
+}
+
+/*-- vec_popcnt -------------------------------------------------------------*/
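+/* Count the bits set to one in each element (population count). */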
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_popcnt(vector signed char __a) {
+ return __builtin_s390_vpopctb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_popcnt(vector unsigned char __a) {
+ return __builtin_s390_vpopctb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_popcnt(vector signed short __a) {
+ return __builtin_s390_vpopcth((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_popcnt(vector unsigned short __a) {
+ return __builtin_s390_vpopcth(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_popcnt(vector signed int __a) {
+ return __builtin_s390_vpopctf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_popcnt(vector unsigned int __a) {
+ return __builtin_s390_vpopctf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_popcnt(vector signed long long __a) {
+ return __builtin_s390_vpopctg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_popcnt(vector unsigned long long __a) {
+ return __builtin_s390_vpopctg(__a);
+}
+
+/*-- vec_rl -----------------------------------------------------------------*/
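+/* Rotate each element of __a left by the bit count in the corresponding
+   element of __b. */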
+
+static inline __ATTRS_o_ai vector signed char
+vec_rl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_verllvb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_rl(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_verllvb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_rl(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_verllvh(
+ (vector unsigned short)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_rl(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_verllvh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_rl(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_verllvf(
+ (vector unsigned int)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_rl(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_verllvf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_rl(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_verllvg(
+ (vector unsigned long long)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_verllvg(__a, __b);
+}
+
+/*-- vec_rli ----------------------------------------------------------------*/
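+/* Rotate each element of __a left by the same scalar bit count __b. */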
+
+static inline __ATTRS_o_ai vector signed char
+vec_rli(vector signed char __a, unsigned long __b) {
+ return (vector signed char)__builtin_s390_verllb(
+ (vector unsigned char)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_rli(vector unsigned char __a, unsigned long __b) {
+ return __builtin_s390_verllb(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_rli(vector signed short __a, unsigned long __b) {
+ return (vector signed short)__builtin_s390_verllh(
+ (vector unsigned short)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_rli(vector unsigned short __a, unsigned long __b) {
+ return __builtin_s390_verllh(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_rli(vector signed int __a, unsigned long __b) {
+ return (vector signed int)__builtin_s390_verllf(
+ (vector unsigned int)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_rli(vector unsigned int __a, unsigned long __b) {
+ return __builtin_s390_verllf(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_rli(vector signed long long __a, unsigned long __b) {
+ return (vector signed long long)__builtin_s390_verllg(
+ (vector unsigned long long)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_rli(vector unsigned long long __a, unsigned long __b) {
+ return __builtin_s390_verllg(__a, (int)__b);
+}
+
+/*-- vec_rl_mask ------------------------------------------------------------*/
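+/* Rotate each element of __a left by the count in the corresponding element
+   of __b and merge the rotated bits into __a under control of the constant
+   mask __c (element rotate and insert under mask, VERIM). */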
+
+extern __ATTRS_o vector signed char
+vec_rl_mask(vector signed char __a, vector unsigned char __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned char
+vec_rl_mask(vector unsigned char __a, vector unsigned char __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed short
+vec_rl_mask(vector signed short __a, vector unsigned short __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned short
+vec_rl_mask(vector unsigned short __a, vector unsigned short __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed int
+vec_rl_mask(vector signed int __a, vector unsigned int __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned int
+vec_rl_mask(vector unsigned int __a, vector unsigned int __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed long long
+vec_rl_mask(vector signed long long __a, vector unsigned long long __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned long long
+vec_rl_mask(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned char __c) __constant(__c);
+
+#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
+ __extension__ ({ \
+ vector unsigned char __res; \
+ vector unsigned char __x = (vector unsigned char)(X); \
+ vector unsigned char __y = (vector unsigned char)(Y); \
+ switch (sizeof ((X)[0])) { \
+ case 1: __res = (vector unsigned char) __builtin_s390_verimb( \
+ (vector unsigned char)__x, (vector unsigned char)__x, \
+ (vector unsigned char)__y, (Z)); break; \
+ case 2: __res = (vector unsigned char) __builtin_s390_verimh( \
+ (vector unsigned short)__x, (vector unsigned short)__x, \
+ (vector unsigned short)__y, (Z)); break; \
+ case 4: __res = (vector unsigned char) __builtin_s390_verimf( \
+ (vector unsigned int)__x, (vector unsigned int)__x, \
+ (vector unsigned int)__y, (Z)); break; \
+ default: __res = (vector unsigned char) __builtin_s390_verimg( \
+ (vector unsigned long long)__x, (vector unsigned long long)__x, \
+ (vector unsigned long long)__y, (Z)); break; \
+ } __res; }))
+
+/*-- vec_sll ----------------------------------------------------------------*/
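+/* Shift the entire 128-bit contents of __a left; the bit shift count is
+   taken from __b (VSL). */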
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsl(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_slb ----------------------------------------------------------------*/
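+/* Shift the entire 128-bit contents of __a left by a whole number of bytes
+   taken from __b (shift left by byte, VSLB). */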
+
+static inline __ATTRS_o_ai vector signed char
+vec_slb(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_slb(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vslb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_slb(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vslb(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_slb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vslb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_slb(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_slb(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_slb(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_slb(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_slb(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_slb(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_slb(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_slb(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_slb(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_slb(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_slb(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_slb(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_slb(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_slb(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_sld ----------------------------------------------------------------*/
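+/* Concatenate __a with __b and return the 16 bytes starting at byte offset
+   __c (0..15); maps to the VSLDB instruction. */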
+
+extern __ATTRS_o vector signed char
+vec_sld(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned char
+vec_sld(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed short
+vec_sld(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned short
+vec_sld(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed int
+vec_sld(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned int
+vec_sld(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed long long
+vec_sld(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned long long
+vec_sld(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector double
+vec_sld(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 15);
+
+#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
+ __builtin_s390_vsldb((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z)))
+
+/*-- vec_sldw ---------------------------------------------------------------*/
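+/* Same as vec_sld, but the offset __c is given in 4-byte words (0..3). */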
+
+extern __ATTRS_o vector signed char
+vec_sldw(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned char
+vec_sldw(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed short
+vec_sldw(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned short
+vec_sldw(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed int
+vec_sldw(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned int
+vec_sldw(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed long long
+vec_sldw(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned long long
+vec_sldw(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector double
+vec_sldw(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 3);
+
+#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
+ __builtin_s390_vsldb((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z) * 4))
+
+/*-- vec_sral ---------------------------------------------------------------*/
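+/* Arithmetic right shift of the entire 128-bit contents of __a; the bit
+   shift count is taken from __b (VSRA). */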
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsra(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srab ---------------------------------------------------------------*/
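+/* Arithmetic right shift of the entire vector by a byte count taken from
+   __b (VSRAB). */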
+
+static inline __ATTRS_o_ai vector signed char
+vec_srab(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srab(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrab(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srab(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vsrab(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srab(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrab(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srab(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srab(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srab(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srab(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srab(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srab(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srab(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srab(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srab(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srab(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srab(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srab(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srab(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srab(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srl ----------------------------------------------------------------*/
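+/* Logical right shift of the entire 128-bit contents of __a; the bit shift
+   count is taken from __b (VSRL). */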
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrl(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srb ----------------------------------------------------------------*/
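+/* Logical right shift of the entire vector by a byte count taken from __b
+   (VSRLB). */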
+
+static inline __ATTRS_o_ai vector signed char
+vec_srb(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srb(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srb(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vsrlb(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrlb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srb(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srb(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srb(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srb(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srb(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srb(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srb(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srb(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srb(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srb(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srb(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srb(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srb(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srb(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_abs ----------------------------------------------------------------*/
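+/* Element-wise absolute value; the double variant maps to VFLPDB
+   (load positive). */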
+
+static inline __ATTRS_o_ai vector signed char
+vec_abs(vector signed char __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed char)0));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_abs(vector signed short __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed short)0));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_abs(vector signed int __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_abs(vector signed long long __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed long long)0));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_abs(vector double __a) {
+ return __builtin_s390_vflpdb(__a);
+}
+
+/*-- vec_nabs ---------------------------------------------------------------*/
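+/* Negative absolute value of each double element (VFLNDB, load negative). */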
+
+static inline __ATTRS_ai vector double
+vec_nabs(vector double __a) {
+ return __builtin_s390_vflndb(__a);
+}
+
+/*-- vec_max ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector signed char __a, vector signed char __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector signed char __a, vector bool char __b) {
+ vector signed char __bc = (vector signed char)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector bool char __a, vector signed char __b) {
+ vector signed char __ac = (vector signed char)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector unsigned char __a, vector unsigned char __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector unsigned char __a, vector bool char __b) {
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector bool char __a, vector unsigned char __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector signed short __a, vector signed short __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector signed short __a, vector bool short __b) {
+ vector signed short __bc = (vector signed short)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector bool short __a, vector signed short __b) {
+ vector signed short __ac = (vector signed short)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector unsigned short __a, vector unsigned short __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector unsigned short __a, vector bool short __b) {
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector bool short __a, vector unsigned short __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector signed int __a, vector signed int __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector signed int __a, vector bool int __b) {
+ vector signed int __bc = (vector signed int)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector bool int __a, vector signed int __b) {
+ vector signed int __ac = (vector signed int)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector unsigned int __a, vector unsigned int __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector unsigned int __a, vector bool int __b) {
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector bool int __a, vector unsigned int __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector signed long long __a, vector signed long long __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector signed long long __a, vector bool long long __b) {
+ vector signed long long __bc = (vector signed long long)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector bool long long __a, vector signed long long __b) {
+ vector signed long long __ac = (vector signed long long)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector unsigned long long __a, vector bool long long __b) {
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector bool long long __a, vector unsigned long long __b) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_max(vector double __a, vector double __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+/*-- vec_min ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector signed char __a, vector signed char __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector signed char __a, vector bool char __b) {
+ vector signed char __bc = (vector signed char)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector bool char __a, vector signed char __b) {
+ vector signed char __ac = (vector signed char)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector unsigned char __a, vector unsigned char __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector unsigned char __a, vector bool char __b) {
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector bool char __a, vector unsigned char __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector signed short __a, vector signed short __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector signed short __a, vector bool short __b) {
+ vector signed short __bc = (vector signed short)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector bool short __a, vector signed short __b) {
+ vector signed short __ac = (vector signed short)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector unsigned short __a, vector unsigned short __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector unsigned short __a, vector bool short __b) {
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector bool short __a, vector unsigned short __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector signed int __a, vector signed int __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector signed int __a, vector bool int __b) {
+ vector signed int __bc = (vector signed int)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector bool int __a, vector signed int __b) {
+ vector signed int __ac = (vector signed int)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector unsigned int __a, vector unsigned int __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector unsigned int __a, vector bool int __b) {
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector bool int __a, vector unsigned int __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector signed long long __a, vector signed long long __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector signed long long __a, vector bool long long __b) {
+ vector signed long long __bc = (vector signed long long)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector bool long long __a, vector signed long long __b) {
+ vector signed long long __ac = (vector signed long long)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector unsigned long long __a, vector bool long long __b) {
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector bool long long __a, vector unsigned long long __b) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_min(vector double __a, vector double __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
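A minimal per-lane sketch of the pattern used throughout the vec_max/vec_min overloads above (illustrative only, not part of the header): vec_cmpgt yields an all-ones mask in the lanes where __a > __b, and, as the max overloads show, vec_sel takes its second operand in the masked lanes, so each lane behaves like the scalar ternary below.

/* per-lane equivalent of vec_max(a, b); vec_min swaps the first two
   vec_sel operands, giving (a > b) ? b : a */
static inline long long lane_max(long long a, long long b) {
  return (a > b) ? a : b;
}
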
+/*-- vec_add_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaq(__a, __b);
+}
+
+/*-- vec_addc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_addc(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaccb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_addc(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vacch(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_addc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vaccf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_addc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vaccg(__a, __b);
+}
+
+/*-- vec_addc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaccq(__a, __b);
+}
+
+/*-- vec_adde_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vacq(__a, __b, __c);
+}
+
+/*-- vec_addec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vacccq(__a, __b, __c);
+}
+
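Taken together, the vec_add_u128 / vec_addc_u128 / vec_adde_u128 / vec_addec_u128 intrinsics above support multi-word arithmetic on 128-bit quantities held in byte vectors. Below is a small sketch of a 256-bit add built from them, assuming (as the names suggest) that the *_addc_* forms return the carry-out and that vec_adde_u128 consumes a carry-in through its third operand, and that this header installs as <vecintrin.h>; the helper is illustrative, not part of the patch.

#include <vecintrin.h>

typedef vector unsigned char u128;   /* one 128-bit quantity per byte vector */

/* 256-bit addition: (*r_hi, *r_lo) = (a_hi, a_lo) + (b_hi, b_lo) */
static void add256(u128 a_hi, u128 a_lo, u128 b_hi, u128 b_lo,
                   u128 *r_hi, u128 *r_lo) {
  u128 carry = vec_addc_u128(a_lo, b_lo);    /* carry out of the low half */
  *r_lo = vec_add_u128(a_lo, b_lo);          /* low 128 bits of the sum   */
  *r_hi = vec_adde_u128(a_hi, b_hi, carry);  /* high half plus carry-in   */
}
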
+/*-- vec_avg ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_avg(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vavgb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_avg(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vavgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_avg(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vavgf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_avg(vector signed long long __a, vector signed long long __b) {
+ return __builtin_s390_vavgg(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_avg(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vavglb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_avg(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vavglh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_avg(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vavglf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_avg(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vavglg(__a, __b);
+}
+
+/*-- vec_checksum -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned int
+vec_checksum(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vcksm(__a, __b);
+}
+
+/*-- vec_gfmsum -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_gfmsum(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vgfmb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gfmsum(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vgfmh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gfmsum(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vgfmf(__a, __b);
+}
+
+/*-- vec_gfmsum_128 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_gfmsum_128(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vgfmg(__a, __b);
+}
+
+/*-- vec_gfmsum_accum -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_gfmsum_accum(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vgfmab(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gfmsum_accum(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vgfmah(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gfmsum_accum(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vgfmaf(__a, __b, __c);
+}
+
+/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_gfmsum_accum_128(vector unsigned long long __a,
+ vector unsigned long long __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vgfmag(__a, __b, __c);
+}
+
+/*-- vec_mladd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector signed char __a, vector signed char __b,
+ vector signed char __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector unsigned char __a, vector signed char __b,
+ vector signed char __c) {
+ return (vector signed char)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector signed char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __a * (vector signed char)__b + (vector signed char)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mladd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector unsigned short __a, vector signed short __b,
+ vector signed short __c) {
+ return (vector signed short)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector signed short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * (vector signed short)__b + (vector signed short)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mladd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector unsigned int __a, vector signed int __b,
+ vector signed int __c) {
+ return (vector signed int)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector signed int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __a * (vector signed int)__b + (vector signed int)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mladd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __a * __b + __c;
+}
+
+/*-- vec_mhadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mhadd(vector signed char __a, vector signed char __b,
+ vector signed char __c) {
+ return __builtin_s390_vmahb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mhadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vmalhb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mhadd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_s390_vmahh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mhadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmalhh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mhadd(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ return __builtin_s390_vmahf(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mhadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmalhf(__a, __b, __c);
+}
+
+/*-- vec_meadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_meadd(vector signed char __a, vector signed char __b,
+ vector signed short __c) {
+ return __builtin_s390_vmaeb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_meadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmaleb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_meadd(vector signed short __a, vector signed short __b,
+ vector signed int __c) {
+ return __builtin_s390_vmaeh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_meadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmaleh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_meadd(vector signed int __a, vector signed int __b,
+ vector signed long long __c) {
+ return __builtin_s390_vmaef(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_meadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vmalef(__a, __b, __c);
+}
+
+/*-- vec_moadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_moadd(vector signed char __a, vector signed char __b,
+ vector signed short __c) {
+ return __builtin_s390_vmaob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_moadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmalob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_moadd(vector signed short __a, vector signed short __b,
+ vector signed int __c) {
+ return __builtin_s390_vmaoh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_moadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmaloh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_moadd(vector signed int __a, vector signed int __b,
+ vector signed long long __c) {
+ return __builtin_s390_vmaof(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_moadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vmalof(__a, __b, __c);
+}
+
+/*-- vec_mulh ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mulh(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mulh(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmlhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mulh(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mulh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmlhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mulh(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmhf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mulh(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlhf(__a, __b);
+}
+
+/*-- vec_mule ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_mule(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mule(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmleb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mule(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mule(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmleh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mule(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmef(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mule(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlef(__a, __b);
+}
+
+/*-- vec_mulo ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_mulo(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmob(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mulo(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmlob(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mulo(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmoh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mulo(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmloh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mulo(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmof(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mulo(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlof(__a, __b);
+}
+
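A short usage sketch for the widening multiplies above, under the even/odd convention the names imply: vec_mule multiplies the even-numbered elements and vec_mulo the odd-numbered ones, each producing double-width results, while vec_mulh keeps only the high half of a same-width product. Illustrative only, assuming the header installs as <vecintrin.h>.

#include <vecintrin.h>

/* full 32-bit products of 16-bit inputs, split into even and odd lanes */
static void widen_mul(vector signed short a, vector signed short b,
                      vector signed int *even, vector signed int *odd) {
  *even = vec_mule(a, b);   /* products of elements 0, 2, 4, 6 */
  *odd  = vec_mulo(a, b);   /* products of elements 1, 3, 5, 7 */
}
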
+/*-- vec_sub_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsq(__a, __b);
+}
+
+/*-- vec_subc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_subc(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vscbib(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_subc(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vscbih(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_subc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vscbif(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_subc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vscbig(__a, __b);
+}
+
+/*-- vec_subc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vscbiq(__a, __b);
+}
+
+/*-- vec_sube_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vsbiq(__a, __b, __c);
+}
+
+/*-- vec_subec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vsbcbiq(__a, __b, __c);
+}
+
+/*-- vec_sum2 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sum2(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vsumgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sum2(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vsumgf(__a, __b);
+}
+
+/*-- vec_sum_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sum_u128(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vsumqf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sum_u128(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vsumqg(__a, __b);
+}
+
+/*-- vec_sum4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sum4(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsumb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sum4(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vsumh(__a, __b);
+}
+
+/*-- vec_test_mask ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed char __a, vector unsigned char __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vtm(__a, __b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed short __a, vector unsigned short __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed int __a, vector unsigned int __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector double __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+/*-- vec_madd ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_madd(vector double __a, vector double __b, vector double __c) {
+ return __builtin_s390_vfmadb(__a, __b, __c);
+}
+
+/*-- vec_msub ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_msub(vector double __a, vector double __b, vector double __c) {
+ return __builtin_s390_vfmsdb(__a, __b, __c);
+}
+
+/*-- vec_sqrt ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_sqrt(vector double __a) {
+ return __builtin_s390_vfsqdb(__a);
+}
+
+/*-- vec_ld2f ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_ld2f(const float *__ptr) {
+ typedef float __v2f32 __attribute__((__vector_size__(8)));
+ return __builtin_convertvector(*(const __v2f32 *)__ptr, vector double);
+}
+
+/*-- vec_st2f ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai void
+vec_st2f(vector double __a, float *__ptr) {
+ typedef float __v2f32 __attribute__((__vector_size__(8)));
+ *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
+}
+
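vec_ld2f and vec_st2f above bridge a pair of floats in memory and a full vector double; a small round-trip sketch (illustrative, assuming the header installs as <vecintrin.h>):

#include <vecintrin.h>

/* square roots of two floats, computed with double-precision intermediates */
static void sqrt2f(float buf[2]) {
  vector double vd = vec_ld2f(buf);   /* widen the two floats to doubles */
  vd = vec_sqrt(vd);
  vec_st2f(vd, buf);                  /* narrow back to two floats */
}
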
+/*-- vec_ctd ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector double
+vec_ctd(vector signed long long __a, int __b)
+ __constant_range(__b, 0, 31) {
+ vector double __conv = __builtin_convertvector(__a, vector double);
+ __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __conv;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_ctd(vector unsigned long long __a, int __b)
+ __constant_range(__b, 0, 31) {
+ vector double __conv = __builtin_convertvector(__a, vector double);
+ __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __conv;
+}
+
+/*-- vec_ctsl ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed long long
+vec_ctsl(vector double __a, int __b)
+ __constant_range(__b, 0, 31) {
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector signed long long);
+}
+
+/*-- vec_ctul ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_ctul(vector double __a, int __b)
+ __constant_range(__b, 0, 31) {
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector unsigned long long);
+}
+
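The scaling in vec_ctd / vec_ctsl / vec_ctul above builds a power of two directly from its IEEE-754 bit pattern: 0x3ff is the double exponent bias and 52 the mantissa width, so (0x3ffULL - __b) << 52 is the bit pattern of 2^(-__b), and the scalar-to-vector cast splats it into both lanes (a zvector language-extension cast). Multiplying the converted value by it treats the integer input as fixed point with __b fraction bits; vec_ctsl and vec_ctul do the reverse, scaling by 2^(+__b) before converting back. A tiny worked example, not part of the header:

#include <vecintrin.h>

/* 8 interpreted with 2 fraction bits: 8 * 2^-2 = 2.0 in both lanes */
static vector double demo_ctd(void) {
  vector signed long long fx = (vector signed long long){8, 8};
  return vec_ctd(fx, 2);
}
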
+/*-- vec_roundp -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundp(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_ceil ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_ceil(vector double __a) {
+ // On this platform, vec_ceil never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_roundm -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundm(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_floor --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_floor(vector double __a) {
+ // On this platform, vec_floor never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_roundz -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundz(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_trunc --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_trunc(vector double __a) {
+ // On this platform, vec_trunc never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_roundc -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundc(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 0);
+}
+
+/*-- vec_round --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_round(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 4);
+}
+
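The rounding helpers above all funnel into __builtin_s390_vfidb(__a, M4, M5); judging from the overloads and the comments, the second argument 4 requests suppression of the IEEE-inexact exception, and the third selects the rounding direction: 6 toward +infinity (roundp/ceil), 7 toward -infinity (roundm/floor), 5 toward zero (roundz/trunc), 4 to nearest (round), and 0 according to the current rounding mode (roundc). A small illustration of the expected behaviour, not part of the header:

#include <vecintrin.h>

static void round_demo(void) {
  vector double v    = (vector double){1.5, -2.5};
  vector double up   = vec_ceil(v);    /* {2.0, -2.0} */
  vector double down = vec_floor(v);   /* {1.0, -3.0} */
  vector double zero = vec_trunc(v);   /* {1.0, -2.0} */
  (void)up; (void)down; (void)zero;
}
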
+/*-- vec_fp_test_data_class -------------------------------------------------*/
+
+#define vec_fp_test_data_class(X, Y, Z) \
+ ((vector bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
+
+/*-- vec_cp_until_zero ------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cp_until_zero(vector signed char __a) {
+ return (vector signed char)__builtin_s390_vistrb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cp_until_zero(vector bool char __a) {
+ return (vector bool char)__builtin_s390_vistrb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cp_until_zero(vector unsigned char __a) {
+ return __builtin_s390_vistrb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cp_until_zero(vector signed short __a) {
+ return (vector signed short)__builtin_s390_vistrh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cp_until_zero(vector bool short __a) {
+ return (vector bool short)__builtin_s390_vistrh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cp_until_zero(vector unsigned short __a) {
+ return __builtin_s390_vistrh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cp_until_zero(vector signed int __a) {
+ return (vector signed int)__builtin_s390_vistrf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cp_until_zero(vector bool int __a) {
+ return (vector bool int)__builtin_s390_vistrf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cp_until_zero(vector unsigned int __a) {
+ return __builtin_s390_vistrf(__a);
+}
+
+/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cp_until_zero_cc(vector signed char __a, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cp_until_zero_cc(vector bool char __a, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cp_until_zero_cc(vector unsigned char __a, int *__cc) {
+ return __builtin_s390_vistrbs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cp_until_zero_cc(vector signed short __a, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cp_until_zero_cc(vector bool short __a, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cp_until_zero_cc(vector unsigned short __a, int *__cc) {
+ return __builtin_s390_vistrhs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cp_until_zero_cc(vector signed int __a, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vistrfs((vector unsigned int)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cp_until_zero_cc(vector bool int __a, int *__cc) {
+ return (vector bool int)__builtin_s390_vistrfs((vector unsigned int)__a,
+ __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cp_until_zero_cc(vector unsigned int __a, int *__cc) {
+ return __builtin_s390_vistrfs(__a, __cc);
+}
+
+/*-- vec_cmpeq_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeeb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeeb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeeh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeeh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfeef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfeef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfeef(__a, __b);
+}
+
+/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfeebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfeebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfeebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfeehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfeehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfeehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfeefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfeefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vfeefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfeezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfeezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfeezf(__a, __b);
+}
+
+/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfeezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfeezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfeezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfeezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfeezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfeezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfeezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfeezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfeezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeneb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeneb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeneb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeneh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeneh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeneh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfenef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfenef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfenef(__a, __b);
+}
+
+/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfenebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfenebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfenebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfenehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfenehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfenehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfenefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfenefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vfenefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfenezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfenezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfenezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfenezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfenezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfenezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfenezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfenezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfenezf(__a, __b);
+}
+
+/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfenezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfenezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfenezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfenezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfenezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfenezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfenezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfenezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfenezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmprg --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmprg(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmprg(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmprg(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
+}
+
+/*-- vec_cmprg_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmprg_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmprg_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmprg_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
+}
+
+/*-- vec_cmprg_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrcb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrch(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrcf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrczb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrczh(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrczf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmpnrg -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpnrg(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpnrg(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpnrg(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
+}
+
+/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpnrg_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpnrg_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpnrg_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
+}
+
+/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrcb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrch(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrcf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrczb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrczh(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrczf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_find_any_eq --------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector signed char __a, vector signed char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector bool char __a, vector bool char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vfaeb(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector signed short __a, vector signed short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector bool short __a, vector bool short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vfaeh(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector signed int __a, vector signed int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector bool int __a, vector bool int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vfaef(__a, __b, 4);
+}
+
+/*-- vec_find_any_eq_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return (vector bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return (vector bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return (vector bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
+}
+
+/*-- vec_find_any_eq_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaeb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaeh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaef(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaebs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaefs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaezb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaezh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaezf(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx_cc(vector bool char __a, vector bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx_cc(vector unsigned short __a,
+ vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_or_0_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx_cc(vector bool int __a, vector bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_ne --------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector signed char __a, vector signed char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector bool char __a, vector bool char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vfaeb(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector signed short __a, vector signed short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector bool short __a, vector bool short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vfaeh(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector signed int __a, vector signed int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector bool int __a, vector bool int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vfaef(__a, __b, 12);
+}
+
+/*-- vec_find_any_ne_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return (vector bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return (vector bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return (vector bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
+}
+
+/*-- vec_find_any_ne_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaeb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaeh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaef(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaebs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaefs(__a, __b, 8, __cc);
+}
+
+/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaezb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaezh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaezf(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx_cc(vector bool char __a, vector bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx_cc(vector unsigned short __a,
+ vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_or_0_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx_cc(vector bool int __a, vector bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
+}
+
+#undef __constant_pow2_range
+#undef __constant_range
+#undef __constant
+#undef __ATTRS_o
+#undef __ATTRS_o_ai
+#undef __ATTRS_ai
+
+#else
+
+#error "Use -fzvector to enable vector extensions"
+
+#endif
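
The vec_cmpnrg* and vec_find_any_* families above wrap the z13 string-search instructions (the VSTRC and VFAE variants); the *_idx forms return an element index rather than a mask, and the *_cc forms also report the machine condition code through an int*. A minimal usage sketch, assuming a SystemZ target compiled with -march=z13 -fzvector; the helper names are illustrative and not part of the header:

    /* Illustrative helpers; not part of vecintrin.h. */
    #include <vecintrin.h>

    /* Byte index of the first element of haystack equal to any element of
       needles; the index is placed in byte 7 of the result, 16 if no match. */
    static int first_match_index(vector unsigned char haystack,
                                 vector unsigned char needles) {
      vector unsigned char idx = vec_find_any_eq_idx(haystack, needles);
      return vec_extract(idx, 7);
    }

    /* The *_cc variants additionally surface the condition code set by the
       underlying instruction (here VFAEBS). */
    static int first_match_index_cc(vector unsigned char haystack,
                                    vector unsigned char needles, int *cc) {
      vector unsigned char idx = vec_find_any_eq_idx_cc(haystack, needles, cc);
      return vec_extract(idx, 7);
    }
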
diff --git a/lib/Headers/wmmintrin.h b/lib/Headers/wmmintrin.h
index a2d931010aea..369e3c208e53 100644
--- a/lib/Headers/wmmintrin.h
+++ b/lib/Headers/wmmintrin.h
@@ -26,8 +26,17 @@
#include <emmintrin.h>
+#if !defined (__AES__) && !defined (__PCLMUL__)
+# error "AES/PCLMUL instructions not enabled"
+#else
+
+#ifdef __AES__
#include <__wmmintrin_aes.h>
+#endif /* __AES__ */
+#ifdef __PCLMUL__
#include <__wmmintrin_pclmul.h>
+#endif /* __PCLMUL__ */
+#endif /* __AES__ || __PCLMUL__ */
#endif /* _WMMINTRIN_H */
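
With the AES and PCLMUL sub-headers now guarded separately, <wmmintrin.h> is usable when only one of the two features is enabled. A sketch, assuming an x86 target built with -maes (so __AES__ is defined) but without -mpclmul:

    /* Sketch: -maes defines __AES__; __PCLMUL__ may be absent. */
    #include <wmmintrin.h>

    #ifdef __AES__
    __m128i aes_round(__m128i state, __m128i round_key) {
      return _mm_aesenc_si128(state, round_key);  /* one AES encryption round */
    }
    #endif
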
diff --git a/lib/Headers/x86intrin.h b/lib/Headers/x86intrin.h
index 4d8077e38291..21a43daf3c2d 100644
--- a/lib/Headers/x86intrin.h
+++ b/lib/Headers/x86intrin.h
@@ -28,29 +28,53 @@
#include <immintrin.h>
+#ifdef __3dNOW__
#include <mm3dnow.h>
+#endif
+#ifdef __BMI__
#include <bmiintrin.h>
+#endif
+#ifdef __BMI2__
#include <bmi2intrin.h>
+#endif
+#ifdef __LZCNT__
#include <lzcntintrin.h>
+#endif
+#ifdef __POPCNT__
#include <popcntintrin.h>
+#endif
+#ifdef __RDSEED__
#include <rdseedintrin.h>
+#endif
+#ifdef __PRFCHW__
#include <prfchwintrin.h>
+#endif
+#ifdef __SSE4A__
#include <ammintrin.h>
+#endif
+#ifdef __FMA4__
#include <fma4intrin.h>
+#endif
+#ifdef __XOP__
#include <xopintrin.h>
+#endif
+#ifdef __TBM__
#include <tbmintrin.h>
+#endif
+#ifdef __F16C__
#include <f16cintrin.h>
+#endif
/* FIXME: LWP */
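
Each ISA-specific sub-header of <x86intrin.h> is now included only when the driver has defined the matching feature macro, so the umbrella header can be included unconditionally and user code tests the same macros before using the intrinsics. A hedged sketch:

    /* Sketch: __BMI__ is defined by -mbmi (or an -march that implies it). */
    #include <x86intrin.h>

    unsigned trailing_zeros(unsigned x) {
    #ifdef __BMI__
      return _tzcnt_u32(x);              /* BMI TZCNT */
    #else
      return x ? __builtin_ctz(x) : 32;  /* portable fallback */
    #endif
    }
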
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index 18aa8c1426a3..0d58c753029f 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -24,6 +24,10 @@
#ifndef __XMMINTRIN_H
#define __XMMINTRIN_H
+#ifndef __SSE__
+#error "SSE instruction set not enabled"
+#else
+
#include <mmintrin.h>
typedef int __v4si __attribute__((__vector_size__(16)));
@@ -37,7 +41,7 @@ typedef float __m128 __attribute__((__vector_size__(16)));
#endif
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_add_ss(__m128 __a, __m128 __b)
@@ -999,4 +1003,6 @@ do { \
#include <emmintrin.h>
#endif
+#endif /* __SSE__ */
+
#endif /* __XMMINTRIN_H */
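
Since <xmmintrin.h> now errors out when __SSE__ is undefined and its functions no longer carry a per-function __target__("sse") attribute, SSE intrinsics require SSE to be enabled for the whole file (the default on x86-64, or -msse elsewhere). A sketch:

    /* Sketch: needs SSE enabled at file level; with -mno-sse the header's
       #error fires at inclusion time. */
    #include <xmmintrin.h>

    __m128 scale4(__m128 v, float s) {
      return _mm_mul_ps(v, _mm_set1_ps(s));  /* multiply all four lanes by s */
    }
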
diff --git a/lib/Headers/xopintrin.h b/lib/Headers/xopintrin.h
index 86188bb29fd7..2eb35c4be844 100644
--- a/lib/Headers/xopintrin.h
+++ b/lib/Headers/xopintrin.h
@@ -28,10 +28,14 @@
#ifndef __XOPINTRIN_H
#define __XOPINTRIN_H
+#ifndef __XOP__
+# error "XOP instruction set is not enabled"
+#else
+
#include <fma4intrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
@@ -800,4 +804,6 @@ _mm256_frcz_pd(__m256d __A)
#undef __DEFAULT_FN_ATTRS
+#endif /* __XOP__ */
+
#endif /* __XOPINTRIN_H */
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index b805990eecbf..983dc18b57af 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -14,6 +14,7 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
+#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/HeaderMap.h"
#include "clang/Lex/HeaderSearchOptions.h"
@@ -151,7 +152,8 @@ std::string HeaderSearch::getModuleFileName(StringRef ModuleName,
auto FileName = llvm::sys::path::filename(ModuleMapPath);
llvm::hash_code Hash =
- llvm::hash_combine(DirName.lower(), FileName.lower());
+ llvm::hash_combine(DirName.lower(), FileName.lower(),
+ HSOpts->ModuleFormat);
SmallString<128> HashStr;
llvm::APInt(64, size_t(Hash)).toStringUnsigned(HashStr, /*Radix*/36);
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index f46af889e25d..45878b9b1508 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -3603,6 +3603,14 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
continue;
}
+ if (Tok.is(tok::annot_pragma_openmp)) {
+ // The result can be ignored because it must always be empty.
+ auto Res = ParseOpenMPDeclarativeDirective();
+ assert(!Res);
+ // Silence possible warnings.
+ (void)Res;
+ continue;
+ }
if (!Tok.is(tok::at)) {
auto CFieldCallback = [&](ParsingFieldDeclarator &FD) {
// Install the declarator into the current TagDecl.
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index e76f767786f0..0574a636337f 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -476,11 +476,15 @@ void Parser::Initialize() {
Ident_super = &PP.getIdentifierTable().get("super");
- if (getLangOpts().AltiVec) {
+ Ident_vector = nullptr;
+ Ident_bool = nullptr;
+ Ident_pixel = nullptr;
+ if (getLangOpts().AltiVec || getLangOpts().ZVector) {
Ident_vector = &PP.getIdentifierTable().get("vector");
- Ident_pixel = &PP.getIdentifierTable().get("pixel");
Ident_bool = &PP.getIdentifierTable().get("bool");
}
+ if (getLangOpts().AltiVec)
+ Ident_pixel = &PP.getIdentifierTable().get("pixel");
Ident_introduced = nullptr;
Ident_deprecated = nullptr;
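
With this change, -fzvector makes "vector" and "bool" context-sensitive keywords exactly as -maltivec does, while "pixel" stays AltiVec-only. A sketch of declarations the parser now accepts on SystemZ with -fzvector:

    /* Sketch: SystemZ, -fzvector. */
    vector signed int counts;      /* "vector" recognized as a keyword here */
    vector bool char byte_mask;    /* so is "bool" in this position         */
    /* vector pixel p; */          /* AltiVec-only spelling; not in ZVector */
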
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index ea3872f42700..4adbb2b6af2a 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -987,10 +987,11 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
Diag(D, TSWLoc, diag::err_invalid_vector_bool_decl_spec)
<< getSpecifierName((TSW)TypeSpecWidth);
- // vector bool long long requires VSX support.
+ // vector bool long long requires VSX support or ZVector.
if ((TypeSpecWidth == TSW_longlong) &&
(!PP.getTargetInfo().hasFeature("vsx")) &&
- (!PP.getTargetInfo().hasFeature("power8-vector")))
+ (!PP.getTargetInfo().hasFeature("power8-vector")) &&
+ !PP.getLangOpts().ZVector)
Diag(D, TSTLoc, diag::err_invalid_vector_long_long_decl_spec);
// Elements of vector bool are interpreted as unsigned. (PIM 2.1)
@@ -999,14 +1000,23 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
TypeSpecSign = TSS_unsigned;
} else if (TypeSpecType == TST_double) {
// vector long double and vector long long double are never allowed.
- // vector double is OK for Power7 and later.
+ // vector double is OK for Power7 and later, and ZVector.
if (TypeSpecWidth == TSW_long || TypeSpecWidth == TSW_longlong)
Diag(D, TSWLoc, diag::err_invalid_vector_long_double_decl_spec);
- else if (!PP.getTargetInfo().hasFeature("vsx"))
+ else if (!PP.getTargetInfo().hasFeature("vsx") &&
+ !PP.getLangOpts().ZVector)
Diag(D, TSTLoc, diag::err_invalid_vector_double_decl_spec);
+ } else if (TypeSpecType == TST_float) {
+ // vector float is unsupported for ZVector.
+ if (PP.getLangOpts().ZVector)
+ Diag(D, TSTLoc, diag::err_invalid_vector_float_decl_spec);
} else if (TypeSpecWidth == TSW_long) {
- Diag(D, TSWLoc, diag::warn_vector_long_decl_spec_combination)
- << getSpecifierName((TST)TypeSpecType, Policy);
+ // vector long is unsupported for ZVector and deprecated for AltiVec.
+ if (PP.getLangOpts().ZVector)
+ Diag(D, TSWLoc, diag::err_invalid_vector_long_decl_spec);
+ else
+ Diag(D, TSWLoc, diag::warn_vector_long_decl_spec_combination)
+ << getSpecifierName((TST)TypeSpecType, Policy);
}
if (TypeAltiVecPixel) {
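
The DeclSpec checks above define which element types each language mode accepts; under -fzvector, roughly: vector double and vector bool long long are allowed, while vector float and vector long are rejected. A sketch of the resulting rules:

    /* Sketch: SystemZ, -fzvector. */
    vector double xd;              /* OK: permitted for ZVector (or VSX)        */
    vector bool long long mask64;  /* OK: no VSX requirement under ZVector      */
    /* vector float xf; */         /* error: err_invalid_vector_float_decl_spec */
    /* vector long xl;  */         /* error: err_invalid_vector_long_decl_spec  */
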
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index d9d26e2af602..1a8ab6e209e5 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -8727,23 +8727,10 @@ static bool isSetterLikeSelector(Selector sel) {
static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
ObjCMessageExpr *Message) {
- if (S.NSMutableArrayPointer.isNull()) {
- IdentifierInfo *NSMutableArrayId =
- S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSMutableArray);
- NamedDecl *IF = S.LookupSingleName(S.TUScope, NSMutableArrayId,
- Message->getLocStart(),
- Sema::LookupOrdinaryName);
- ObjCInterfaceDecl *InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
- if (!InterfaceDecl) {
- return None;
- }
- QualType NSMutableArrayObject =
- S.Context.getObjCInterfaceType(InterfaceDecl);
- S.NSMutableArrayPointer =
- S.Context.getObjCObjectPointerType(NSMutableArrayObject);
- }
-
- if (S.NSMutableArrayPointer != Message->getReceiverType()) {
+ bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(),
+ NSAPI::ClassId_NSMutableArray);
+ if (!IsMutableArray) {
return None;
}
@@ -8775,24 +8762,10 @@ static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
static
Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
ObjCMessageExpr *Message) {
-
- if (S.NSMutableDictionaryPointer.isNull()) {
- IdentifierInfo *NSMutableDictionaryId =
- S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSMutableDictionary);
- NamedDecl *IF = S.LookupSingleName(S.TUScope, NSMutableDictionaryId,
- Message->getLocStart(),
- Sema::LookupOrdinaryName);
- ObjCInterfaceDecl *InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
- if (!InterfaceDecl) {
- return None;
- }
- QualType NSMutableDictionaryObject =
- S.Context.getObjCInterfaceType(InterfaceDecl);
- S.NSMutableDictionaryPointer =
- S.Context.getObjCObjectPointerType(NSMutableDictionaryObject);
- }
-
- if (S.NSMutableDictionaryPointer != Message->getReceiverType()) {
+ bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(),
+ NSAPI::ClassId_NSMutableDictionary);
+ if (!IsMutableDictionary) {
return None;
}
@@ -8820,63 +8793,14 @@ Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
}
static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
-
- ObjCInterfaceDecl *InterfaceDecl;
- if (S.NSMutableSetPointer.isNull()) {
- IdentifierInfo *NSMutableSetId =
- S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSMutableSet);
- NamedDecl *IF = S.LookupSingleName(S.TUScope, NSMutableSetId,
- Message->getLocStart(),
- Sema::LookupOrdinaryName);
- InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
- if (InterfaceDecl) {
- QualType NSMutableSetObject =
- S.Context.getObjCInterfaceType(InterfaceDecl);
- S.NSMutableSetPointer =
- S.Context.getObjCObjectPointerType(NSMutableSetObject);
- }
- }
-
- if (S.NSCountedSetPointer.isNull()) {
- IdentifierInfo *NSCountedSetId =
- S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSCountedSet);
- NamedDecl *IF = S.LookupSingleName(S.TUScope, NSCountedSetId,
- Message->getLocStart(),
- Sema::LookupOrdinaryName);
- InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
- if (InterfaceDecl) {
- QualType NSCountedSetObject =
- S.Context.getObjCInterfaceType(InterfaceDecl);
- S.NSCountedSetPointer =
- S.Context.getObjCObjectPointerType(NSCountedSetObject);
- }
- }
-
- if (S.NSMutableOrderedSetPointer.isNull()) {
- IdentifierInfo *NSOrderedSetId =
- S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSMutableOrderedSet);
- NamedDecl *IF = S.LookupSingleName(S.TUScope, NSOrderedSetId,
- Message->getLocStart(),
- Sema::LookupOrdinaryName);
- InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
- if (InterfaceDecl) {
- QualType NSOrderedSetObject =
- S.Context.getObjCInterfaceType(InterfaceDecl);
- S.NSMutableOrderedSetPointer =
- S.Context.getObjCObjectPointerType(NSOrderedSetObject);
- }
- }
-
- QualType ReceiverType = Message->getReceiverType();
-
- bool IsMutableSet = !S.NSMutableSetPointer.isNull() &&
- ReceiverType == S.NSMutableSetPointer;
- bool IsMutableOrderedSet = !S.NSMutableOrderedSetPointer.isNull() &&
- ReceiverType == S.NSMutableOrderedSetPointer;
- bool IsCountedSet = !S.NSCountedSetPointer.isNull() &&
- ReceiverType == S.NSCountedSetPointer;
-
- if (!IsMutableSet && !IsMutableOrderedSet && !IsCountedSet) {
+ bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(),
+ NSAPI::ClassId_NSMutableSet);
+
+ bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(),
+ NSAPI::ClassId_NSMutableOrderedSet);
+ if (!IsMutableSet && !IsMutableOrderedSet) {
return None;
}
@@ -8917,38 +8841,51 @@ void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
int ArgIndex = *ArgOpt;
- Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();
- if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
- Receiver = OE->getSourceExpr()->IgnoreImpCasts();
- }
-
Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
Arg = OE->getSourceExpr()->IgnoreImpCasts();
}
- if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
+ if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
- if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
- ValueDecl *Decl = ReceiverRE->getDecl();
+ if (ArgRE->isObjCSelfExpr()) {
Diag(Message->getSourceRange().getBegin(),
diag::warn_objc_circular_container)
- << Decl->getName();
- Diag(Decl->getLocation(),
- diag::note_objc_circular_container_declared_here)
- << Decl->getName();
+ << ArgRE->getDecl()->getName() << StringRef("super");
}
}
- } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
- if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
- if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
- ObjCIvarDecl *Decl = IvarRE->getDecl();
- Diag(Message->getSourceRange().getBegin(),
- diag::warn_objc_circular_container)
- << Decl->getName();
- Diag(Decl->getLocation(),
- diag::note_objc_circular_container_declared_here)
- << Decl->getName();
+ } else {
+ Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();
+
+ if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
+ Receiver = OE->getSourceExpr()->IgnoreImpCasts();
+ }
+
+ if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
+ if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
+ if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
+ ValueDecl *Decl = ReceiverRE->getDecl();
+ Diag(Message->getSourceRange().getBegin(),
+ diag::warn_objc_circular_container)
+ << Decl->getName() << Decl->getName();
+ if (!ArgRE->isObjCSelfExpr()) {
+ Diag(Decl->getLocation(),
+ diag::note_objc_circular_container_declared_here)
+ << Decl->getName();
+ }
+ }
+ }
+ } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
+ if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
+ if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
+ ObjCIvarDecl *Decl = IvarRE->getDecl();
+ Diag(Message->getSourceRange().getBegin(),
+ diag::warn_objc_circular_container)
+ << Decl->getName() << Decl->getName();
+ Diag(Decl->getLocation(),
+ diag::note_objc_circular_container_declared_here)
+ << Decl->getName();
+ }
}
}
}
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index 99d4a79c4669..c694a20319b0 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -5381,10 +5381,9 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
bool AddsAttr = !(OldImportAttr || OldExportAttr) && HasNewAttr;
if (AddsAttr && !IsSpecialization && !OldDecl->isImplicit()) {
- // If the declaration hasn't been used yet, allow with a warning for
- // free functions and global variables.
+ // Allow with a warning for free functions and global variables.
bool JustWarn = false;
- if (!OldDecl->isUsed() && !OldDecl->isCXXClassMember()) {
+ if (!OldDecl->isCXXClassMember()) {
auto *VD = dyn_cast<VarDecl>(OldDecl);
if (VD && !VD->getDescribedVarTemplate())
JustWarn = true;
@@ -5393,6 +5392,13 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
JustWarn = true;
}
+ // We cannot change a declaration that's been used because IR has already
+ // been emitted. Dllimported functions will still work though (modulo
+ // address equality) as they can use the thunk.
+ if (OldDecl->isUsed())
+ if (!isa<FunctionDecl>(OldDecl) || !NewImportAttr)
+ JustWarn = false;
+
unsigned DiagID = JustWarn ? diag::warn_attribute_dll_redeclaration
: diag::err_attribute_dll_redeclaration;
S.Diag(NewDecl->getLocation(), DiagID)
diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp
index 094187025df0..2cf02b484cb4 100644
--- a/lib/Sema/SemaExceptionSpec.cpp
+++ b/lib/Sema/SemaExceptionSpec.cpp
@@ -161,7 +161,13 @@ Sema::ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT) {
else
InstantiateExceptionSpec(Loc, SourceDecl);
- return SourceDecl->getType()->castAs<FunctionProtoType>();
+ const FunctionProtoType *Proto =
+ SourceDecl->getType()->castAs<FunctionProtoType>();
+ if (Proto->getExceptionSpecType() == clang::EST_Unparsed) {
+ Diag(Loc, diag::err_exception_spec_not_parsed);
+ Proto = nullptr;
+ }
+ return Proto;
}
void
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index 1ae983cad227..6499cb5d19eb 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -4949,6 +4949,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (!Result.isUsable()) return ExprError();
TheCall = dyn_cast<CallExpr>(Result.get());
if (!TheCall) return Result;
+ Args = ArrayRef<Expr *>(TheCall->getArgs(), TheCall->getNumArgs());
}
// Bail out early if calling a builtin with custom typechecking.
@@ -5518,7 +5519,7 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
// i.e. all the elements are integer constants.
ParenExpr *PE = dyn_cast<ParenExpr>(CastExpr);
ParenListExpr *PLE = dyn_cast<ParenListExpr>(CastExpr);
- if ((getLangOpts().AltiVec || getLangOpts().OpenCL)
+ if ((getLangOpts().AltiVec || getLangOpts().ZVector || getLangOpts().OpenCL)
&& castType->isVectorType() && (PE || PLE)) {
if (PLE && PLE->getNumExprs() == 0) {
Diag(PLE->getExprLoc(), diag::err_altivec_empty_initializer);
@@ -6075,7 +6076,9 @@ OpenCLCheckVectorConditional(Sema &S, ExprResult &Cond,
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
QualType VecResTy = S.CheckVectorOperands(LHS, RHS, QuestionLoc,
- /*isCompAssign*/false);
+ /*isCompAssign*/false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
if (VecResTy.isNull()) return QualType();
// The result type must match the condition type as specified in
// OpenCL v1.1 s6.11.6.
@@ -6126,7 +6129,9 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// Now check the two expressions.
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType())
- return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false);
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
QualType ResTy = UsualArithmeticConversions(LHS, RHS);
if (LHS.isInvalid() || RHS.isInvalid())
@@ -7267,7 +7272,9 @@ static bool tryVectorConvertAndSplat(Sema &S, ExprResult *scalar,
}
QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
- SourceLocation Loc, bool IsCompAssign) {
+ SourceLocation Loc, bool IsCompAssign,
+ bool AllowBothBool,
+ bool AllowBoolConversions) {
if (!IsCompAssign) {
LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
if (LHS.isInvalid())
@@ -7282,14 +7289,21 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
QualType LHSType = LHS.get()->getType().getUnqualifiedType();
QualType RHSType = RHS.get()->getType().getUnqualifiedType();
- // If the vector types are identical, return.
- if (Context.hasSameType(LHSType, RHSType))
- return LHSType;
-
const VectorType *LHSVecType = LHSType->getAs<VectorType>();
const VectorType *RHSVecType = RHSType->getAs<VectorType>();
assert(LHSVecType || RHSVecType);
+ // AltiVec-style "vector bool op vector bool" combinations are allowed
+ // for some operators but not others.
+ if (!AllowBothBool &&
+ LHSVecType && LHSVecType->getVectorKind() == VectorType::AltiVecBool &&
+ RHSVecType && RHSVecType->getVectorKind() == VectorType::AltiVecBool)
+ return InvalidOperands(Loc, LHS, RHS);
+
+ // If the vector types are identical, return.
+ if (Context.hasSameType(LHSType, RHSType))
+ return LHSType;
+
// If we have compatible AltiVec and GCC vector types, use the AltiVec type.
if (LHSVecType && RHSVecType &&
Context.areCompatibleVectorTypes(LHSType, RHSType)) {
@@ -7303,6 +7317,28 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
return RHSType;
}
+ // AllowBoolConversions says that bool and non-bool AltiVec vectors
+ // can be mixed, with the result being the non-bool type. The non-bool
+ // operand must have integer element type.
+ if (AllowBoolConversions && LHSVecType && RHSVecType &&
+ LHSVecType->getNumElements() == RHSVecType->getNumElements() &&
+ (Context.getTypeSize(LHSVecType->getElementType()) ==
+ Context.getTypeSize(RHSVecType->getElementType()))) {
+ if (LHSVecType->getVectorKind() == VectorType::AltiVecVector &&
+ LHSVecType->getElementType()->isIntegerType() &&
+ RHSVecType->getVectorKind() == VectorType::AltiVecBool) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
+ return LHSType;
+ }
+ if (!IsCompAssign &&
+ LHSVecType->getVectorKind() == VectorType::AltiVecBool &&
+ RHSVecType->getVectorKind() == VectorType::AltiVecVector &&
+ RHSVecType->getElementType()->isIntegerType()) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast);
+ return RHSType;
+ }
+ }
+
// If there's an ext-vector type and a scalar, try to convert the scalar to
// the vector element type and splat.
if (!RHSVecType && isa<ExtVectorType>(LHSVecType)) {
@@ -7391,7 +7427,9 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType())
- return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign);
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ /*AllowBothBool*/getLangOpts().AltiVec,
+ /*AllowBoolConversions*/false);
QualType compType = UsualArithmeticConversions(LHS, RHS, IsCompAssign);
if (LHS.isInvalid() || RHS.isInvalid())
@@ -7420,7 +7458,9 @@ QualType Sema::CheckRemainderOperands(
RHS.get()->getType()->isVectorType()) {
if (LHS.get()->getType()->hasIntegerRepresentation() &&
RHS.get()->getType()->hasIntegerRepresentation())
- return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign);
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ /*AllowBothBool*/getLangOpts().AltiVec,
+ /*AllowBoolConversions*/false);
return InvalidOperands(Loc, LHS, RHS);
}
@@ -7706,7 +7746,10 @@ QualType Sema::CheckAdditionOperands( // C99 6.5.6
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
- QualType compType = CheckVectorOperands(LHS, RHS, Loc, CompLHSTy);
+ QualType compType = CheckVectorOperands(
+ LHS, RHS, Loc, CompLHSTy,
+ /*AllowBothBool*/getLangOpts().AltiVec,
+ /*AllowBoolConversions*/getLangOpts().ZVector);
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
@@ -7781,7 +7824,10 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
- QualType compType = CheckVectorOperands(LHS, RHS, Loc, CompLHSTy);
+ QualType compType = CheckVectorOperands(
+ LHS, RHS, Loc, CompLHSTy,
+ /*AllowBothBool*/getLangOpts().AltiVec,
+ /*AllowBoolConversions*/getLangOpts().ZVector);
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
@@ -8023,7 +8069,21 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
RHS.get()->getType()->isVectorType()) {
if (LangOpts.OpenCL)
return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
- return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign);
+ if (LangOpts.ZVector) {
+ // The shift operators for the z vector extensions work basically
+ // like OpenCL shifts, except that neither the LHS nor the RHS is
+ // allowed to be a "vector bool".
+ if (auto LHSVecType = LHS.get()->getType()->getAs<VectorType>())
+ if (LHSVecType->getVectorKind() == VectorType::AltiVecBool)
+ return InvalidOperands(Loc, LHS, RHS);
+ if (auto RHSVecType = RHS.get()->getType()->getAs<VectorType>())
+ if (RHSVecType->getVectorKind() == VectorType::AltiVecBool)
+ return InvalidOperands(Loc, LHS, RHS);
+ return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
+ }
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
}
// Shifts don't perform usual arithmetic conversions, they just do integer
@@ -8797,7 +8857,9 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
bool IsRelational) {
// Check to make sure we're operating on vectors of the same type and width,
// Allowing one side to be a scalar of element type.
- QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false);
+ QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/getLangOpts().ZVector);
if (vType.isNull())
return vType;
@@ -8805,7 +8867,8 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
// If AltiVec, the comparison results in a numeric type, i.e.
// bool for C++, int for C
- if (vType->getAs<VectorType>()->getVectorKind() == VectorType::AltiVecVector)
+ if (getLangOpts().AltiVec &&
+ vType->getAs<VectorType>()->getVectorKind() == VectorType::AltiVecVector)
return Context.getLogicalOperationType();
// For non-floating point types, check for self-comparisons of the form
@@ -8839,7 +8902,9 @@ QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc) {
// Ensure that either both operands are of the same vector type, or
// one operand is of a vector type and the other is of its element type.
- QualType vType = CheckVectorOperands(LHS, RHS, Loc, false);
+ QualType vType = CheckVectorOperands(LHS, RHS, Loc, false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
if (vType.isNull())
return InvalidOperands(Loc, LHS, RHS);
if (getLangOpts().OpenCL && getLangOpts().OpenCLVersion < 120 &&
@@ -8857,8 +8922,9 @@ inline QualType Sema::CheckBitwiseOperands(
RHS.get()->getType()->isVectorType()) {
if (LHS.get()->getType()->hasIntegerRepresentation() &&
RHS.get()->getType()->hasIntegerRepresentation())
- return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign);
-
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/getLangOpts().ZVector);
return InvalidOperands(Loc, LHS, RHS);
}
@@ -9472,6 +9538,10 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
IsInc, IsPrefix);
} else if (S.getLangOpts().AltiVec && ResType->isVectorType()) {
// OK! ( C/C++ Language Extensions for CBEA(Version 2.6) 10.3 )
+ } else if (S.getLangOpts().ZVector && ResType->isVectorType() &&
+ (ResType->getAs<VectorType>()->getVectorKind() !=
+ VectorType::AltiVecBool)) {
+ // The z vector extensions allow ++ and -- for non-bool vectors.
} else if(S.getLangOpts().OpenCL && ResType->isVectorType() &&
ResType->getAs<VectorType>()->getElementType()->isIntegerType()) {
// OpenCL V1.2 6.3 says dec/inc ops operate on integer vector types.
@@ -10552,8 +10622,13 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
resultType = Input.get()->getType();
if (resultType->isDependentType())
break;
- if (resultType->isArithmeticType() || // C99 6.5.3.3p1
- resultType->isVectorType())
+ if (resultType->isArithmeticType()) // C99 6.5.3.3p1
+ break;
+ else if (resultType->isVectorType() &&
+ // The z vector extensions don't allow + or - with bool vectors.
+ (!Context.getLangOpts().ZVector ||
+ resultType->getAs<VectorType>()->getVectorKind() !=
+ VectorType::AltiVecBool))
break;
else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6
Opc == UO_Plus &&
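
Taken together, the CheckVectorOperands/CheckShiftOperands/unary-operator changes give the z vector extension its operand rules: a bool vector may combine with an integer vector of the same width for +, -, bitwise and comparison operators (the result takes the non-bool type), shifts reject bool vectors on either side, and ++/-- and unary +/- are limited to non-bool vectors. A sketch:

    /* Sketch: SystemZ, -fzvector. */
    vector signed int v;
    vector bool int   m;

    void zvector_ops(void) {
      vector signed int a = v + m;   /* bool/int mixing allowed; result signed */
      vector signed int b = v & m;   /* likewise for the bitwise operators     */
      v <<= 2;                       /* shifts OK on non-bool vectors          */
      /* (void)(m << 2); */          /* error: "vector bool" cannot be shifted */
      ++v;                           /* ++/-- permitted for non-bool vectors   */
      /* ++m; */                     /* error: rejected for bool vectors       */
      (void)a; (void)b;
    }
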
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 6608d7c1f061..01966d569a11 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -4950,7 +4950,9 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// Extension: conditional operator involving vector types.
if (LTy->isVectorType() || RTy->isVectorType())
- return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false);
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
// -- The second and third operands have arithmetic or enumeration type;
// the usual arithmetic conversions are performed to bring them to a
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index f0f7cb93d339..adec512693f3 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -2372,14 +2372,12 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
return true;
}
} else {
- // Make sure the bit-widths and signedness match.
- if (DesignatedStartIndex.getBitWidth() > DesignatedEndIndex.getBitWidth())
- DesignatedEndIndex
- = DesignatedEndIndex.extend(DesignatedStartIndex.getBitWidth());
- else if (DesignatedStartIndex.getBitWidth() <
- DesignatedEndIndex.getBitWidth())
- DesignatedStartIndex
- = DesignatedStartIndex.extend(DesignatedEndIndex.getBitWidth());
+ unsigned DesignatedIndexBitWidth =
+ ConstantArrayType::getMaxSizeBits(SemaRef.Context);
+ DesignatedStartIndex =
+ DesignatedStartIndex.extOrTrunc(DesignatedIndexBitWidth);
+ DesignatedEndIndex =
+ DesignatedEndIndex.extOrTrunc(DesignatedIndexBitWidth);
DesignatedStartIndex.setIsUnsigned(true);
DesignatedEndIndex.setIsUnsigned(true);
}
@@ -5928,6 +5926,9 @@ static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
if (!InitExpr)
return;
+ if (!S.ActiveTemplateInstantiations.empty())
+ return;
+
QualType DestType = InitExpr->getType();
if (!DestType->isRecordType())
return;
@@ -5943,24 +5944,6 @@ static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
return;
InitExpr = CCE->getArg(0)->IgnoreImpCasts();
-
- // Remove implicit temporary and constructor nodes.
- if (const MaterializeTemporaryExpr *MTE =
- dyn_cast<MaterializeTemporaryExpr>(InitExpr)) {
- InitExpr = MTE->GetTemporaryExpr()->IgnoreImpCasts();
- while (const CXXConstructExpr *CCE =
- dyn_cast<CXXConstructExpr>(InitExpr)) {
- if (isa<CXXTemporaryObjectExpr>(CCE))
- return;
- if (CCE->getNumArgs() == 0)
- return;
- if (CCE->getNumArgs() > 1 && !isa<CXXDefaultArgExpr>(CCE->getArg(1)))
- return;
- InitExpr = CCE->getArg(0);
- }
- InitExpr = InitExpr->IgnoreImpCasts();
- DiagID = diag::warn_redundant_move_on_return;
- }
}
// Find the std::move call and get the argument.
@@ -5985,14 +5968,20 @@ static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
if (!VD || !VD->hasLocalStorage())
return;
- if (!VD->getType()->isRecordType())
+ QualType SourceType = VD->getType();
+ if (!SourceType->isRecordType())
return;
- if (DiagID == 0) {
- DiagID = S.Context.hasSameUnqualifiedType(DestType, VD->getType())
- ? diag::warn_pessimizing_move_on_return
- : diag::warn_redundant_move_on_return;
+ if (!S.Context.hasSameUnqualifiedType(DestType, SourceType)) {
+ return;
}
+
+ // If we're returning a function parameter, copy elision
+ // is not possible.
+ if (isa<ParmVarDecl>(VD))
+ DiagID = diag::warn_redundant_move_on_return;
+ else
+ DiagID = diag::warn_pessimizing_move_on_return;
} else {
DiagID = diag::warn_pessimizing_move_on_initialization;
const Expr *ArgStripped = Arg->IgnoreImplicit()->IgnoreParens();
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index 3fd1f21ba3fd..2e7f89191580 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -4245,7 +4245,7 @@ std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
// Don't try to correct the identifier "vector" when in AltiVec mode.
// TODO: Figure out why typo correction misbehaves in this case, fix it, and
// remove this workaround.
- if (getLangOpts().AltiVec && Typo->isStr("vector"))
+ if ((getLangOpts().AltiVec || getLangOpts().ZVector) && Typo->isStr("vector"))
return nullptr;
// Provide a stop gap for files that are just seriously broken. Trying
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index 4030d9e66e02..ca67a1c0bf24 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -12,6 +12,7 @@
///
//===----------------------------------------------------------------------===//
+#include "TreeTransform.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Decl.h"
@@ -120,6 +121,7 @@ private:
/// from current directive.
OpenMPClauseKind ClauseKindMode;
Sema &SemaRef;
+ bool ForceCapturing;
typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator;
@@ -130,11 +132,15 @@ private:
public:
explicit DSAStackTy(Sema &S)
- : Stack(1), ClauseKindMode(OMPC_unknown), SemaRef(S) {}
+ : Stack(1), ClauseKindMode(OMPC_unknown), SemaRef(S),
+ ForceCapturing(false) {}
bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
+ bool isForceVarCapturing() const { return ForceCapturing; }
+ void setForceVarCapturing(bool V) { ForceCapturing = V; }
+
void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
Stack.push_back(SharingMapTy(DKind, DirName, CurScope, Loc));
@@ -652,10 +658,13 @@ void Sema::InitDataSharingAttributesStack() {
bool Sema::IsOpenMPCapturedVar(VarDecl *VD) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
VD = VD->getCanonicalDecl();
- if (DSAStack->getCurrentDirective() != OMPD_unknown) {
+ if (DSAStack->getCurrentDirective() != OMPD_unknown &&
+ (!DSAStack->isClauseParsingMode() ||
+ DSAStack->getParentDirective() != OMPD_unknown)) {
if (DSAStack->isLoopControlVariable(VD) ||
(VD->hasLocalStorage() &&
- isParallelOrTaskRegion(DSAStack->getCurrentDirective())))
+ isParallelOrTaskRegion(DSAStack->getCurrentDirective())) ||
+ DSAStack->isForceVarCapturing())
return true;
auto DVarPrivate = DSAStack->getTopDSA(VD, DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
@@ -1350,13 +1359,18 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
// This is required for proper codegen.
for (auto *Clause : Clauses) {
if (isOpenMPPrivate(Clause->getClauseKind()) ||
- Clause->getClauseKind() == OMPC_copyprivate) {
+ Clause->getClauseKind() == OMPC_copyprivate ||
+ (getLangOpts().OpenMPUseTLS &&
+ getASTContext().getTargetInfo().isTLSSupported() &&
+ Clause->getClauseKind() == OMPC_copyin)) {
+ DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
// Mark all variables in private list clauses as used in inner region.
for (auto *VarRef : Clause->children()) {
if (auto *E = cast_or_null<Expr>(VarRef)) {
MarkDeclarationsReferencedInExpr(E);
}
}
+ DSAStack->setForceVarCapturing(/*V=*/false);
} else if (isParallelOrTaskRegion(DSAStack->getCurrentDirective()) &&
Clause->getClauseKind() == OMPC_schedule) {
// Mark all variables in private list clauses as used in inner region.
@@ -2262,6 +2276,22 @@ bool OpenMPIterationSpaceChecker::Dependent() const {
(UB && UB->isValueDependent()) || (Step && Step->isValueDependent());
}
+template <typename T>
+static T *getExprAsWritten(T *E) {
+ if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
+ E = ExprTemp->getSubExpr();
+
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = MTE->GetTemporaryExpr();
+
+ while (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = Binder->getSubExpr();
+
+ if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExprAsWritten();
+ return E->IgnoreParens();
+}
+
bool OpenMPIterationSpaceChecker::SetVarAndLB(VarDecl *NewVar,
DeclRefExpr *NewVarRefExpr,
Expr *NewLB) {
@@ -2272,6 +2302,12 @@ bool OpenMPIterationSpaceChecker::SetVarAndLB(VarDecl *NewVar,
return true;
Var = NewVar;
VarRef = NewVarRefExpr;
+ if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB))
+ if (const CXXConstructorDecl *Ctor = CE->getConstructor())
+ if ((Ctor->isCopyOrMoveConstructor() ||
+ Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
+ CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
+ NewLB = CE->getArg(0)->IgnoreParenImpCasts();
LB = NewLB;
return false;
}
@@ -2402,11 +2438,12 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
static const VarDecl *GetInitVarDecl(const Expr *E) {
if (!E)
return nullptr;
- E = E->IgnoreParenImpCasts();
+ E = getExprAsWritten(E);
if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
if (const CXXConstructorDecl *Ctor = CE->getConstructor())
- if (Ctor->isCopyConstructor() && CE->getNumArgs() == 1 &&
- CE->getArg(0) != nullptr)
+ if ((Ctor->isCopyOrMoveConstructor() ||
+ Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
+ CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
E = CE->getArg(0)->IgnoreParenImpCasts();
auto DRE = dyn_cast_or_null<DeclRefExpr>(E);
if (!DRE)
@@ -2425,7 +2462,7 @@ bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond) << Var;
return true;
}
- S = S->IgnoreParenImpCasts();
+ S = getExprAsWritten(S);
SourceLocation CondLoc = S->getLocStart();
if (auto BO = dyn_cast<BinaryOperator>(S)) {
if (BO->isRelationalOp()) {
@@ -2565,20 +2602,85 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
return true;
}
+namespace {
+// Transform variables declared in GNU statement expressions to new ones to
+// avoid a crash in codegen.
+class TransformToNewDefs : public TreeTransform<TransformToNewDefs> {
+ typedef TreeTransform<TransformToNewDefs> BaseTransform;
+
+public:
+ TransformToNewDefs(Sema &SemaRef) : BaseTransform(SemaRef) {}
+
+ Decl *TransformDefinition(SourceLocation Loc, Decl *D) {
+ if (auto *VD = cast<VarDecl>(D))
+ if (!isa<ParmVarDecl>(D) && !isa<VarTemplateSpecializationDecl>(D) &&
+ !isa<ImplicitParamDecl>(D)) {
+ auto *NewVD = VarDecl::Create(
+ SemaRef.Context, VD->getDeclContext(), VD->getLocStart(),
+ VD->getLocation(), VD->getIdentifier(), VD->getType(),
+ VD->getTypeSourceInfo(), VD->getStorageClass());
+ NewVD->setTSCSpec(VD->getTSCSpec());
+ NewVD->setInit(VD->getInit());
+ NewVD->setInitStyle(VD->getInitStyle());
+ NewVD->setExceptionVariable(VD->isExceptionVariable());
+ NewVD->setNRVOVariable(VD->isNRVOVariable());
+ NewVD->setCXXForRangeDecl(VD->isInExternCXXContext());
+ NewVD->setConstexpr(VD->isConstexpr());
+ NewVD->setInitCapture(VD->isInitCapture());
+ NewVD->setPreviousDeclInSameBlockScope(
+ VD->isPreviousDeclInSameBlockScope());
+ VD->getDeclContext()->addHiddenDecl(NewVD);
+ transformedLocalDecl(VD, NewVD);
+ return NewVD;
+ }
+ return BaseTransform::TransformDefinition(Loc, D);
+ }
+
+ ExprResult TransformDeclRefExpr(DeclRefExpr *E) {
+ if (auto *NewD = TransformDecl(E->getExprLoc(), E->getDecl()))
+ if (E->getDecl() != NewD) {
+ NewD->setReferenced();
+ NewD->markUsed(SemaRef.Context);
+ return DeclRefExpr::Create(
+ SemaRef.Context, E->getQualifierLoc(), E->getTemplateKeywordLoc(),
+ cast<ValueDecl>(NewD), E->refersToEnclosingVariableOrCapture(),
+ E->getNameInfo(), E->getType(), E->getValueKind());
+ }
+ return BaseTransform::TransformDeclRefExpr(E);
+ }
+};
+}
+
/// \brief Build the expression to calculate the number of iterations.
Expr *
OpenMPIterationSpaceChecker::BuildNumIterations(Scope *S,
const bool LimitedType) const {
+ TransformToNewDefs Transform(SemaRef);
ExprResult Diff;
- if (Var->getType()->isIntegerType() || Var->getType()->isPointerType() ||
+ auto VarType = Var->getType().getNonReferenceType();
+ if (VarType->isIntegerType() || VarType->isPointerType() ||
SemaRef.getLangOpts().CPlusPlus) {
// Upper - Lower
- Expr *Upper = TestIsLessOp ? UB : LB;
- Expr *Lower = TestIsLessOp ? LB : UB;
+ auto *UBExpr = TestIsLessOp ? UB : LB;
+ auto *LBExpr = TestIsLessOp ? LB : UB;
+ Expr *Upper = Transform.TransformExpr(UBExpr).get();
+ Expr *Lower = Transform.TransformExpr(LBExpr).get();
+ if (!Upper || !Lower)
+ return nullptr;
+ Upper = SemaRef.PerformImplicitConversion(Upper, UBExpr->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
+ .get();
+ Lower = SemaRef.PerformImplicitConversion(Lower, LBExpr->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
+ .get();
+ if (!Upper || !Lower)
+ return nullptr;
Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
- if (!Diff.isUsable() && Var->getType()->getAsCXXRecordDecl()) {
+ if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
// BuildBinOp already emitted error, this one is to point user to upper
// and lower bound, and to tell what is passed to 'operator-'.
SemaRef.Diag(Upper->getLocStart(), diag::err_omp_loop_diff_cxx)
@@ -2599,8 +2701,15 @@ OpenMPIterationSpaceChecker::BuildNumIterations(Scope *S,
return nullptr;
// Upper - Lower [- 1] + Step
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(),
- Step->IgnoreImplicit());
+ auto NewStep = Transform.TransformExpr(Step->IgnoreImplicit());
+ if (NewStep.isInvalid())
+ return nullptr;
+ NewStep = SemaRef.PerformImplicitConversion(
+ NewStep.get(), Step->IgnoreImplicit()->getType(), Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStep.isInvalid())
+ return nullptr;
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
if (!Diff.isUsable())
return nullptr;
@@ -2610,15 +2719,35 @@ OpenMPIterationSpaceChecker::BuildNumIterations(Scope *S,
return nullptr;
// (Upper - Lower [- 1] + Step) / Step
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(),
- Step->IgnoreImplicit());
+ NewStep = Transform.TransformExpr(Step->IgnoreImplicit());
+ if (NewStep.isInvalid())
+ return nullptr;
+ NewStep = SemaRef.PerformImplicitConversion(
+ NewStep.get(), Step->IgnoreImplicit()->getType(), Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStep.isInvalid())
+ return nullptr;
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
if (!Diff.isUsable())
return nullptr;
// OpenMP runtime requires 32-bit or 64-bit loop variables.
+ QualType Type = Diff.get()->getType();
+ auto &C = SemaRef.Context;
+ bool UseVarType = VarType->hasIntegerRepresentation() &&
+ C.getTypeSize(Type) > C.getTypeSize(VarType);
+ if (!Type->isIntegerType() || UseVarType) {
+ unsigned NewSize =
+ UseVarType ? C.getTypeSize(VarType) : C.getTypeSize(Type);
+ bool IsSigned = UseVarType ? VarType->hasSignedIntegerRepresentation()
+ : Type->hasSignedIntegerRepresentation();
+ Type = C.getIntTypeForBitwidth(NewSize, IsSigned);
+ Diff = SemaRef.PerformImplicitConversion(
+ Diff.get(), Type, Sema::AA_Converting, /*AllowExplicit=*/true);
+ if (!Diff.isUsable())
+ return nullptr;
+ }
if (LimitedType) {
- auto &C = SemaRef.Context;
- QualType Type = Diff.get()->getType();
unsigned NewSize = (C.getTypeSize(Type) > 32) ? 64 : 32;
if (NewSize != C.getTypeSize(Type)) {
if (NewSize < C.getTypeSize(Type)) {
@@ -2627,7 +2756,8 @@ OpenMPIterationSpaceChecker::BuildNumIterations(Scope *S,
<< InitSrcRange << ConditionSrcRange;
}
QualType NewType = C.getIntTypeForBitwidth(
- NewSize, Type->hasSignedIntegerRepresentation());
+ NewSize, Type->hasSignedIntegerRepresentation() ||
+ C.getTypeSize(Type) < NewSize);
Diff = SemaRef.PerformImplicitConversion(Diff.get(), NewType,
Sema::AA_Converting, true);
if (!Diff.isUsable())
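The arithmetic built above follows the usual closed form for a canonical loop's trip count. A standalone sketch of it, assuming the bounds have already been normalized so that `lower` is the starting bound and `step` is positive (the names here are illustrative, not from the patch):

#include <cassert>

// (Upper - Lower [- 1] + Step) / Step, with the "- 1" only for a strict test.
static long long tripCount(long long lower, long long upper, long long step,
                           bool strictTest) {
  long long diff = upper - lower;          // Upper - Lower
  if (strictTest)
    diff -= 1;                             // [- 1]
  return (diff + step) / step;             // (+ Step) / Step
}

int main() {
  assert(tripCount(0, 10, 3, /*strictTest=*/true) == 4);   // i = 0, 3, 6, 9
  assert(tripCount(0, 9, 3, /*strictTest=*/false) == 4);   // i = 0, 3, 6, 9
  return 0;
}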
@@ -2642,10 +2772,29 @@ Expr *OpenMPIterationSpaceChecker::BuildPreCond(Scope *S, Expr *Cond) const {
// Try to build LB <op> UB, where <op> is <, >, <=, or >=.
bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
+ TransformToNewDefs Transform(SemaRef);
+
+ auto NewLB = Transform.TransformExpr(LB);
+ auto NewUB = Transform.TransformExpr(UB);
+ if (NewLB.isInvalid() || NewUB.isInvalid())
+ return Cond;
+ NewLB = SemaRef.PerformImplicitConversion(NewLB.get(), LB->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ NewUB = SemaRef.PerformImplicitConversion(NewUB.get(), UB->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewLB.isInvalid() || NewUB.isInvalid())
+ return Cond;
auto CondExpr = SemaRef.BuildBinOp(
S, DefaultLoc, TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE)
: (TestIsStrictOp ? BO_GT : BO_GE),
- LB, UB);
+ NewLB.get(), NewUB.get());
+ if (CondExpr.isUsable()) {
+ CondExpr = SemaRef.PerformImplicitConversion(
+ CondExpr.get(), SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
+ /*AllowExplicit=*/true);
+ }
SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
  // Otherwise use the original loop condition and evaluate it at runtime.
return CondExpr.isUsable() ? CondExpr.get() : Cond;
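A standalone sketch of the precondition the code above builds, LB <op> UB, evaluated once before the loop to rule out an empty iteration space (TestIsLessOp and TestIsStrictOp are the flags used above; the rest of the names are illustrative):

// TestIsLessOp / TestIsStrictOp select one of <, <=, >, >= for LB <op> UB.
static bool preCondition(long long lb, long long ub, bool testIsLessOp,
                         bool testIsStrictOp) {
  if (testIsLessOp)
    return testIsStrictOp ? lb < ub : lb <= ub;
  return testIsStrictOp ? lb > ub : lb >= ub;
}
// e.g. for "for (i = 10; i > 0; --i)" the precondition is 10 > 0.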
@@ -2835,6 +2984,31 @@ static bool CheckOpenMPIterationSpace(
return HasErrors;
}
+/// \brief Build 'VarRef = Start'.
+static ExprResult BuildCounterInit(Sema &SemaRef, Scope *S, SourceLocation Loc,
+ ExprResult VarRef, ExprResult Start) {
+ TransformToNewDefs Transform(SemaRef);
+  // Build 'VarRef = Start'.
+ auto NewStart = Transform.TransformExpr(Start.get()->IgnoreImplicit());
+ if (NewStart.isInvalid())
+ return ExprError();
+ NewStart = SemaRef.PerformImplicitConversion(
+ NewStart.get(), Start.get()->IgnoreImplicit()->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStart.isInvalid())
+ return ExprError();
+ NewStart = SemaRef.PerformImplicitConversion(
+ NewStart.get(), VarRef.get()->getType(), Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (!NewStart.isUsable())
+ return ExprError();
+
+ auto Init =
+ SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
+ return Init;
+}
+
/// \brief Build 'VarRef = Start + Iter * Step'.
static ExprResult BuildCounterUpdate(Sema &SemaRef, Scope *S,
SourceLocation Loc, ExprResult VarRef,
@@ -2846,14 +3020,33 @@ static ExprResult BuildCounterUpdate(Sema &SemaRef, Scope *S,
!Step.isUsable())
return ExprError();
- ExprResult Update = SemaRef.BuildBinOp(S, Loc, BO_Mul, Iter.get(),
- Step.get()->IgnoreImplicit());
+ TransformToNewDefs Transform(SemaRef);
+ auto NewStep = Transform.TransformExpr(Step.get()->IgnoreImplicit());
+ if (NewStep.isInvalid())
+ return ExprError();
+ NewStep = SemaRef.PerformImplicitConversion(
+ NewStep.get(), Step.get()->IgnoreImplicit()->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStep.isInvalid())
+ return ExprError();
+ ExprResult Update =
+ SemaRef.BuildBinOp(S, Loc, BO_Mul, Iter.get(), NewStep.get());
if (!Update.isUsable())
return ExprError();
// Build 'VarRef = Start + Iter * Step'.
+ auto NewStart = Transform.TransformExpr(Start.get()->IgnoreImplicit());
+ if (NewStart.isInvalid())
+ return ExprError();
+ NewStart = SemaRef.PerformImplicitConversion(
+ NewStart.get(), Start.get()->IgnoreImplicit()->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStart.isInvalid())
+ return ExprError();
Update = SemaRef.BuildBinOp(S, Loc, (Subtract ? BO_Sub : BO_Add),
- Start.get()->IgnoreImplicit(), Update.get());
+ NewStart.get(), Update.get());
if (!Update.isUsable())
return ExprError();
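A standalone sketch of the value the update expression assigns, Start + Iter * Step (or Start - Iter * Step when Subtract is set), i.e. the counter value of the Iter-th logical iteration (names are illustrative):

static long long counterAt(long long start, long long iter, long long step,
                           bool subtract) {
  long long update = iter * step;                     // Iter * Step
  return subtract ? start - update : start + update;  // Start +/- Iter * Step
}
// e.g. counterAt(0, 3, 2, /*subtract=*/false) == 6, the counter value in the
// fourth iteration of "for (i = 0; i < N; i += 2)".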
@@ -2964,8 +3157,18 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
// true).
auto PreCond = ExprResult(IterSpaces[0].PreCond);
auto N0 = IterSpaces[0].NumIterations;
- ExprResult LastIteration32 = WidenIterationCount(32 /* Bits */, N0, SemaRef);
- ExprResult LastIteration64 = WidenIterationCount(64 /* Bits */, N0, SemaRef);
+ ExprResult LastIteration32 = WidenIterationCount(
+ 32 /* Bits */, SemaRef.PerformImplicitConversion(
+ N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting, /*AllowExplicit=*/true)
+ .get(),
+ SemaRef);
+ ExprResult LastIteration64 = WidenIterationCount(
+ 64 /* Bits */, SemaRef.PerformImplicitConversion(
+ N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting, /*AllowExplicit=*/true)
+ .get(),
+ SemaRef);
if (!LastIteration32.isUsable() || !LastIteration64.isUsable())
return NestedLoopCount;
@@ -2982,11 +3185,19 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
auto N = IterSpaces[Cnt].NumIterations;
AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
if (LastIteration32.isUsable())
- LastIteration32 = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Mul,
- LastIteration32.get(), N);
+ LastIteration32 = SemaRef.BuildBinOp(
+ CurScope, SourceLocation(), BO_Mul, LastIteration32.get(),
+ SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
+ .get());
if (LastIteration64.isUsable())
- LastIteration64 = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Mul,
- LastIteration64.get(), N);
+ LastIteration64 = SemaRef.BuildBinOp(
+ CurScope, SourceLocation(), BO_Mul, LastIteration64.get(),
+ SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
+ .get());
}
// Choose either the 32-bit or 64-bit version.
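A standalone sketch of what the repeated BO_Mul above computes: the collapsed iteration space is the product of the per-loop trip counts, built in both 32-bit and 64-bit flavors so the narrower one can be chosen when it suffices (names are illustrative):

#include <cstdint>
#include <vector>

static uint64_t collapsedTripCount(const std::vector<uint64_t> &counts) {
  uint64_t total = 1;
  for (uint64_t n : counts)
    total *= n;            // LastIteration = LastIteration * N, per nested loop
  return total;
}
// e.g. collapse(2) over loops of 4 and 5 iterations -> 20 logical iterations.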
@@ -3147,6 +3358,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
// Build updates and final values of the loop counters.
bool HasErrors = false;
Built.Counters.resize(NestedLoopCount);
+ Built.Inits.resize(NestedLoopCount);
Built.Updates.resize(NestedLoopCount);
Built.Finals.resize(NestedLoopCount);
{
@@ -3180,6 +3392,12 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
SemaRef, cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl()),
IS.CounterVar->getType(), IS.CounterVar->getExprLoc(),
/*RefersToCapture=*/true);
+ ExprResult Init = BuildCounterInit(SemaRef, CurScope, UpdLoc, CounterVar,
+ IS.CounterInit);
+ if (!Init.isUsable()) {
+ HasErrors = true;
+ break;
+ }
ExprResult Update =
BuildCounterUpdate(SemaRef, CurScope, UpdLoc, CounterVar,
IS.CounterInit, Iter, IS.CounterStep, IS.Subtract);
@@ -3219,6 +3437,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
}
// Save results
Built.Counters[Cnt] = IS.CounterVar;
+ Built.Inits[Cnt] = Init.get();
Built.Updates[Cnt] = Update.get();
Built.Finals[Cnt] = Final.get();
}
@@ -3231,7 +3450,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
Built.IterationVarRef = IV.get();
Built.LastIteration = LastIteration.get();
Built.NumIterations = NumIterations.get();
- Built.CalcLastIteration = CalcLastIteration.get();
+ Built.CalcLastIteration =
+ SemaRef.ActOnFinishFullExpr(CalcLastIteration.get()).get();
Built.PreCond = PreCond.get();
Built.Cond = Cond.get();
Built.Init = Init.get();
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index 3045629a333e..9fbf55bf15d1 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -3601,7 +3601,7 @@ ASTReader::ReadASTCore(StringRef FileName,
ModuleFile &F = *M;
BitstreamCursor &Stream = F.Stream;
- PCHContainerOps.ExtractPCH(F.Buffer->getMemBufferRef(), F.StreamFile);
+ PCHContainerRdr.ExtractPCH(F.Buffer->getMemBufferRef(), F.StreamFile);
Stream.init(&F.StreamFile);
F.SizeInBits = F.Buffer->getBufferSize() * 8;
@@ -3872,7 +3872,7 @@ static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile){
/// file.
std::string ASTReader::getOriginalSourceFile(
const std::string &ASTFileName, FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps, DiagnosticsEngine &Diags) {
+ const PCHContainerReader &PCHContainerRdr, DiagnosticsEngine &Diags) {
// Open the AST file.
auto Buffer = FileMgr.getBufferForFile(ASTFileName);
if (!Buffer) {
@@ -3883,7 +3883,7 @@ std::string ASTReader::getOriginalSourceFile(
// Initialize the stream
llvm::BitstreamReader StreamFile;
- PCHContainerOps.ExtractPCH((*Buffer)->getMemBufferRef(), StreamFile);
+ PCHContainerRdr.ExtractPCH((*Buffer)->getMemBufferRef(), StreamFile);
BitstreamCursor Stream(StreamFile);
// Sniff for the signature.
@@ -3967,7 +3967,7 @@ namespace {
bool ASTReader::readASTFileControlBlock(
StringRef Filename, FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
ASTReaderListener &Listener) {
// Open the AST file.
// FIXME: This allows use of the VFS; we do not allow use of the
@@ -3979,7 +3979,7 @@ bool ASTReader::readASTFileControlBlock(
// Initialize the stream
llvm::BitstreamReader StreamFile;
- PCHContainerOps.ExtractPCH((*Buffer)->getMemBufferRef(), StreamFile);
+ PCHContainerRdr.ExtractPCH((*Buffer)->getMemBufferRef(), StreamFile);
BitstreamCursor Stream(StreamFile);
// Sniff for the signature.
@@ -4160,12 +4160,12 @@ bool ASTReader::readASTFileControlBlock(
bool ASTReader::isAcceptableASTFile(
StringRef Filename, FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps, const LangOptions &LangOpts,
+ const PCHContainerReader &PCHContainerRdr, const LangOptions &LangOpts,
const TargetOptions &TargetOpts, const PreprocessorOptions &PPOpts,
std::string ExistingModuleCachePath) {
SimplePCHValidator validator(LangOpts, TargetOpts, PPOpts,
ExistingModuleCachePath, FileMgr);
- return !readASTFileControlBlock(Filename, FileMgr, PCHContainerOps,
+ return !readASTFileControlBlock(Filename, FileMgr, PCHContainerRdr,
validator);
}
@@ -8472,7 +8472,7 @@ void ASTReader::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
}
ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
StringRef isysroot, bool DisableValidation,
bool AllowASTWithCompilerErrors,
bool AllowConfigurationMismatch, bool ValidateSystemInputs,
@@ -8480,9 +8480,9 @@ ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
std::unique_ptr<llvm::Timer> ReadTimer)
: Listener(new PCHValidator(PP, *this)), DeserializationListener(nullptr),
OwnsDeserializationListener(false), SourceMgr(PP.getSourceManager()),
- FileMgr(PP.getFileManager()), PCHContainerOps(PCHContainerOps),
+ FileMgr(PP.getFileManager()), PCHContainerRdr(PCHContainerRdr),
Diags(PP.getDiagnostics()), SemaObj(nullptr), PP(PP), Context(Context),
- Consumer(nullptr), ModuleMgr(PP.getFileManager(), PCHContainerOps),
+ Consumer(nullptr), ModuleMgr(PP.getFileManager(), PCHContainerRdr),
ReadTimer(std::move(ReadTimer)),
isysroot(isysroot), DisableValidation(DisableValidation),
AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index ca5d0be6329a..76e8334695f7 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -2110,6 +2110,10 @@ void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
Sub.clear();
for (unsigned i = 0; i < CollapsedNum; ++i)
Sub.push_back(Reader.ReadSubExpr());
+ D->setInits(Sub);
+ Sub.clear();
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
D->setUpdates(Sub);
Sub.clear();
for (unsigned i = 0; i < CollapsedNum; ++i)
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index cddefaca8f35..0dd809036a3d 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -1954,6 +1954,9 @@ void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
for (auto I : D->counters()) {
Writer.AddStmt(I);
}
+ for (auto I : D->inits()) {
+ Writer.AddStmt(I);
+ }
for (auto I : D->updates()) {
Writer.AddStmt(I);
}
diff --git a/lib/Serialization/GlobalModuleIndex.cpp b/lib/Serialization/GlobalModuleIndex.cpp
index 2c7da3e82a44..17c7914243e7 100644
--- a/lib/Serialization/GlobalModuleIndex.cpp
+++ b/lib/Serialization/GlobalModuleIndex.cpp
@@ -385,7 +385,7 @@ namespace {
/// \brief Builder that generates the global module index file.
class GlobalModuleIndexBuilder {
FileManager &FileMgr;
- const PCHContainerOperations &PCHContainerOps;
+ const PCHContainerReader &PCHContainerRdr;
/// \brief Mapping from files to module file information.
typedef llvm::MapVector<const FileEntry *, ModuleFileInfo> ModuleFilesMap;
@@ -419,8 +419,8 @@ namespace {
public:
explicit GlobalModuleIndexBuilder(
- FileManager &FileMgr, const PCHContainerOperations &PCHContainerOps)
- : FileMgr(FileMgr), PCHContainerOps(PCHContainerOps) {}
+ FileManager &FileMgr, const PCHContainerReader &PCHContainerRdr)
+ : FileMgr(FileMgr), PCHContainerRdr(PCHContainerRdr) {}
/// \brief Load the contents of the given module file into the builder.
///
@@ -505,7 +505,7 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Initialize the input stream
llvm::BitstreamReader InStreamFile;
- PCHContainerOps.ExtractPCH((*Buffer)->getMemBufferRef(), InStreamFile);
+ PCHContainerRdr.ExtractPCH((*Buffer)->getMemBufferRef(), InStreamFile);
llvm::BitstreamCursor InStream(InStreamFile);
// Sniff for the signature.
@@ -768,7 +768,7 @@ void GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
GlobalModuleIndex::ErrorCode
GlobalModuleIndex::writeIndex(FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps,
+ const PCHContainerReader &PCHContainerRdr,
StringRef Path) {
llvm::SmallString<128> IndexPath;
IndexPath += Path;
@@ -792,7 +792,7 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
}
// The module index builder.
- GlobalModuleIndexBuilder Builder(FileMgr, PCHContainerOps);
+ GlobalModuleIndexBuilder Builder(FileMgr, PCHContainerRdr);
// Load each of the module files.
std::error_code EC;
diff --git a/lib/Serialization/ModuleManager.cpp b/lib/Serialization/ModuleManager.cpp
index 03d8ed0e2467..271619404d2f 100644
--- a/lib/Serialization/ModuleManager.cpp
+++ b/lib/Serialization/ModuleManager.cpp
@@ -139,7 +139,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
}
// Initialize the stream.
- PCHContainerOps.ExtractPCH(New->Buffer->getMemBufferRef(), New->StreamFile);
+ PCHContainerRdr.ExtractPCH(New->Buffer->getMemBufferRef(), New->StreamFile);
}
if (ExpectedSignature) {
@@ -290,8 +290,8 @@ void ModuleManager::moduleFileAccepted(ModuleFile *MF) {
}
ModuleManager::ModuleManager(FileManager &FileMgr,
- const PCHContainerOperations &PCHContainerOps)
- : FileMgr(FileMgr), PCHContainerOps(PCHContainerOps), GlobalIndex(),
+ const PCHContainerReader &PCHContainerRdr)
+ : FileMgr(FileMgr), PCHContainerRdr(PCHContainerRdr), GlobalIndex(),
FirstVisitState(nullptr) {}
ModuleManager::~ModuleManager() {
diff --git a/test/Analysis/dead-stores.m b/test/Analysis/dead-stores.m
index 13b28dcf5374..9989efae2e8e 100644
--- a/test/Analysis/dead-stores.m
+++ b/test/Analysis/dead-stores.m
@@ -28,7 +28,7 @@ extern NSString *NSAlignmentBinding;
// This test case was reported as a false positive due to a bug in the
// LiveVariables <-> deadcode.DeadStores interplay. We should not flag a warning
// here. The test case was reported in:
-// http://lists.cs.uiuc.edu/pipermail/cfe-dev/2008-July/002157.html
+// http://lists.llvm.org/pipermail/cfe-dev/2008-July/002157.html
void DeadStoreTest(NSObject *anObject) {
NSArray *keys;
if ((keys = [anObject exposedBindings]) && // no-warning
diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c
index 8e8216b10111..32166b50f985 100644
--- a/test/CodeGen/builtins-ppc-altivec.c
+++ b/test/CodeGen/builtins-ppc-altivec.c
@@ -3307,81 +3307,225 @@ void test6() {
/* vec_sld */
res_vsc = vec_sld(vsc, vsc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_sld(vuc, vuc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_sld(vs, vs, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_sld(vus, vus, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vbs = vec_sld(vbs, vbs, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
res_vp = vec_sld(vp, vp, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_sld(vi, vi, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_sld(vui, vui, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vbi = vec_sld(vbi, vbi, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: xor <16 x i8>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
res_vf = vec_sld(vf, vf, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vsc = vec_vsldoi(vsc, vsc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_vsldoi(vuc, vuc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_vsldoi(vs, vs, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_vsldoi(vus, vus, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vp = vec_vsldoi(vp, vp, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_vsldoi(vi, vi, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_vsldoi(vui, vui, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vf = vec_vsldoi(vf, vf, 0);
-// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_sll */
diff --git a/test/CodeGen/builtins-systemz-zvector-error.c b/test/CodeGen/builtins-systemz-zvector-error.c
new file mode 100644
index 000000000000..8d5380dac161
--- /dev/null
+++ b/test/CodeGen/builtins-systemz-zvector-error.c
@@ -0,0 +1,576 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
+// RUN: -fzvector -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -fsyntax-only -verify %s
+
+#include <vecintrin.h>
+
+volatile vector signed char vsc;
+volatile vector signed short vss;
+volatile vector signed int vsi;
+volatile vector signed long long vsl;
+volatile vector unsigned char vuc;
+volatile vector unsigned short vus;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector bool char vbc;
+volatile vector bool short vbs;
+volatile vector bool int vbi;
+volatile vector bool long long vbl;
+volatile vector double vd;
+
+volatile signed char sc;
+volatile signed short ss;
+volatile signed int si;
+volatile signed long long sl;
+volatile unsigned char uc;
+volatile unsigned short us;
+volatile unsigned int ui;
+volatile unsigned long long ul;
+volatile double d;
+
+const void * volatile cptr;
+const signed char * volatile cptrsc;
+const signed short * volatile cptrss;
+const signed int * volatile cptrsi;
+const signed long long * volatile cptrsl;
+const unsigned char * volatile cptruc;
+const unsigned short * volatile cptrus;
+const unsigned int * volatile cptrui;
+const unsigned long long * volatile cptrul;
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+void * volatile ptr;
+signed char * volatile ptrsc;
+signed short * volatile ptrss;
+signed int * volatile ptrsi;
+signed long long * volatile ptrsl;
+unsigned char * volatile ptruc;
+unsigned short * volatile ptrus;
+unsigned int * volatile ptrui;
+unsigned long long * volatile ptrul;
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile unsigned int len;
+volatile int idx;
+int cc;
+
+void test_core(void) {
+ len = __lcbb(cptr, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+ len = __lcbb(cptr, 200); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+ len = __lcbb(cptr, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+ len = __lcbb(cptr, 8192); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+
+ vsl = vec_permi(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsl = vec_permi(vsl, vsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsl = vec_permi(vsl, vsl, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vul = vec_permi(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vul = vec_permi(vul, vul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vul = vec_permi(vul, vul, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbl = vec_permi(vbl, vbl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbl = vec_permi(vbl, vbl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbl = vec_permi(vbl, vbl, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vd = vec_permi(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vd = vec_permi(vd, vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vd = vec_permi(vd, vd, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+
+ vsi = vec_gather_element(vsi, vui, cptrsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_gather_element(vsi, vui, cptrsi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_gather_element(vsi, vui, cptrsi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vui = vec_gather_element(vui, vui, cptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_gather_element(vui, vui, cptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_gather_element(vui, vui, cptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_gather_element(vbi, vui, cptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_gather_element(vbi, vui, cptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_gather_element(vbi, vui, cptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vsl = vec_gather_element(vsl, vul, cptrsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_gather_element(vsl, vul, cptrsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_gather_element(vsl, vul, cptrsl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vul = vec_gather_element(vul, vul, cptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_gather_element(vul, vul, cptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_gather_element(vul, vul, cptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_gather_element(vbl, vul, cptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_gather_element(vbl, vul, cptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_gather_element(vbl, vul, cptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vd = vec_gather_element(vd, vul, cptrd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_gather_element(vd, vul, cptrd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_gather_element(vd, vul, cptrd, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+
+ vec_scatter_element(vsi, vui, ptrsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vsi, vui, ptrsi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vsi, vui, ptrsi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vui, vui, ptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vui, vui, ptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vui, vui, ptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vbi, vui, ptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vbi, vui, ptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vbi, vui, ptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vsl, vul, ptrsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vsl, vul, ptrsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vsl, vul, ptrsl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vul, vul, ptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vul, vul, ptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vul, vul, ptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vbl, vul, ptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vbl, vul, ptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vbl, vul, ptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vd, vul, ptrd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vd, vul, ptrd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vd, vul, ptrd, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+
+ vsc = vec_load_bndry(cptrsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsc = vec_load_bndry(cptrsc, 200); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsc = vec_load_bndry(cptrsc, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsc = vec_load_bndry(cptrsc, 8192); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vuc = vec_load_bndry(cptruc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vss = vec_load_bndry(cptrss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vus = vec_load_bndry(cptrus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsi = vec_load_bndry(cptrsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vui = vec_load_bndry(cptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsl = vec_load_bndry(cptrsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vul = vec_load_bndry(cptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+
+ vuc = vec_genmask(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+
+ vuc = vec_genmasks_8(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vuc = vec_genmasks_8(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vuc = vec_genmasks_8(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_genmasks_16(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_genmasks_16(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_genmasks_16(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_genmasks_32(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_genmasks_32(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_genmasks_32(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_genmasks_64(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_genmasks_64(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_genmasks_64(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+
+ vsc = vec_splat(vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_splat(vsc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_splat(vsc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_splat(vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vuc = vec_splat(vuc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vuc = vec_splat(vuc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vbc = vec_splat(vbc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vbc = vec_splat(vbc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vbc = vec_splat(vbc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vss = vec_splat(vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vss = vec_splat(vss, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vss = vec_splat(vss, 8); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vus = vec_splat(vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vus = vec_splat(vus, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vus = vec_splat(vus, 8); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vbs = vec_splat(vbs, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vbs = vec_splat(vbs, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vbs = vec_splat(vbs, 8); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vsi = vec_splat(vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_splat(vsi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_splat(vsi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vui = vec_splat(vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_splat(vui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_splat(vui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_splat(vbi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_splat(vbi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_splat(vbi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vsl = vec_splat(vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_splat(vsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_splat(vsl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vul = vec_splat(vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_splat(vul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_splat(vul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_splat(vbl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_splat(vbl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_splat(vbl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vd = vec_splat(vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_splat(vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_splat(vd, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+
+ vsc = vec_splat_s8(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vuc = vec_splat_u8(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vss = vec_splat_s16(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_splat_u16(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vsi = vec_splat_s32(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_splat_u32(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vsl = vec_splat_s64(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_splat_u64(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+}
+
+void test_integer(void) {
+ vsc = vec_rl_mask(vsc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vuc = vec_rl_mask(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vss = vec_rl_mask(vss, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vus = vec_rl_mask(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vsi = vec_rl_mask(vsi, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vui = vec_rl_mask(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vsl = vec_rl_mask(vsl, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vul = vec_rl_mask(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+
+ vsc = vec_sld(vsc, vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_sld(vsc, vsc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_sld(vsc, vsc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_sld(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_sld(vuc, vuc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_sld(vuc, vuc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vss = vec_sld(vss, vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vus = vec_sld(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsi = vec_sld(vsi, vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vui = vec_sld(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsl = vec_sld(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vul = vec_sld(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vd = vec_sld(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+
+ vsc = vec_sldw(vsc, vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsc = vec_sldw(vsc, vsc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsc = vec_sldw(vsc, vsc, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vuc = vec_sldw(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vuc = vec_sldw(vuc, vuc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vuc = vec_sldw(vuc, vuc, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vss = vec_sldw(vss, vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vus = vec_sldw(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_sldw(vsi, vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vui = vec_sldw(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsl = vec_sldw(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vul = vec_sldw(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vd = vec_sldw(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+}
+
+void test_float(void) {
+ vd = vec_ctd(vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vsl, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vul, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+
+ vsl = vec_ctsl(vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vsl = vec_ctsl(vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vsl = vec_ctsl(vd, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vul = vec_ctul(vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vul = vec_ctul(vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vul = vec_ctul(vd, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+
+ vbl = vec_fp_test_data_class(vd, idx, &cc); // expected-error {{must be a constant integer}}
+ vbl = vec_fp_test_data_class(vd, -1, &cc); // expected-error {{should be a value from 0 to 4095}}
+ vbl = vec_fp_test_data_class(vd, 4096, &cc); // expected-error {{should be a value from 0 to 4095}}
+}
diff --git a/test/CodeGen/builtins-systemz-zvector.c b/test/CodeGen/builtins-systemz-zvector.c
new file mode 100644
index 000000000000..6d554af44e93
--- /dev/null
+++ b/test/CodeGen/builtins-systemz-zvector.c
@@ -0,0 +1,2967 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+
+#include <vecintrin.h>
+
+volatile vector signed char vsc;
+volatile vector signed short vss;
+volatile vector signed int vsi;
+volatile vector signed long long vsl;
+volatile vector unsigned char vuc;
+volatile vector unsigned short vus;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector bool char vbc;
+volatile vector bool short vbs;
+volatile vector bool int vbi;
+volatile vector bool long long vbl;
+volatile vector double vd;
+
+volatile signed char sc;
+volatile signed short ss;
+volatile signed int si;
+volatile signed long long sl;
+volatile unsigned char uc;
+volatile unsigned short us;
+volatile unsigned int ui;
+volatile unsigned long long ul;
+volatile double d;
+
+const void * volatile cptr;
+const signed char * volatile cptrsc;
+const signed short * volatile cptrss;
+const signed int * volatile cptrsi;
+const signed long long * volatile cptrsl;
+const unsigned char * volatile cptruc;
+const unsigned short * volatile cptrus;
+const unsigned int * volatile cptrui;
+const unsigned long long * volatile cptrul;
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+void * volatile ptr;
+signed char * volatile ptrsc;
+signed short * volatile ptrss;
+signed int * volatile ptrsi;
+signed long long * volatile ptrsl;
+unsigned char * volatile ptruc;
+unsigned short * volatile ptrus;
+unsigned int * volatile ptrui;
+unsigned long long * volatile ptrul;
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile unsigned int len;
+volatile int idx;
+int cc;
+
+void test_core(void) {
+ len = __lcbb(cptr, 64);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 0)
+ len = __lcbb(cptr, 128);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 1)
+ len = __lcbb(cptr, 256);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 2)
+ len = __lcbb(cptr, 512);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 3)
+ len = __lcbb(cptr, 1024);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 4)
+ len = __lcbb(cptr, 2048);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 5)
+ len = __lcbb(cptr, 4096);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 6)
+
+ sc = vec_extract(vsc, idx);
+ // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}}
+ uc = vec_extract(vuc, idx);
+ // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}}
+ uc = vec_extract(vbc, idx);
+ // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}}
+ ss = vec_extract(vss, idx);
+ // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}}
+ us = vec_extract(vus, idx);
+ // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}}
+ us = vec_extract(vbs, idx);
+ // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}}
+ si = vec_extract(vsi, idx);
+ // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}}
+ ui = vec_extract(vui, idx);
+ // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}}
+ ui = vec_extract(vbi, idx);
+ // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}}
+ sl = vec_extract(vsl, idx);
+ // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}}
+ ul = vec_extract(vul, idx);
+ // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}}
+ ul = vec_extract(vbl, idx);
+ // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}}
+ d = vec_extract(vd, idx);
+ // CHECK: extractelement <2 x double> %{{.*}}, i32 %{{.*}}
+
+ vsc = vec_insert(sc, vsc, idx);
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}}
+ vuc = vec_insert(uc, vuc, idx);
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}}
+ vuc = vec_insert(uc, vbc, idx);
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}}
+ vss = vec_insert(ss, vss, idx);
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}}
+ vus = vec_insert(us, vus, idx);
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}}
+ vus = vec_insert(us, vbs, idx);
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}}
+ vsi = vec_insert(si, vsi, idx);
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}
+ vui = vec_insert(ui, vui, idx);
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}
+ vui = vec_insert(ui, vbi, idx);
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}
+ vsl = vec_insert(sl, vsl, idx);
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}}
+ vul = vec_insert(ul, vul, idx);
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}}
+ vul = vec_insert(ul, vbl, idx);
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}}
+ vd = vec_insert(d, vd, idx);
+ // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 %{{.*}}
+
+ vsc = vec_promote(sc, idx);
+ // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 %{{.*}}
+ vuc = vec_promote(uc, idx);
+ // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 %{{.*}}
+ vss = vec_promote(ss, idx);
+ // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 %{{.*}}
+ vus = vec_promote(us, idx);
+ // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 %{{.*}}
+ vsi = vec_promote(si, idx);
+ // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 %{{.*}}
+ vui = vec_promote(ui, idx);
+ // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 %{{.*}}
+ vsl = vec_promote(sl, idx);
+ // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 %{{.*}}
+ vul = vec_promote(ul, idx);
+ // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 %{{.*}}
+ vd = vec_promote(d, idx);
+ // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 %{{.*}}
+
+ vsc = vec_insert_and_zero(cptrsc);
+ // CHECK: insertelement <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i8 %{{.*}}, i32 7
+ vuc = vec_insert_and_zero(cptruc);
+ // CHECK: insertelement <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i8 %{{.*}}, i32 7
+ vss = vec_insert_and_zero(cptrss);
+ // CHECK: insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0>, i16 %{{.*}}, i32 3
+ vus = vec_insert_and_zero(cptrus);
+ // CHECK: insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0>, i16 %{{.*}}, i32 3
+ vsi = vec_insert_and_zero(cptrsi);
+ // CHECK: insertelement <4 x i32> <i32 0, i32 undef, i32 0, i32 0>, i32 %{{.*}}, i32 1
+ vui = vec_insert_and_zero(cptrui);
+ // CHECK: insertelement <4 x i32> <i32 0, i32 undef, i32 0, i32 0>, i32 %{{.*}}, i32 1
+ vsl = vec_insert_and_zero(cptrsl);
+ // CHECK: insertelement <2 x i64> <i64 undef, i64 0>, i64 %{{.*}}, i32 0
+ vul = vec_insert_and_zero(cptrul);
+ // CHECK: insertelement <2 x i64> <i64 undef, i64 0>, i64 %{{.*}}, i32 0
+ vd = vec_insert_and_zero(cptrd);
+ // CHECK: insertelement <2 x double> <double undef, double 0.000000e+00>, double %{{.*}}, i32 0
+
+ vsc = vec_perm(vsc, vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_perm(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_perm(vbc, vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_perm(vss, vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_perm(vus, vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_perm(vbs, vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_perm(vsi, vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_perm(vui, vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_perm(vbi, vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_perm(vsl, vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_perm(vul, vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_perm(vbl, vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_perm(vd, vd, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsl = vec_permi(vsl, vsl, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vsl = vec_permi(vsl, vsl, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vsl = vec_permi(vsl, vsl, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vsl = vec_permi(vsl, vsl, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+ vul = vec_permi(vul, vul, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vul = vec_permi(vul, vul, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vul = vec_permi(vul, vul, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vul = vec_permi(vul, vul, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+ vbl = vec_permi(vbl, vbl, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vbl = vec_permi(vbl, vbl, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vbl = vec_permi(vbl, vbl, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vbl = vec_permi(vbl, vbl, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+ vd = vec_permi(vd, vd, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vd = vec_permi(vd, vd, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vd = vec_permi(vd, vd, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vd = vec_permi(vd, vd, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+
+ vsc = vec_sel(vsc, vsc, vuc);
+ vsc = vec_sel(vsc, vsc, vbc);
+ vuc = vec_sel(vuc, vuc, vuc);
+ vuc = vec_sel(vuc, vuc, vbc);
+ vbc = vec_sel(vbc, vbc, vuc);
+ vbc = vec_sel(vbc, vbc, vbc);
+ vss = vec_sel(vss, vss, vus);
+ vss = vec_sel(vss, vss, vbs);
+ vus = vec_sel(vus, vus, vus);
+ vus = vec_sel(vus, vus, vbs);
+ vbs = vec_sel(vbs, vbs, vus);
+ vbs = vec_sel(vbs, vbs, vbs);
+ vsi = vec_sel(vsi, vsi, vui);
+ vsi = vec_sel(vsi, vsi, vbi);
+ vui = vec_sel(vui, vui, vui);
+ vui = vec_sel(vui, vui, vbi);
+ vbi = vec_sel(vbi, vbi, vui);
+ vbi = vec_sel(vbi, vbi, vbi);
+ vsl = vec_sel(vsl, vsl, vul);
+ vsl = vec_sel(vsl, vsl, vbl);
+ vul = vec_sel(vul, vul, vul);
+ vul = vec_sel(vul, vul, vbl);
+ vbl = vec_sel(vbl, vbl, vul);
+ vbl = vec_sel(vbl, vbl, vbl);
+ vd = vec_sel(vd, vd, vul);
+ vd = vec_sel(vd, vd, vbl);
+
+ vsi = vec_gather_element(vsi, vui, cptrsi, 0);
+ vsi = vec_gather_element(vsi, vui, cptrsi, 1);
+ vsi = vec_gather_element(vsi, vui, cptrsi, 2);
+ vsi = vec_gather_element(vsi, vui, cptrsi, 3);
+ vui = vec_gather_element(vui, vui, cptrui, 0);
+ vui = vec_gather_element(vui, vui, cptrui, 1);
+ vui = vec_gather_element(vui, vui, cptrui, 2);
+ vui = vec_gather_element(vui, vui, cptrui, 3);
+ vbi = vec_gather_element(vbi, vui, cptrui, 0);
+ vbi = vec_gather_element(vbi, vui, cptrui, 1);
+ vbi = vec_gather_element(vbi, vui, cptrui, 2);
+ vbi = vec_gather_element(vbi, vui, cptrui, 3);
+ vsl = vec_gather_element(vsl, vul, cptrsl, 0);
+ vsl = vec_gather_element(vsl, vul, cptrsl, 1);
+ vul = vec_gather_element(vul, vul, cptrul, 0);
+ vul = vec_gather_element(vul, vul, cptrul, 1);
+ vbl = vec_gather_element(vbl, vul, cptrul, 0);
+ vbl = vec_gather_element(vbl, vul, cptrul, 1);
+ vd = vec_gather_element(vd, vul, cptrd, 0);
+ vd = vec_gather_element(vd, vul, cptrd, 1);
+
+ vec_scatter_element(vsi, vui, ptrsi, 0);
+ vec_scatter_element(vsi, vui, ptrsi, 1);
+ vec_scatter_element(vsi, vui, ptrsi, 2);
+ vec_scatter_element(vsi, vui, ptrsi, 3);
+ vec_scatter_element(vui, vui, ptrui, 0);
+ vec_scatter_element(vui, vui, ptrui, 1);
+ vec_scatter_element(vui, vui, ptrui, 2);
+ vec_scatter_element(vui, vui, ptrui, 3);
+ vec_scatter_element(vbi, vui, ptrui, 0);
+ vec_scatter_element(vbi, vui, ptrui, 1);
+ vec_scatter_element(vbi, vui, ptrui, 2);
+ vec_scatter_element(vbi, vui, ptrui, 3);
+ vec_scatter_element(vsl, vul, ptrsl, 0);
+ vec_scatter_element(vsl, vul, ptrsl, 1);
+ vec_scatter_element(vul, vul, ptrul, 0);
+ vec_scatter_element(vul, vul, ptrul, 1);
+ vec_scatter_element(vbl, vul, ptrul, 0);
+ vec_scatter_element(vbl, vul, ptrul, 1);
+ vec_scatter_element(vd, vul, ptrd, 0);
+ vec_scatter_element(vd, vul, ptrd, 1);
+
+ vsc = vec_xld2(idx, cptrsc);
+ vuc = vec_xld2(idx, cptruc);
+ vss = vec_xld2(idx, cptrss);
+ vus = vec_xld2(idx, cptrus);
+ vsi = vec_xld2(idx, cptrsi);
+ vui = vec_xld2(idx, cptrui);
+ vsl = vec_xld2(idx, cptrsl);
+ vul = vec_xld2(idx, cptrul);
+ vd = vec_xld2(idx, cptrd);
+
+ vsc = vec_xlw4(idx, cptrsc);
+ vuc = vec_xlw4(idx, cptruc);
+ vss = vec_xlw4(idx, cptrss);
+ vus = vec_xlw4(idx, cptrus);
+ vsi = vec_xlw4(idx, cptrsi);
+ vui = vec_xlw4(idx, cptrui);
+
+ vec_xstd2(vsc, idx, ptrsc);
+ vec_xstd2(vuc, idx, ptruc);
+ vec_xstd2(vss, idx, ptrss);
+ vec_xstd2(vus, idx, ptrus);
+ vec_xstd2(vsi, idx, ptrsi);
+ vec_xstd2(vui, idx, ptrui);
+ vec_xstd2(vsl, idx, ptrsl);
+ vec_xstd2(vul, idx, ptrul);
+ vec_xstd2(vd, idx, ptrd);
+
+ vec_xstw4(vsc, idx, ptrsc);
+ vec_xstw4(vuc, idx, ptruc);
+ vec_xstw4(vss, idx, ptrss);
+ vec_xstw4(vus, idx, ptrus);
+ vec_xstw4(vsi, idx, ptrsi);
+ vec_xstw4(vui, idx, ptrui);
+
+ vsc = vec_load_bndry(cptrsc, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vuc = vec_load_bndry(cptruc, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vss = vec_load_bndry(cptrss, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vus = vec_load_bndry(cptrus, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vsi = vec_load_bndry(cptrsi, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vui = vec_load_bndry(cptrui, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vsl = vec_load_bndry(cptrsl, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vul = vec_load_bndry(cptrul, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vd = vec_load_bndry(cptrd, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vsc = vec_load_bndry(cptrsc, 128);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 1)
+ vsc = vec_load_bndry(cptrsc, 256);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 2)
+ vsc = vec_load_bndry(cptrsc, 512);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 3)
+ vsc = vec_load_bndry(cptrsc, 1024);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 4)
+ vsc = vec_load_bndry(cptrsc, 2048);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 5)
+ vsc = vec_load_bndry(cptrsc, 4096);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 6)
+
+ vsc = vec_load_len(cptrsc, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vuc = vec_load_len(cptruc, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vss = vec_load_len(cptrss, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vus = vec_load_len(cptrus, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vsi = vec_load_len(cptrsi, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vui = vec_load_len(cptrui, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vsl = vec_load_len(cptrsl, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vul = vec_load_len(cptrul, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vd = vec_load_len(cptrd, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+
+ vec_store_len(vsc, ptrsc, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vuc, ptruc, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vss, ptrss, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vus, ptrus, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vsi, ptrsi, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vui, ptrui, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vsl, ptrsl, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vul, ptrul, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vd, ptrd, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+
+ vsl = vec_load_pair(sl, sl);
+ vul = vec_load_pair(ul, ul);
+
+ vuc = vec_genmask(0);
+ // CHECK: <16 x i8> zeroinitializer
+ vuc = vec_genmask(0x8000);
+ // CHECK: <16 x i8> <i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+ vuc = vec_genmask(0xffff);
+ // CHECK: <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+
+ vuc = vec_genmasks_8(0, 7);
+ // CHECK: <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ vuc = vec_genmasks_8(1, 4);
+ // CHECK: <16 x i8> <i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120>
+ vuc = vec_genmasks_8(6, 2);
+ // CHECK: <16 x i8> <i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29>
+ vus = vec_genmasks_16(0, 15);
+ // CHECK: <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ vus = vec_genmasks_16(2, 11);
+ // CHECK: <8 x i16> <i16 16368, i16 16368, i16 16368, i16 16368, i16 16368, i16 16368, i16 16368, i16 16368>
+ vus = vec_genmasks_16(9, 2);
+ // CHECK: <8 x i16> <i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065>
+ vui = vec_genmasks_32(0, 31);
+ // CHECK: <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ vui = vec_genmasks_32(7, 20);
+ // CHECK: <4 x i32> <i32 33552384, i32 33552384, i32 33552384, i32 33552384>
+ vui = vec_genmasks_32(25, 4);
+ // CHECK: <4 x i32> <i32 -134217601, i32 -134217601, i32 -134217601, i32 -134217601>
+ vul = vec_genmasks_64(0, 63);
+ // CHECK: <2 x i64> <i64 -1, i64 -1>
+ vul = vec_genmasks_64(3, 40);
+ // CHECK: <2 x i64> <i64 2305843009205305344, i64 2305843009205305344>
+ vul = vec_genmasks_64(30, 11);
+ // CHECK: <2 x i64> <i64 -4503582447501313, i64 -4503582447501313>
+
+ vsc = vec_splat(vsc, 0);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vsc = vec_splat(vsc, 15);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ vuc = vec_splat(vuc, 0);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vuc = vec_splat(vuc, 15);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ vbc = vec_splat(vbc, 0);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vbc = vec_splat(vbc, 15);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ vss = vec_splat(vss, 0);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vss = vec_splat(vss, 7);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ vus = vec_splat(vus, 0);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vus = vec_splat(vus, 7);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ vbs = vec_splat(vbs, 0);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vbs = vec_splat(vbs, 7);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ vsi = vec_splat(vsi, 0);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vsi = vec_splat(vsi, 3);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ vui = vec_splat(vui, 0);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vui = vec_splat(vui, 3);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ vbi = vec_splat(vbi, 0);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vbi = vec_splat(vbi, 3);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ vsl = vec_splat(vsl, 0);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vsl = vec_splat(vsl, 1);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ vul = vec_splat(vul, 0);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vul = vec_splat(vul, 1);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ vbl = vec_splat(vbl, 0);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vbl = vec_splat(vbl, 1);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ vd = vec_splat(vd, 0);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+ vd = vec_splat(vd, 1);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+
+ vsc = vec_splat_s8(-128);
+ // CHECK: <16 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
+ vsc = vec_splat_s8(127);
+ // CHECK: <16 x i8> <i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127>
+ vuc = vec_splat_u8(1);
+ // CHECK: <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ vuc = vec_splat_u8(254);
+ // CHECK: <16 x i8> <i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2>
+ vss = vec_splat_s16(-32768);
+ // CHECK: <8 x i16> <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
+ vss = vec_splat_s16(32767);
+ // CHECK: <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>
+ vus = vec_splat_u16(1);
+ // CHECK: <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ vus = vec_splat_u16(65534);
+ // CHECK: <8 x i16> <i16 -2, i16 -2, i16 -2, i16 -2, i16 -2, i16 -2, i16 -2, i16 -2>
+ vsi = vec_splat_s32(-32768);
+ // CHECK: <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ vsi = vec_splat_s32(32767);
+ // CHECK: <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+ vui = vec_splat_u32(-32768);
+ // CHECK: <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ vui = vec_splat_u32(32767);
+ // CHECK: <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+ vsl = vec_splat_s64(-32768);
+ // CHECK: <2 x i64> <i64 -32768, i64 -32768>
+ vsl = vec_splat_s64(32767);
+ // CHECK: <2 x i64> <i64 32767, i64 32767>
+ vul = vec_splat_u64(-32768);
+ // CHECK: <2 x i64> <i64 -32768, i64 -32768>
+ vul = vec_splat_u64(32767);
+ // CHECK: <2 x i64> <i64 32767, i64 32767>
+
+ vsc = vec_splats(sc);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vuc = vec_splats(uc);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vss = vec_splats(ss);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vus = vec_splats(us);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vsi = vec_splats(si);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vui = vec_splats(ui);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vsl = vec_splats(sl);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vul = vec_splats(ul);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vd = vec_splats(d);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+
+ vsl = vec_extend_s64(vsc);
+ vsl = vec_extend_s64(vss);
+ vsl = vec_extend_s64(vsi);
+
+ vsc = vec_mergeh(vsc, vsc);
+ // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ vuc = vec_mergeh(vuc, vuc);
+ // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ vbc = vec_mergeh(vbc, vbc);
+ // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ vss = vec_mergeh(vss, vss);
+ // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ vus = vec_mergeh(vus, vus);
+ // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ vbs = vec_mergeh(vbs, vbs);
+ // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ vsi = vec_mergeh(vsi, vsi);
+ // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ vui = vec_mergeh(vui, vui);
+ // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ vbi = vec_mergeh(vbi, vbi);
+ // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ vsl = vec_mergeh(vsl, vsl);
+ // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
+ vul = vec_mergeh(vul, vul);
+ // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
+ vbl = vec_mergeh(vbl, vbl);
+ // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
+ vd = vec_mergeh(vd, vd);
+ // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
+
+ vsc = vec_mergel(vsc, vsc);
+ // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ vuc = vec_mergel(vuc, vuc);
+ // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ vbc = vec_mergel(vbc, vbc);
+ // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ vss = vec_mergel(vss, vss);
+ // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ vus = vec_mergel(vus, vus);
+ // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ vbs = vec_mergel(vbs, vbs);
+ // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ vsi = vec_mergel(vsi, vsi);
+ // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ vui = vec_mergel(vui, vui);
+ // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ vbi = vec_mergel(vbi, vbi);
+ // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ vsl = vec_mergel(vsl, vsl);
+ // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
+ vul = vec_mergel(vul, vul);
+ // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
+ vbl = vec_mergel(vbl, vbl);
+ // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
+ vd = vec_mergel(vd, vd);
+ // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
+
+ vsc = vec_pack(vss, vss);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ vuc = vec_pack(vus, vus);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ vbc = vec_pack(vbs, vbs);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ vss = vec_pack(vsi, vsi);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ vus = vec_pack(vui, vui);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ vbs = vec_pack(vbi, vbi);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ vsi = vec_pack(vsl, vsl);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ vui = vec_pack(vul, vul);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ vbi = vec_pack(vbl, vbl);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+
+ vsc = vec_packs(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vpksh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vuc = vec_packs(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vss = vec_packs(vsi, vsi);
+ // CHECK: call <8 x i16> @llvm.s390.vpksf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vus = vec_packs(vui, vui);
+ // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsi = vec_packs(vsl, vsl);
+ // CHECK: call <4 x i32> @llvm.s390.vpksg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vui = vec_packs(vul, vul);
+ // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vsc = vec_packs_cc(vss, vss, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpkshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vuc = vec_packs_cc(vus, vus, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpklshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vss = vec_packs_cc(vsi, vsi, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpksfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vus = vec_packs_cc(vui, vui, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpklsfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsi = vec_packs_cc(vsl, vsl, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpksgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vui = vec_packs_cc(vul, vul, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpklsgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vuc = vec_packsu(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vuc = vec_packsu(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_packsu(vsi, vsi);
+ // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vus = vec_packsu(vui, vui);
+ // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_packsu(vsl, vsl);
+ // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vui = vec_packsu(vul, vul);
+ // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vuc = vec_packsu_cc(vus, vus, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpklshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_packsu_cc(vui, vui, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpklsfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_packsu_cc(vul, vul, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpklsgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vss = vec_unpackh(vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vuphb(<16 x i8> %{{.*}})
+ vus = vec_unpackh(vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vuplhb(<16 x i8> %{{.*}})
+ vbs = vec_unpackh(vbc);
+ // CHECK: call <8 x i16> @llvm.s390.vuphb(<16 x i8> %{{.*}})
+ vsi = vec_unpackh(vss);
+ // CHECK: call <4 x i32> @llvm.s390.vuphh(<8 x i16> %{{.*}})
+ vui = vec_unpackh(vus);
+ // CHECK: call <4 x i32> @llvm.s390.vuplhh(<8 x i16> %{{.*}})
+ vbi = vec_unpackh(vbs);
+ // CHECK: call <4 x i32> @llvm.s390.vuphh(<8 x i16> %{{.*}})
+ vsl = vec_unpackh(vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vuphf(<4 x i32> %{{.*}})
+ vul = vec_unpackh(vui);
+ // CHECK: call <2 x i64> @llvm.s390.vuplhf(<4 x i32> %{{.*}})
+ vbl = vec_unpackh(vbi);
+ // CHECK: call <2 x i64> @llvm.s390.vuphf(<4 x i32> %{{.*}})
+
+ vss = vec_unpackl(vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vuplb(<16 x i8> %{{.*}})
+ vus = vec_unpackl(vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vupllb(<16 x i8> %{{.*}})
+ vbs = vec_unpackl(vbc);
+ // CHECK: call <8 x i16> @llvm.s390.vuplb(<16 x i8> %{{.*}})
+ vsi = vec_unpackl(vss);
+ // CHECK: call <4 x i32> @llvm.s390.vuplhw(<8 x i16> %{{.*}})
+ vui = vec_unpackl(vus);
+ // CHECK: call <4 x i32> @llvm.s390.vupllh(<8 x i16> %{{.*}})
+ vbi = vec_unpackl(vbs);
+ // CHECK: call <4 x i32> @llvm.s390.vuplhw(<8 x i16> %{{.*}})
+ vsl = vec_unpackl(vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vuplf(<4 x i32> %{{.*}})
+ vul = vec_unpackl(vui);
+ // CHECK: call <2 x i64> @llvm.s390.vupllf(<4 x i32> %{{.*}})
+ vbl = vec_unpackl(vbi);
+ // CHECK: call <2 x i64> @llvm.s390.vuplf(<4 x i32> %{{.*}})
+}
+
+void test_compare(void) {
+ vbc = vec_cmpeq(vsc, vsc);
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpeq(vuc, vuc);
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpeq(vbc, vbc);
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmpeq(vss, vss);
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpeq(vus, vus);
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpeq(vbs, vbs);
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmpeq(vsi, vsi);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpeq(vui, vui);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpeq(vbi, vbi);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vsl, vsl);
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vul, vul);
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vbl, vbl);
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vd, vd);
+ // CHECK: fcmp oeq <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmpge(vsc, vsc);
+ // CHECK: icmp sge <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpge(vuc, vuc);
+ // CHECK: icmp uge <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmpge(vss, vss);
+ // CHECK: icmp sge <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpge(vus, vus);
+ // CHECK: icmp uge <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmpge(vsi, vsi);
+ // CHECK: icmp sge <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpge(vui, vui);
+ // CHECK: icmp uge <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmpge(vsl, vsl);
+ // CHECK: icmp sge <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpge(vul, vul);
+ // CHECK: icmp uge <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpge(vd, vd);
+ // CHECK: fcmp oge <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmpgt(vsc, vsc);
+ // CHECK: icmp sgt <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpgt(vuc, vuc);
+ // CHECK: icmp ugt <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmpgt(vss, vss);
+ // CHECK: icmp sgt <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpgt(vus, vus);
+ // CHECK: icmp ugt <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmpgt(vsi, vsi);
+ // CHECK: icmp sgt <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpgt(vui, vui);
+ // CHECK: icmp ugt <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmpgt(vsl, vsl);
+ // CHECK: icmp sgt <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpgt(vul, vul);
+ // CHECK: icmp ugt <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpgt(vd, vd);
+ // CHECK: fcmp ogt <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmple(vsc, vsc);
+ // CHECK: icmp sle <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmple(vuc, vuc);
+ // CHECK: icmp ule <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmple(vss, vss);
+ // CHECK: icmp sle <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmple(vus, vus);
+ // CHECK: icmp ule <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmple(vsi, vsi);
+ // CHECK: icmp sle <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmple(vui, vui);
+ // CHECK: icmp ule <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmple(vsl, vsl);
+ // CHECK: icmp sle <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmple(vul, vul);
+ // CHECK: icmp ule <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmple(vd, vd);
+ // CHECK: fcmp ole <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmplt(vsc, vsc);
+ // CHECK: icmp slt <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmplt(vuc, vuc);
+ // CHECK: icmp ult <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmplt(vss, vss);
+ // CHECK: icmp slt <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmplt(vus, vus);
+ // CHECK: icmp ult <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmplt(vsi, vsi);
+ // CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmplt(vui, vui);
+ // CHECK: icmp ult <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmplt(vsl, vsl);
+ // CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmplt(vul, vul);
+ // CHECK: icmp ult <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmplt(vd, vd);
+ // CHECK: fcmp olt <2 x double> %{{.*}}, %{{.*}}
+
+ idx = vec_all_eq(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_ne(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_ge(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_gt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_le(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_lt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
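+ // The negated predicates (nge/ngt/nle/nlt) are only provided for the double
+ // vector type and reuse the same ordered compare intrinsics; the negation is
+ // presumably realized in the (unchecked) condition-code test.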
+ idx = vec_all_nge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_all_ngt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_all_nle(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_all_nlt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
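+ // Both NaN predicates use vftcidb with mask 15, which presumably selects the
+ // NaN data classes; vec_all_nan and vec_all_numeric differ only in which
+ // condition-code value they accept.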
+ idx = vec_all_nan(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+ idx = vec_all_numeric(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+
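+ // The vec_any_* forms are expected to call the same intrinsics as their
+ // vec_all_* counterparts; only the (unchecked) cc comparison differs.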
+ idx = vec_any_eq(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_ne(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_ge(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_gt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_le(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_lt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_nge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_any_ngt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_any_nle(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_any_nlt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_nan(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+ idx = vec_any_numeric(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+}
+
+void test_integer(void) {
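+ // vec_andc and vec_nor are expected to lower to plain bitwise IR (an and/or
+ // combined with a complement) rather than to target intrinsics, which is
+ // presumably why no CHECK lines accompany them here.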
+ vsc = vec_andc(vsc, vsc);
+ vsc = vec_andc(vsc, vbc);
+ vsc = vec_andc(vbc, vsc);
+ vuc = vec_andc(vuc, vuc);
+ vuc = vec_andc(vuc, vbc);
+ vuc = vec_andc(vbc, vuc);
+ vbc = vec_andc(vbc, vbc);
+ vss = vec_andc(vss, vss);
+ vss = vec_andc(vss, vbs);
+ vss = vec_andc(vbs, vss);
+ vus = vec_andc(vus, vus);
+ vus = vec_andc(vus, vbs);
+ vus = vec_andc(vbs, vus);
+ vbs = vec_andc(vbs, vbs);
+ vsi = vec_andc(vsi, vsi);
+ vsi = vec_andc(vsi, vbi);
+ vsi = vec_andc(vbi, vsi);
+ vui = vec_andc(vui, vui);
+ vui = vec_andc(vui, vbi);
+ vui = vec_andc(vbi, vui);
+ vbi = vec_andc(vbi, vbi);
+ vsl = vec_andc(vsl, vsl);
+ vsl = vec_andc(vsl, vbl);
+ vsl = vec_andc(vbl, vsl);
+ vul = vec_andc(vul, vul);
+ vul = vec_andc(vul, vbl);
+ vul = vec_andc(vbl, vul);
+ vbl = vec_andc(vbl, vbl);
+ vd = vec_andc(vd, vd);
+ vd = vec_andc(vd, vbl);
+ vd = vec_andc(vbl, vd);
+
+ vsc = vec_nor(vsc, vsc);
+ vsc = vec_nor(vsc, vbc);
+ vsc = vec_nor(vbc, vsc);
+ vuc = vec_nor(vuc, vuc);
+ vuc = vec_nor(vuc, vbc);
+ vuc = vec_nor(vbc, vuc);
+ vbc = vec_nor(vbc, vbc);
+ vss = vec_nor(vss, vss);
+ vss = vec_nor(vss, vbs);
+ vss = vec_nor(vbs, vss);
+ vus = vec_nor(vus, vus);
+ vus = vec_nor(vus, vbs);
+ vus = vec_nor(vbs, vus);
+ vbs = vec_nor(vbs, vbs);
+ vsi = vec_nor(vsi, vsi);
+ vsi = vec_nor(vsi, vbi);
+ vsi = vec_nor(vbi, vsi);
+ vui = vec_nor(vui, vui);
+ vui = vec_nor(vui, vbi);
+ vui = vec_nor(vbi, vui);
+ vbi = vec_nor(vbi, vbi);
+ vsl = vec_nor(vsl, vsl);
+ vsl = vec_nor(vsl, vbl);
+ vsl = vec_nor(vbl, vsl);
+ vul = vec_nor(vul, vul);
+ vul = vec_nor(vul, vbl);
+ vul = vec_nor(vbl, vul);
+ vbl = vec_nor(vbl, vbl);
+ vd = vec_nor(vd, vd);
+ vd = vec_nor(vd, vbl);
+ vd = vec_nor(vbl, vd);
+
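+ // The leading/trailing-zero and population-count builtins map onto the
+ // generic LLVM ctlz/cttz/ctpop intrinsics; the trailing `i1 false` marks a
+ // zero input as defined (yielding the full element width), not undefined.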
+ vuc = vec_cntlz(vsc);
+ // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vuc = vec_cntlz(vuc);
+ // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vus = vec_cntlz(vss);
+ // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vus = vec_cntlz(vus);
+ // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vui = vec_cntlz(vsi);
+ // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vui = vec_cntlz(vui);
+ // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vul = vec_cntlz(vsl);
+ // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
+ vul = vec_cntlz(vul);
+ // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
+
+ vuc = vec_cnttz(vsc);
+ // CHECK: call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vuc = vec_cnttz(vuc);
+ // CHECK: call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vus = vec_cnttz(vss);
+ // CHECK: call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vus = vec_cnttz(vus);
+ // CHECK: call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vui = vec_cnttz(vsi);
+ // CHECK: call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vui = vec_cnttz(vui);
+ // CHECK: call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vul = vec_cnttz(vsl);
+ // CHECK: call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %{{.*}}, i1 false)
+ vul = vec_cnttz(vul);
+ // CHECK: call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %{{.*}}, i1 false)
+
+ vuc = vec_popcnt(vsc);
+ // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}})
+ vuc = vec_popcnt(vuc);
+ // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}})
+ vus = vec_popcnt(vss);
+ // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %{{.*}})
+ vus = vec_popcnt(vus);
+ // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %{{.*}})
+ vui = vec_popcnt(vsi);
+ // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}})
+ vui = vec_popcnt(vui);
+ // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}})
+ vul = vec_popcnt(vsl);
+ // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}})
+ vul = vec_popcnt(vul);
+ // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}})
+
+ vsc = vec_rl(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.verllvb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_rl(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.verllvb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_rl(vss, vus);
+ // CHECK: call <8 x i16> @llvm.s390.verllvh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_rl(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.verllvh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_rl(vsi, vui);
+ // CHECK: call <4 x i32> @llvm.s390.verllvf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_rl(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.verllvf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_rl(vsl, vul);
+ // CHECK: call <2 x i64> @llvm.s390.verllvg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_rl(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.verllvg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vsc = vec_rli(vsc, ul);
+ // CHECK: call <16 x i8> @llvm.s390.verllb(<16 x i8> %{{.*}}, i32 %{{.*}})
+ vuc = vec_rli(vuc, ul);
+ // CHECK: call <16 x i8> @llvm.s390.verllb(<16 x i8> %{{.*}}, i32 %{{.*}})
+ vss = vec_rli(vss, ul);
+ // CHECK: call <8 x i16> @llvm.s390.verllh(<8 x i16> %{{.*}}, i32 %{{.*}})
+ vus = vec_rli(vus, ul);
+ // CHECK: call <8 x i16> @llvm.s390.verllh(<8 x i16> %{{.*}}, i32 %{{.*}})
+ vsi = vec_rli(vsi, ul);
+ // CHECK: call <4 x i32> @llvm.s390.verllf(<4 x i32> %{{.*}}, i32 %{{.*}})
+ vui = vec_rli(vui, ul);
+ // CHECK: call <4 x i32> @llvm.s390.verllf(<4 x i32> %{{.*}}, i32 %{{.*}})
+ vsl = vec_rli(vsl, ul);
+ // CHECK: call <2 x i64> @llvm.s390.verllg(<2 x i64> %{{.*}}, i32 %{{.*}})
+ vul = vec_rli(vul, ul);
+ // CHECK: call <2 x i64> @llvm.s390.verllg(<2 x i64> %{{.*}}, i32 %{{.*}})
+
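+ // vec_rl_mask carries its mask as the final immediate operand of verim*;
+ // the tests below exercise the 8-bit boundary values 0 and 255.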
+ vsc = vec_rl_mask(vsc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsc = vec_rl_mask(vsc, vuc, 255);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 255)
+ vuc = vec_rl_mask(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_rl_mask(vuc, vuc, 255);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 255)
+ vss = vec_rl_mask(vss, vus, 0);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vss = vec_rl_mask(vss, vus, 255);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 255)
+ vus = vec_rl_mask(vus, vus, 0);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_rl_mask(vus, vus, 255);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 255)
+ vsi = vec_rl_mask(vsi, vui, 0);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vsi = vec_rl_mask(vsi, vui, 255);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 255)
+ vui = vec_rl_mask(vui, vui, 0);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_rl_mask(vui, vui, 255);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 255)
+ vsl = vec_rl_mask(vsl, vul, 0);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vsl = vec_rl_mask(vsl, vul, 255);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 255)
+ vul = vec_rl_mask(vul, vul, 0);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vul = vec_rl_mask(vul, vul, 255);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 255)
+
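+ // Every element width of vec_sll funnels through the single byte-granular
+ // vsl intrinsic, so each CHECK line expects <16 x i8> operands; the C-level
+ // operands are presumably bitcast to byte vectors first.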
+ vsc = vec_sll(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sll(vsc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sll(vsc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sll(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sll(vuc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sll(vuc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sll(vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sll(vbc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sll(vbc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sll(vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sll(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sll(vss, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sll(vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sll(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sll(vus, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sll(vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sll(vbs, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sll(vbs, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sll(vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sll(vsi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sll(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sll(vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sll(vui, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sll(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sll(vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sll(vbi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sll(vbi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sll(vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sll(vsl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sll(vsl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sll(vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sll(vul, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sll(vul, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sll(vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sll(vbl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sll(vbl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_slb(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_slb(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_slb(vuc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_slb(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_slb(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_slb(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_slb(vus, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_slb(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_slb(vsi, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_slb(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_slb(vui, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_slb(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_slb(vsl, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_slb(vsl, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_slb(vul, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_slb(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_slb(vd, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_slb(vd, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_sld(vsc, vsc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsc = vec_sld(vsc, vsc, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vuc = vec_sld(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_sld(vuc, vuc, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vss = vec_sld(vss, vss, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_sld(vss, vss, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vus = vec_sld(vus, vus, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_sld(vus, vus, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vsi = vec_sld(vsi, vsi, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsi = vec_sld(vsi, vsi, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vui = vec_sld(vui, vui, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vui = vec_sld(vui, vui, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vsl = vec_sld(vsl, vsl, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsl = vec_sld(vsl, vsl, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vul = vec_sld(vul, vul, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vul = vec_sld(vul, vul, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vd = vec_sld(vd, vd, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vd = vec_sld(vd, vd, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+
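+  // vec_sldw takes its shift count in 32-bit words; it is checked against
+  // the same vsldb intrinsic as vec_sld, with the count scaled to bytes
+  // (an argument of 3 appears as i32 12 below).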
+ vsc = vec_sldw(vsc, vsc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsc = vec_sldw(vsc, vsc, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vuc = vec_sldw(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_sldw(vuc, vuc, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vss = vec_sldw(vss, vss, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_sldw(vss, vss, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vus = vec_sldw(vus, vus, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_sldw(vus, vus, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vsi = vec_sldw(vsi, vsi, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsi = vec_sldw(vsi, vsi, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vui = vec_sldw(vui, vui, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vui = vec_sldw(vui, vui, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vsl = vec_sldw(vsl, vsl, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsl = vec_sldw(vsl, vsl, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vul = vec_sldw(vul, vul, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vul = vec_sldw(vul, vul, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vd = vec_sldw(vd, vd, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vd = vec_sldw(vd, vd, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+
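+  // The right shifts mirror the left shifts above: vec_sral maps to vsra,
+  // vec_srab to vsrab, vec_srl to vsrl and vec_srb to vsrlb, all operating
+  // on the operands reinterpreted as <16 x i8>.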
+ vsc = vec_sral(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sral(vsc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sral(vsc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sral(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sral(vuc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sral(vuc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sral(vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sral(vbc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sral(vbc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sral(vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sral(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sral(vss, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sral(vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sral(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sral(vus, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sral(vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sral(vbs, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sral(vbs, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sral(vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sral(vsi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sral(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sral(vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sral(vui, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sral(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sral(vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sral(vbi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sral(vbi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sral(vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sral(vsl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sral(vsl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sral(vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sral(vul, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sral(vul, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sral(vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sral(vbl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sral(vbl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_srab(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srab(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srab(vuc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srab(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srab(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srab(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srab(vus, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srab(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srab(vsi, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srab(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srab(vui, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srab(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srab(vsl, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srab(vsl, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srab(vul, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srab(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srab(vd, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srab(vd, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_srl(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srl(vsc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srl(vsc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srl(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srl(vuc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srl(vuc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_srl(vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_srl(vbc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_srl(vbc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srl(vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srl(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srl(vss, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srl(vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srl(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srl(vus, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_srl(vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_srl(vbs, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_srl(vbs, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srl(vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srl(vsi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srl(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srl(vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srl(vui, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srl(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_srl(vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_srl(vbi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_srl(vbi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srl(vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srl(vsl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srl(vsl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srl(vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srl(vul, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srl(vul, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_srl(vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_srl(vbl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_srl(vbl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_srb(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srb(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srb(vuc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srb(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srb(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srb(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srb(vus, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srb(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srb(vsi, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srb(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srb(vui, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srb(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srb(vsl, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srb(vsl, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srb(vul, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srb(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srb(vd, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srb(vd, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
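+  // vec_abs, vec_max and vec_min carry no CHECK lines; they are presumably
+  // expanded to generic compare/select IR rather than SystemZ intrinsics.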
+ vsc = vec_abs(vsc);
+ vss = vec_abs(vss);
+ vsi = vec_abs(vsi);
+ vsl = vec_abs(vsl);
+
+ vsc = vec_max(vsc, vsc);
+ vsc = vec_max(vsc, vbc);
+ vsc = vec_max(vbc, vsc);
+ vuc = vec_max(vuc, vuc);
+ vuc = vec_max(vuc, vbc);
+ vuc = vec_max(vbc, vuc);
+ vss = vec_max(vss, vss);
+ vss = vec_max(vss, vbs);
+ vss = vec_max(vbs, vss);
+ vus = vec_max(vus, vus);
+ vus = vec_max(vus, vbs);
+ vus = vec_max(vbs, vus);
+ vsi = vec_max(vsi, vsi);
+ vsi = vec_max(vsi, vbi);
+ vsi = vec_max(vbi, vsi);
+ vui = vec_max(vui, vui);
+ vui = vec_max(vui, vbi);
+ vui = vec_max(vbi, vui);
+ vsl = vec_max(vsl, vsl);
+ vsl = vec_max(vsl, vbl);
+ vsl = vec_max(vbl, vsl);
+ vul = vec_max(vul, vul);
+ vul = vec_max(vul, vbl);
+ vul = vec_max(vbl, vul);
+ vd = vec_max(vd, vd);
+
+ vsc = vec_min(vsc, vsc);
+ vsc = vec_min(vsc, vbc);
+ vsc = vec_min(vbc, vsc);
+ vuc = vec_min(vuc, vuc);
+ vuc = vec_min(vuc, vbc);
+ vuc = vec_min(vbc, vuc);
+ vss = vec_min(vss, vss);
+ vss = vec_min(vss, vbs);
+ vss = vec_min(vbs, vss);
+ vus = vec_min(vus, vus);
+ vus = vec_min(vus, vbs);
+ vus = vec_min(vbs, vus);
+ vsi = vec_min(vsi, vsi);
+ vsi = vec_min(vsi, vbi);
+ vsi = vec_min(vbi, vsi);
+ vui = vec_min(vui, vui);
+ vui = vec_min(vui, vbi);
+ vui = vec_min(vbi, vui);
+ vsl = vec_min(vsl, vsl);
+ vsl = vec_min(vsl, vbl);
+ vsl = vec_min(vbl, vsl);
+ vul = vec_min(vul, vul);
+ vul = vec_min(vul, vbl);
+ vul = vec_min(vbl, vul);
+ vd = vec_min(vd, vd);
+
+ vuc = vec_addc(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vaccb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_addc(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vacch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_addc(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vaccf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_addc(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vaccg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
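+  // The *_u128 builtins treat their <16 x i8> operands as a single 128-bit
+  // integer: vaq is the sum, vaccq the carry out, and vacq/vacccq the
+  // corresponding add-with-carry-in forms.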
+ vuc = vec_add_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vaq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_addc_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vaccq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_adde_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vacq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_addec_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vacccq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_avg(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vavgb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_avg(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vavglb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_avg(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vavgh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_avg(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vavglh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_avg(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vavgf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_avg(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vavglf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_avg(vsl, vsl);
+ // CHECK: call <2 x i64> @llvm.s390.vavgg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_avg(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vavglg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vui = vec_checksum(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vcksm(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vus = vec_gfmsum(vuc, vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vgfmb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_gfmsum(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vgfmh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vul = vec_gfmsum(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vgfmf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vuc = vec_gfmsum_128(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vgfmg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vus = vec_gfmsum_accum(vuc, vuc, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vgfmab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_gfmsum_accum(vus, vus, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vgfmah(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_gfmsum_accum(vui, vui, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vgfmaf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ vuc = vec_gfmsum_accum_128(vul, vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vgfmag(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}})
+
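+  // vec_mladd has no CHECK lines; it is presumably emitted as ordinary
+  // mul/add IR, while the high-part form below uses the vmah*/vmalh*
+  // intrinsics.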
+ vsc = vec_mladd(vsc, vsc, vsc);
+ vsc = vec_mladd(vuc, vsc, vsc);
+ vsc = vec_mladd(vsc, vuc, vuc);
+ vuc = vec_mladd(vuc, vuc, vuc);
+ vss = vec_mladd(vss, vss, vss);
+ vss = vec_mladd(vus, vss, vss);
+ vss = vec_mladd(vss, vus, vus);
+ vus = vec_mladd(vus, vus, vus);
+ vsi = vec_mladd(vsi, vsi, vsi);
+ vsi = vec_mladd(vui, vsi, vsi);
+ vsi = vec_mladd(vsi, vui, vui);
+ vui = vec_mladd(vui, vui, vui);
+
+ vsc = vec_mhadd(vsc, vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vmahb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_mhadd(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vmalhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_mhadd(vss, vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmahh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_mhadd(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmalhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_mhadd(vsi, vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmahf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_mhadd(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmalhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vss = vec_meadd(vsc, vsc, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_meadd(vuc, vuc, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmaleb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_meadd(vss, vss, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_meadd(vus, vus, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmaleh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_meadd(vsi, vsi, vsl);
+ // CHECK: call <2 x i64> @llvm.s390.vmaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_meadd(vui, vui, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vmalef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+
+ vss = vec_moadd(vsc, vsc, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmaob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_moadd(vuc, vuc, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmalob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_moadd(vss, vss, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmaoh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_moadd(vus, vus, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmaloh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_moadd(vsi, vsi, vsl);
+ // CHECK: call <2 x i64> @llvm.s390.vmaof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_moadd(vui, vui, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vmalof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+
+ vsc = vec_mulh(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vmhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_mulh(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vmlhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_mulh(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_mulh(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmlhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_mulh(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_mulh(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmlhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vss = vec_mule(vsc, vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vmeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_mule(vuc, vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vmleb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_mule(vss, vss);
+ // CHECK: call <4 x i32> @llvm.s390.vmeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_mule(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vmleh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsl = vec_mule(vsi, vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vmef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_mule(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vmlef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vss = vec_mulo(vsc, vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vmob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_mulo(vuc, vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vmlob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_mulo(vss, vss);
+ // CHECK: call <4 x i32> @llvm.s390.vmoh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_mulo(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vmloh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsl = vec_mulo(vsi, vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vmof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_mulo(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vmlof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vuc = vec_subc(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vscbib(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_subc(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vscbih(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_subc(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vscbif(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_subc(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vscbig(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vuc = vec_sub_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_subc_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vscbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sube_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_subec_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsbcbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
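+  // The sum-across builtins widen their results: vsumb/vsumh produce word
+  // sums, vsumgh/vsumgf doubleword sums, and vsumqf/vsumqg a 128-bit sum
+  // returned as <16 x i8>.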
+ vui = vec_sum4(vuc, vuc);
+ // CHECK: call <4 x i32> @llvm.s390.vsumb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sum4(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vsumh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vul = vec_sum2(vus, vus);
+ // CHECK: call <2 x i64> @llvm.s390.vsumgh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vul = vec_sum2(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vsumgf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vuc = vec_sum_u128(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsumqf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vuc = vec_sum_u128(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsumqg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
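+  // vec_test_mask is element-type agnostic: every overload is checked
+  // against the same i32 llvm.s390.vtm call on <16 x i8> operands.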
+ idx = vec_test_mask(vsc, vuc);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vuc, vuc);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vss, vus);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vus, vus);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vsi, vui);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vui, vui);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vsl, vul);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vul, vul);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vd, vul);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+}
+
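+// The string-search builtins below come in plain and _cc flavours; the _cc
+// forms also return the condition code through &cc, which is why their
+// intrinsics are checked as returning a { vector, i32 } pair.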
+void test_string(void) {
+ vsc = vec_cp_until_zero(vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}})
+ vuc = vec_cp_until_zero(vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}})
+ vbc = vec_cp_until_zero(vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}})
+ vss = vec_cp_until_zero(vss);
+ // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}})
+ vus = vec_cp_until_zero(vus);
+ // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}})
+ vbs = vec_cp_until_zero(vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}})
+ vsi = vec_cp_until_zero(vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}})
+ vui = vec_cp_until_zero(vui);
+ // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}})
+ vbi = vec_cp_until_zero(vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}})
+
+ vsc = vec_cp_until_zero_cc(vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}})
+ vuc = vec_cp_until_zero_cc(vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}})
+ vbc = vec_cp_until_zero_cc(vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}})
+ vss = vec_cp_until_zero_cc(vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}})
+ vus = vec_cp_until_zero_cc(vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}})
+ vbs = vec_cp_until_zero_cc(vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}})
+ vsi = vec_cp_until_zero_cc(vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}})
+ vui = vec_cp_until_zero_cc(vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}})
+ vbi = vec_cp_until_zero_cc(vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
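+  // The range-compare family is checked against vstrc* (vstrcz* for the
+  // _or_0_ forms) with an immediate mode flag: 4 selects the boolean
+  // in-range result, 0 the index result, and 12/8 the negated variants.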
+ vbc = vec_cmprg(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_cmprg(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_cmprg(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vbc = vec_cmprg_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_cmprg_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_cmprg_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vuc = vec_cmprg_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vuc = vec_cmprg_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vuc = vec_cmprg_or_0_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrczb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_or_0_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrczh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_or_0_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrczf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vuc = vec_cmprg_or_0_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrczbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_or_0_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrczhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_or_0_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrczfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vbc = vec_cmpnrg(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_cmpnrg(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_cmpnrg(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vbc = vec_cmpnrg_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_cmpnrg_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_cmpnrg_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vuc = vec_cmpnrg_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vuc = vec_cmpnrg_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vuc = vec_cmpnrg_or_0_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrczb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_or_0_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrczh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_or_0_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrczf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vuc = vec_cmpnrg_or_0_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrczbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_or_0_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrczhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_or_0_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrczfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
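+  // vec_find_any_eq/ne follow the same immediate scheme on vfae*/vfaez*:
+  // 4 = equal/boolean, 0 = equal/index, 12 = not-equal/boolean,
+  // 8 = not-equal/index.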
+ vbc = vec_find_any_eq(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vbc = vec_find_any_eq_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vsc = vec_find_any_eq_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vsc = vec_find_any_eq_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vsc = vec_find_any_eq_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vsc = vec_find_any_eq_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vbc = vec_find_any_ne(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vbc = vec_find_any_ne_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vsc = vec_find_any_ne_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vsc = vec_find_any_ne_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vsc = vec_find_any_ne_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vsc = vec_find_any_ne_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+}
+
+void test_float(void) {
+ vd = vec_abs(vd);
+ // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+
+ vd = vec_nabs(vd);
+ // CHECK: [[ABS:%[^ ]+]] = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+ // CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[ABS]]
+
+ vd = vec_madd(vd, vd, vd);
+ // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+ vd = vec_msub(vd, vd, vd);
+ // CHECK: [[NEG:%[^ ]+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.*}}
+ // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
+ vd = vec_sqrt(vd);
+ // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{.*}})
+
+ vd = vec_ld2f(cptrf);
+ // CHECK: [[VAL:%[^ ]+]] = load <2 x float>, <2 x float>* %{{.*}}
+ // CHECK: fpext <2 x float> [[VAL]] to <2 x double>
+ vec_st2f(vd, ptrf);
+ // CHECK: [[VAL:%[^ ]+]] = fptrunc <2 x double> %{{.*}} to <2 x float>
+ // CHECK: store <2 x float> [[VAL]], <2 x float>* %{{.*}}
+
+ vd = vec_ctd(vsl, 0);
+ // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
+ vd = vec_ctd(vul, 0);
+ // CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
+ vd = vec_ctd(vsl, 1);
+ // CHECK: [[VAL:%[^ ]+]] = sitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 5.000000e-01, double 5.000000e-01>
+ vd = vec_ctd(vul, 1);
+ // CHECK: [[VAL:%[^ ]+]] = uitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 5.000000e-01, double 5.000000e-01>
+ vd = vec_ctd(vsl, 31);
+ // CHECK: [[VAL:%[^ ]+]] = sitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 0x3E00000000000000, double 0x3E00000000000000>
+ vd = vec_ctd(vul, 31);
+ // CHECK: [[VAL:%[^ ]+]] = uitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 0x3E00000000000000, double 0x3E00000000000000>
+
+ vsl = vec_ctsl(vd, 0);
+ // CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
+ vul = vec_ctul(vd, 0);
+ // CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
+ vsl = vec_ctsl(vd, 1);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 2.000000e+00, double 2.000000e+00>
+ // CHECK: fptosi <2 x double> [[VAL]] to <2 x i64>
+ vul = vec_ctul(vd, 1);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 2.000000e+00, double 2.000000e+00>
+ // CHECK: fptoui <2 x double> [[VAL]] to <2 x i64>
+ vsl = vec_ctsl(vd, 31);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 0x41E0000000000000, double 0x41E0000000000000>
+ // CHECK: fptosi <2 x double> [[VAL]] to <2 x i64>
+ vul = vec_ctul(vd, 31);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 0x41E0000000000000, double 0x41E0000000000000>
+ // CHECK: fptoui <2 x double> [[VAL]] to <2 x i64>
+
+ vd = vec_roundp(vd);
+ // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})
+ vd = vec_ceil(vd);
+ // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})
+ vd = vec_roundm(vd);
+ // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})
+ vd = vec_floor(vd);
+ // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})
+ vd = vec_roundz(vd);
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})
+ vd = vec_trunc(vd);
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})
+ vd = vec_roundc(vd);
+ // CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{.*}})
+ vd = vec_round(vd);
+ // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
+
+ vbl = vec_fp_test_data_class(vd, 0, &cc);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 0)
+ vbl = vec_fp_test_data_class(vd, 4095, &cc);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
+}
diff --git a/test/CodeGen/integer-overflow.c b/test/CodeGen/integer-overflow.c
index de3b53f4b5b3..6a7c3e51ee1b 100644
--- a/test/CodeGen/integer-overflow.c
+++ b/test/CodeGen/integer-overflow.c
@@ -72,4 +72,11 @@ void test1() {
// TRAPV: add i8 {{.*}}, 1
// CATCH_UB: add i8 {{.*}}, 1
++PR9350;
+
+ // PR24256: don't instrument __builtin_frame_address.
+ __builtin_frame_address(0 + 0);
+ // DEFAULT: call i8* @llvm.frameaddress(i32 0)
+ // WRAPV: call i8* @llvm.frameaddress(i32 0)
+ // TRAPV: call i8* @llvm.frameaddress(i32 0)
+ // CATCH_UB: call i8* @llvm.frameaddress(i32 0)
}
diff --git a/test/CodeGen/le32-regparm.c b/test/CodeGen/le32-regparm.c
index c8f70694c43d..ecb1030aa1ff 100644
--- a/test/CodeGen/le32-regparm.c
+++ b/test/CodeGen/le32-regparm.c
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple le32-unknown-nacl %s -fsyntax-only -verify
+// RUN: %clang_cc1 -triple aarch64 %s -fsyntax-only -verify
void __attribute__((regparm(2))) fc_f1(int i, int j, int k) {} // expected-error{{'regparm' is not valid on this platform}}
diff --git a/test/CodeGen/long_double_fp128.cpp b/test/CodeGen/long_double_fp128.cpp
new file mode 100644
index 000000000000..1780255cea97
--- /dev/null
+++ b/test/CodeGen/long_double_fp128.cpp
@@ -0,0 +1,22 @@
+// RUN: %clang_cc1 -triple x86_64-linux-android -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=A64
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=G64
+// RUN: %clang_cc1 -triple powerpc64-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=P64
+// RUN: %clang_cc1 -triple i686-linux-android -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=A32
+// RUN: %clang_cc1 -triple i686-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=G32
+// RUN: %clang_cc1 -triple powerpc-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=P32
+
+// Check mangled name of long double.
+// Android's gcc and llvm use fp128 for long double.
+void test(long, float, double, long double, long double _Complex) { }
+// A64: define void @_Z4testlfdgCg(i64, float, double, fp128, { fp128, fp128 }*
+// G64: define void @_Z4testlfdeCe(i64, float, double, x86_fp80, { x86_fp80, x86_fp80 }*
+// P64: define void @_Z4testlfdgCg(i64, float, double, ppc_fp128, ppc_fp128 {{.*}}, ppc_fp128
+// A32: define void @_Z4testlfdeCe(i32, float, double, double, { double, double }*
+// G32: define void @_Z4testlfdeCe(i32, float, double, x86_fp80, { x86_fp80, x86_fp80 }*
+// P32: define void @_Z4testlfdgCg(i32, float, double, ppc_fp128, { ppc_fp128, ppc_fp128 }*
diff --git a/test/CodeGen/palignr.c b/test/CodeGen/palignr.c
index 1712df5256ce..5a77597c3403 100644
--- a/test/CodeGen/palignr.c
+++ b/test/CodeGen/palignr.c
@@ -4,13 +4,13 @@
#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
typedef __attribute__((vector_size(16))) int int4;
-// CHECK: palignr
+// CHECK: palignr $15, %xmm1, %xmm0
int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
// CHECK: ret
// CHECK: ret
// CHECK-NOT: palignr
int4 align2(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 16); }
-// CHECK: psrldq
+// CHECK: psrldq $1, %xmm0
int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }
// CHECK: xor
int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }
diff --git a/test/CodeGen/x86_64-fp128.c b/test/CodeGen/x86_64-fp128.c
new file mode 100644
index 000000000000..0147721f9b6a
--- /dev/null
+++ b/test/CodeGen/x86_64-fp128.c
@@ -0,0 +1,115 @@
+// RUN: %clang_cc1 -triple x86_64-linux-android -emit-llvm -O -o - %s \
+// RUN: | FileCheck %s --check-prefix=ANDROID --check-prefix=CHECK
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -O -o - %s \
+// RUN: | FileCheck %s --check-prefix=GNU --check-prefix=CHECK
+// RUN: %clang_cc1 -triple x86_64 -emit-llvm -O -o - %s \
+// RUN: | FileCheck %s --check-prefix=GNU --check-prefix=CHECK
+
+// Android uses fp128 for long double, but other x86_64 targets use x86_fp80.
+
+long double dataLD = 1.0L;
+// ANDROID: @dataLD = global fp128 0xL00000000000000003FFF000000000000, align 16
+// GNU: @dataLD = global x86_fp80 0xK3FFF8000000000000000, align 16
+
+long double _Complex dataLDC = {1.0L, 1.0L};
+// ANDROID: @dataLDC = global { fp128, fp128 } { fp128 0xL00000000000000003FFF000000000000, fp128 0xL00000000000000003FFF000000000000 }, align 16
+// GNU: @dataLDC = global { x86_fp80, x86_fp80 } { x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000 }, align 16
+
+long double TestLD(long double x) {
+ return x * x;
+// ANDROID: define fp128 @TestLD(fp128 %x)
+// GNU: define x86_fp80 @TestLD(x86_fp80 %x)
+}
+
+long double _Complex TestLDC(long double _Complex x) {
+ return x * x;
+// ANDROID: define void @TestLDC({ fp128, fp128 }* {{.*}}, { fp128, fp128 }* {{.*}} %x)
+// GNU: define { x86_fp80, x86_fp80 } @TestLDC({ x86_fp80, x86_fp80 }* {{.*}} %x)
+}
+
+typedef __builtin_va_list va_list;
+
+int TestGetVarInt(va_list ap) {
+ return __builtin_va_arg(ap, int);
+// Since int can be passed either in memory or in a register, there are two branches.
+// CHECK: define i32 @TestGetVarInt(
+// CHECK: br label
+// CHECK: br label
+// CHECK: = phi
+// CHECK: ret i32
+}
+
+double TestGetVarDouble(va_list ap) {
+ return __builtin_va_arg(ap, double);
+// Since double can be passed either in memory or in a register, there are two branches.
+// CHECK: define double @TestGetVarDouble(
+// CHECK: br label
+// CHECK: br label
+// CHECK: = phi
+// CHECK: ret double
+}
+
+long double TestGetVarLD(va_list ap) {
+ return __builtin_va_arg(ap, long double);
+// fp128 can be passed in memory or in a register, but x86_fp80 is always passed in memory.
+// ANDROID: define fp128 @TestGetVarLD(
+// GNU: define x86_fp80 @TestGetVarLD(
+// ANDROID: br label
+// ANDROID: br label
+// ANDROID: = phi
+// GNU-NOT: br
+// GNU-NOT: = phi
+// ANDROID: ret fp128
+// GNU: ret x86_fp80
+}
+
+long double _Complex TestGetVarLDC(va_list ap) {
+ return __builtin_va_arg(ap, long double _Complex);
+// A pair of fp128 or x86_fp80 values is passed as a struct in memory.
+// ANDROID: define void @TestGetVarLDC({ fp128, fp128 }* {{.*}}, %struct.__va_list_tag*
+// GNU: define { x86_fp80, x86_fp80 } @TestGetVarLDC(
+// CHECK-NOT: br
+// CHECK-NOT: phi
+// ANDROID: ret void
+// GNU: ret { x86_fp80, x86_fp80 }
+}
+
+void TestVarArg(const char *s, ...);
+
+void TestPassVarInt(int x) {
+ TestVarArg("A", x);
+// CHECK: define void @TestPassVarInt(i32 %x)
+// CHECK: call {{.*}} @TestVarArg(i8* {{.*}}, i32 %x)
+}
+
+void TestPassVarFloat(float x) {
+ TestVarArg("A", x);
+// CHECK: define void @TestPassVarFloat(float %x)
+// CHECK: call {{.*}} @TestVarArg(i8* {{.*}}, double %
+}
+
+void TestPassVarDouble(double x) {
+ TestVarArg("A", x);
+// CHECK: define void @TestPassVarDouble(double %x)
+// CHECK: call {{.*}} @TestVarArg(i8* {{.*}}, double %x
+}
+
+void TestPassVarLD(long double x) {
+ TestVarArg("A", x);
+// ANDROID: define void @TestPassVarLD(fp128 %x)
+// ANDROID: call {{.*}} @TestVarArg(i8* {{.*}}, fp128 %x
+// GNU: define void @TestPassVarLD(x86_fp80 %x)
+// GNU: call {{.*}} @TestVarArg(i8* {{.*}}, x86_fp80 %x
+}
+
+void TestPassVarLDC(long double _Complex x) {
+ TestVarArg("A", x);
+// ANDROID: define void @TestPassVarLDC({ fp128, fp128 }* {{.*}} %x)
+// ANDROID: store fp128 %{{.*}}, fp128* %
+// ANDROID-NEXT: store fp128 %{{.*}}, fp128* %
+// ANDROID-NEXT: call {{.*}} @TestVarArg(i8* {{.*}}, { fp128, fp128 }* {{.*}} %
+// GNU: define void @TestPassVarLDC({ x86_fp80, x86_fp80 }* {{.*}} %x)
+// GNU: store x86_fp80 %{{.*}}, x86_fp80* %
+// GNU-NEXT: store x86_fp80 %{{.*}}, x86_fp80* %
+// GNU-NEXT: call {{.*}} @TestVarArg(i8* {{.*}}, { x86_fp80, x86_fp80 }* {{.*}} %
+}
diff --git a/test/CodeGen/zvector.c b/test/CodeGen/zvector.c
new file mode 100644
index 000000000000..ebe7e415e1db
--- /dev/null
+++ b/test/CodeGen/zvector.c
@@ -0,0 +1,2798 @@
+// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector \
+// RUN: -O -emit-llvm -o - -W -Wall -Werror %s | FileCheck %s
+
+volatile vector signed char sc, sc2;
+volatile vector unsigned char uc, uc2;
+volatile vector bool char bc, bc2;
+
+volatile vector signed short ss, ss2;
+volatile vector unsigned short us, us2;
+volatile vector bool short bs, bs2;
+
+volatile vector signed int si, si2;
+volatile vector unsigned int ui, ui2;
+volatile vector bool int bi, bi2;
+
+volatile vector signed long long sl, sl2;
+volatile vector unsigned long long ul, ul2;
+volatile vector bool long long bl, bl2;
+
+volatile vector double fd, fd2;
+
+volatile int cnt;
+
+void test_assign (void)
+{
+// CHECK-LABEL: test_assign
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
+ sc = sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
+ uc = uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
+ ss = ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
+ us = us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
+ si = si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
+ ui = ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
+ sl = sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
+ ul = ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
+ fd = fd2;
+}
+
+void test_pos (void)
+{
+// CHECK-LABEL: test_pos
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
+ sc = +sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
+ uc = +uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
+ ss = +ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
+ us = +us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
+ si = +si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
+ ui = +ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
+ sl = +sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
+ ul = +ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
+ fd = +fd2;
+}
+
+void test_neg (void)
+{
+// CHECK-LABEL: test_neg
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sub <16 x i8> zeroinitializer, [[VAL]]
+ sc = -sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sub <8 x i16> zeroinitializer, [[VAL]]
+ ss = -ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sub <4 x i32> zeroinitializer, [[VAL]]
+ si = -si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sub <2 x i64> zeroinitializer, [[VAL]]
+ sl = -sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[VAL]]
+ fd = -fd2;
+}
+
+void test_preinc (void)
+{
+// CHECK-LABEL: test_preinc
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ++sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ++uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ++ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ++us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ ++si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ ++ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ ++sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ ++ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
+ ++fd2;
+}
+
+void test_postinc (void)
+{
+// CHECK-LABEL: test_postinc
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ sc2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ uc2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ss2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ us2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ si2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ ui2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ sl2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ ul2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
+ fd2++;
+}
+
+void test_predec (void)
+{
+// CHECK-LABEL: test_predec
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ --sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ --uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ --ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ --us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ --si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ --ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ --sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ --ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
+ --fd2;
+}
+
+void test_postdec (void)
+{
+// CHECK-LABEL: test_postdec
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ sc2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ uc2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ss2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ us2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ si2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ui2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ sl2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ ul2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
+ fd2--;
+}
+
+void test_add (void)
+{
+// CHECK-LABEL: test_add
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc + sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc + bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ sc = bc + sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc + uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc + bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ uc = bc + uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss + ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss + bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ ss = bs + ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ us = us + us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ us = us + bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ us = bs + us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ si = si + si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ si = si + bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ si = bi + si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui + ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui + bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ ui = bi + ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl + sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl + bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ sl = bl + sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul + ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul + bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ ul = bl + ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd + fd2;
+}
+
+void test_add_assign (void)
+{
+// CHECK-LABEL: test_add_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ sc += sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ sc += bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ uc += uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ uc += bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ ss += ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ ss += bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ us += us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ us += bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ si += si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ si += bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ ui += ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ ui += bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ sl += sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ sl += bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ ul += ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ ul += bl2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL2]], [[VAL1]]
+ fd += fd2;
+}
+
+void test_sub (void)
+{
+// CHECK-LABEL: test_sub
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc - sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc - bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc = bc - sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc - uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc - bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc = bc - uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss - ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss - bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss = bs - ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us = us - us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us = us - bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us = bs - us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si = si - si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si = si - bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si = bi - si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui - ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui - bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui = bi - ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl - sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl - bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl = bl - sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul - ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul - bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul = bl - ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd - fd2;
+}
+
+void test_sub_assign (void)
+{
+// CHECK-LABEL: test_sub_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc -= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc -= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc -= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc -= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss -= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss -= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us -= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us -= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si -= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si -= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui -= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui -= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl -= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl -= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul -= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul -= bl2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
+ fd -= fd2;
+}
+
+void test_mul (void)
+{
+// CHECK-LABEL: test_mul
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc * sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc * uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss * ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
+ us = us * us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
+ si = si * si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui * ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl * sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul * ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fmul <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd * fd2;
+}
+
+void test_mul_assign (void)
+{
+// CHECK-LABEL: test_mul_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
+ sc *= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
+ uc *= uc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
+ ss *= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
+ us *= us2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
+ si *= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
+ ui *= ui2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
+ sl *= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
+ ul *= ul2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fmul <2 x double> [[VAL2]], [[VAL1]]
+ fd *= fd2;
+}
+
+void test_div (void)
+{
+// CHECK-LABEL: test_div
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc / sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc / uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss / ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
+ us = us / us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
+ si = si / si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui / ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl / sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul / ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd / fd2;
+}
+
+void test_div_assign (void)
+{
+// CHECK-LABEL: test_div_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
+ sc /= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
+ uc /= uc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
+ ss /= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
+ us /= us2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
+ si /= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
+ ui /= ui2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
+ sl /= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
+ ul /= ul2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
+ fd /= fd2;
+}
+
+void test_rem (void)
+{
+// CHECK-LABEL: test_rem
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc % sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc % uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss % ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
+ us = us % us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
+ si = si % si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui % ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl % sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul % ul2;
+}
+
+void test_rem_assign (void)
+{
+// CHECK-LABEL: test_rem_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
+ sc %= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
+ uc %= uc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
+ ss %= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
+ us %= us2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
+ si %= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
+ ui %= ui2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
+ sl %= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
+ ul %= ul2;
+}
+
+void test_not (void)
+{
+// CHECK-LABEL: test_not
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ sc = ~sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ uc = ~uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ bc = ~bc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ss = ~ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ us = ~us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ bs = ~bs2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ si = ~si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ui = ~ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ bi = ~bi2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ sl = ~sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ ul = ~ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ bl = ~bl2;
+}
+
+void test_and (void)
+{
+// CHECK-LABEL: test_and
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc & sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc & bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ sc = bc & sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc & uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc & bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ uc = bc & uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ bc = bc & bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss & ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss & bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ ss = bs & ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ us = us & us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ us = us & bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ us = bs & us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ bs = bs & bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ si = si & si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ si = si & bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ si = bi & si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui & ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui & bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ ui = bi & ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ bi = bi & bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl & sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl & bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ sl = bl & sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul & ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul & bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ ul = bl & ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ bl = bl & bl2;
+}
+
+void test_and_assign (void)
+{
+// CHECK-LABEL: test_and_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ sc &= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ sc &= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ uc &= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ uc &= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ bc &= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ ss &= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ ss &= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ us &= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ us &= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ bs &= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ si &= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ si &= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ ui &= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ ui &= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ bi &= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ sl &= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ sl &= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ ul &= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ ul &= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ bl &= bl2;
+}
+
+void test_or (void)
+{
+// CHECK-LABEL: test_or
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc | sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc | bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ sc = bc | sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc | uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc | bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ uc = bc | uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ bc = bc | bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss | ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss | bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ ss = bs | ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ us = us | us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ us = us | bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ us = bs | us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ bs = bs | bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ si = si | si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ si = si | bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ si = bi | si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui | ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui | bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ ui = bi | ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ bi = bi | bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl | sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl | bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ sl = bl | sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul | ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul | bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ ul = bl | ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ bl = bl | bl2;
+}
+
+void test_or_assign (void)
+{
+// CHECK-LABEL: test_or_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ sc |= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ sc |= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ uc |= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ uc |= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ bc |= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ ss |= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ ss |= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ us |= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ us |= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ bs |= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ si |= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ si |= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ ui |= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ ui |= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ bi |= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ sl |= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ sl |= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ ul |= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ ul |= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ bl |= bl2;
+}
+
+void test_xor (void)
+{
+// CHECK-LABEL: test_xor
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc ^ sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc ^ bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc = bc ^ sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc ^ uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc ^ bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc = bc ^ uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ bc = bc ^ bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss ^ ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss ^ bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss = bs ^ ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us = us ^ us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us = us ^ bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us = bs ^ us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ bs = bs ^ bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si = si ^ si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si = si ^ bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si = bi ^ si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui ^ ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui ^ bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui = bi ^ ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ bi = bi ^ bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl ^ sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl ^ bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl = bl ^ sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul ^ ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul ^ bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul = bl ^ ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ bl = bl ^ bl2;
+}
+
+void test_xor_assign (void)
+{
+// CHECK-LABEL: test_xor_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ sc ^= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ sc ^= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ uc ^= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ uc ^= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ bc ^= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ ss ^= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ ss ^= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ us ^= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ us ^= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ bs ^= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ si ^= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ si ^= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ ui ^= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ ui ^= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ bi ^= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ sl ^= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ sl ^= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ ul ^= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ ul ^= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ bl ^= bl2;
+}
+
+void test_sl (void)
+{
+// CHECK-LABEL: test_sl
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc = sc << sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc = sc << uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc = sc << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc = sc << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc = uc << sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc = uc << uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc = uc << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc = uc << 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss = ss << ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss = ss << us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss = ss << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss = ss << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us = us << ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us = us << us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us = us << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us = us << 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si = si << si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si = si << ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si = si << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si = si << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui = ui << si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui = ui << ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui = ui << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui = ui << 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl = sl << sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl = sl << ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl = sl << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl = sl << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul = ul << sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul = ul << ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul = ul << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul = ul << 5;
+}
+
+void test_sl_assign (void)
+{
+// CHECK-LABEL: test_sl_assign
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc <<= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc <<= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc <<= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc <<= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc <<= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss <<= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss <<= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us <<= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us <<= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us <<= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si <<= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si <<= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui <<= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui <<= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui <<= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl <<= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl <<= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul <<= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul <<= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul <<= 5;
+}
+
+void test_sr (void)
+{
+// CHECK-LABEL: test_sr
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc = sc >> sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc = sc >> uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc = sc >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc = sc >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc = uc >> sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc = uc >> uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc = uc >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc = uc >> 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss = ss >> ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss = ss >> us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss = ss >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss = ss >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us = us >> ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us = us >> us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us = us >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us = us >> 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si = si >> si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si = si >> ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si = si >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si = si >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui = ui >> si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui = ui >> ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui = ui >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui = ui >> 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl = sl >> sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl = sl >> ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl = sl >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl = sl >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul = ul >> sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul = ul >> ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul = ul >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul = ul >> 5;
+}
+
+void test_sr_assign (void)
+{
+// CHECK-LABEL: test_sr_assign
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc >>= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc >>= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc >>= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc >>= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc >>= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss >>= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss >>= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us >>= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us >>= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us >>= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si >>= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si >>= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui >>= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui >>= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui >>= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl >>= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl >>= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul >>= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul >>= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul >>= 5;
+}
+
+
+void test_cmpeq (void)
+{
+// CHECK-LABEL: test_cmpeq
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc == sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc == bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc == sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc == uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc == bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc == uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc == bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss == ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss == bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs == ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us == us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us == bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs == us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs == bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si == si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si == bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi == si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui == ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui == bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi == ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi == bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl == sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl == bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl == sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul == ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul == bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl == ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl == bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp oeq <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd == fd2;
+}
+
+void test_cmpne (void)
+{
+// CHECK-LABEL: test_cmpne
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc != sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc != bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc != sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc != uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc != bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc != uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc != bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss != ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss != bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs != ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us != us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us != bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs != us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs != bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si != si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si != bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi != si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui != ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui != bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi != ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi != bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl != sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl != bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl != sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul != ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul != bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl != ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl != bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp une <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd != fd2;
+}
+
+void test_cmpge (void)
+{
+// CHECK-LABEL: test_cmpge
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc >= sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc >= uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc >= bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss >= ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us >= us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs >= bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si >= si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui >= ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi >= bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl >= sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul >= ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl >= bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp oge <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd >= fd2;
+}
+
+void test_cmpgt (void)
+{
+// CHECK-LABEL: test_cmpgt
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc > sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc > uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc > bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss > ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us > us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs > bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si > si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui > ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi > bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl > sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul > ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl > bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp ogt <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd > fd2;
+}
+
+void test_cmple (void)
+{
+// CHECK-LABEL: test_cmple
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc <= sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc <= uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc <= bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss <= ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us <= us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs <= bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si <= si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui <= ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi <= bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl <= sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul <= ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl <= bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp ole <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd <= fd2;
+}
+
+void test_cmplt (void)
+{
+// CHECK-LABEL: test_cmplt
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc < sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc < uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc < bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss < ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us < us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs < bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si < si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui < ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi < bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl < sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul < ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl < bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp olt <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd < fd2;
+}
+
diff --git a/test/CodeGenCXX/dllimport-rtti.cpp b/test/CodeGenCXX/dllimport-rtti.cpp
index 8c0f86306cde..6fe67daa38d7 100644
--- a/test/CodeGenCXX/dllimport-rtti.cpp
+++ b/test/CodeGenCXX/dllimport-rtti.cpp
@@ -15,3 +15,16 @@ struct __declspec(dllimport) S {
struct U : S {
} u;
+
+struct __declspec(dllimport) V {
+ virtual void f();
+} v;
+// GNU-DAG: @_ZTV1V = external dllimport
+
+struct W {
+ __declspec(dllimport) virtual void f();
+ virtual void g();
+} w;
+// GNU-DAG: @_ZTV1W = linkonce_odr
+// GNU-DAG: @_ZTS1W = linkonce_odr
+// GNU-DAG: @_ZTI1W = linkonce_odr
diff --git a/test/CodeGenCXX/thunks.cpp b/test/CodeGenCXX/thunks.cpp
index 0e97255cc6fa..38afb9d0dbf7 100644
--- a/test/CodeGenCXX/thunks.cpp
+++ b/test/CodeGenCXX/thunks.cpp
@@ -361,6 +361,23 @@ namespace Test15 {
// CHECK: declare void @_ZThn8_N6Test151C1fEiz
}
+namespace Test16 {
+struct A {
+ virtual ~A();
+};
+struct B {
+ virtual void foo();
+};
+struct C : public A, public B {
+ void foo() {}
+};
+struct D : public C {
+ ~D();
+};
+D::~D() {}
+// CHECK: define linkonce_odr void @_ZThn8_N6Test161C3fooEv({{.*}}) {{.*}} comdat
+}
+
/**** The following has to go at the end of the file ****/
// This is from Test5:
diff --git a/test/CodeGenObjC/Inputs/nsvalue-boxed-expressions-support.h b/test/CodeGenObjC/Inputs/nsvalue-boxed-expressions-support.h
new file mode 100644
index 000000000000..cbcf882b27e3
--- /dev/null
+++ b/test/CodeGenObjC/Inputs/nsvalue-boxed-expressions-support.h
@@ -0,0 +1,63 @@
+#ifndef NSVALUE_BOXED_EXPRESSIONS_SUPPORT_H
+#define NSVALUE_BOXED_EXPRESSIONS_SUPPORT_H
+
+#define BOXABLE __attribute__((objc_boxable))
+
+typedef unsigned long NSUInteger;
+typedef double CGFloat;
+
+typedef struct BOXABLE _NSRange {
+ NSUInteger location;
+ NSUInteger length;
+} NSRange;
+
+typedef struct BOXABLE _NSPoint {
+ CGFloat x;
+ CGFloat y;
+} NSPoint;
+
+typedef struct BOXABLE _NSSize {
+ CGFloat width;
+ CGFloat height;
+} NSSize;
+
+typedef struct BOXABLE _NSRect {
+ NSPoint origin;
+ NSSize size;
+} NSRect;
+
+struct CGPoint {
+ CGFloat x;
+ CGFloat y;
+};
+typedef struct BOXABLE CGPoint CGPoint;
+
+struct CGSize {
+ CGFloat width;
+ CGFloat height;
+};
+typedef struct BOXABLE CGSize CGSize;
+
+struct CGRect {
+ CGPoint origin;
+ CGSize size;
+};
+typedef struct BOXABLE CGRect CGRect;
+
+struct NSEdgeInsets {
+ CGFloat top;
+ CGFloat left;
+ CGFloat bottom;
+ CGFloat right;
+};
+typedef struct BOXABLE NSEdgeInsets NSEdgeInsets;
+
+@interface NSValue
+
++ (NSValue *)valueWithBytes:(const void *)value objCType:(const char *)type;
+
+@end
+
+NSRange getRange();
+
+#endif // NSVALUE_BOXED_EXPRESSIONS_SUPPORT_H
diff --git a/test/CodeGenObjC/nsvalue-objc-boxable-ios-arc.m b/test/CodeGenObjC/nsvalue-objc-boxable-ios-arc.m
new file mode 100644
index 000000000000..75e04dbd0272
--- /dev/null
+++ b/test/CodeGenObjC/nsvalue-objc-boxable-ios-arc.m
@@ -0,0 +1,126 @@
+// RUN: %clang_cc1 -I %S/Inputs -triple armv7-apple-ios8.0.0 -emit-llvm -fobjc-arc -O2 -disable-llvm-optzns -o - %s | FileCheck %s
+
+#import "nsvalue-boxed-expressions-support.h"
+
+// CHECK: [[CLASS:@.*]] = external global %struct._class_t
+// CHECK: [[NSVALUE:@.*]] = {{.*}}[[CLASS]]{{.*}}
+// CHECK: [[RANGE_STR:.*]] = {{.*}}_NSRange=II{{.*}}
+// CHECK: [[METH:@.*]] = private global{{.*}}valueWithBytes:objCType:{{.*}}
+// CHECK: [[VALUE_SEL:@.*]] = {{.*}}[[METH]]{{.*}}
+// CHECK: [[POINT_STR:.*]] = {{.*}}CGPoint=dd{{.*}}
+// CHECK: [[SIZE_STR:.*]] = {{.*}}CGSize=dd{{.*}}
+// CHECK: [[RECT_STR:.*]] = {{.*}}CGRect={CGPoint=dd}{CGSize=dd}}{{.*}}
+// CHECK: [[EDGE_STR:.*]] = {{.*}}NSEdgeInsets=dddd{{.*}}
+
+// CHECK-LABEL: define void @doRange()
+void doRange() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRange* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSRange ns_range = { .location = 0, .length = 42 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *range = @(ns_range);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doPoint()
+void doPoint() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGPoint{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGPoint{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGPoint* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGPoint cg_point = { .x = 42, .y = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[POINT_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *point = @(cg_point);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doSize()
+void doSize() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGSize{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGSize{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGSize* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGSize cg_size = { .width = 42, .height = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[SIZE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *size = @(cg_size);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRect()
+void doRect() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGRect{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGRect{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGRect* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGPoint cg_point = { .x = 42, .y = 24 };
+ CGSize cg_size = { .width = 42, .height = 24 };
+ CGRect cg_rect = { .origin = cg_point, .size = cg_size };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[RECT_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *rect = @(cg_rect);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doNSEdgeInsets()
+void doNSEdgeInsets() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSEdgeInsets ns_edge_insets;
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[EDGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *edge_insets = @(ns_edge_insets);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRangeRValue()
+void doRangeRValue() {
+ // CHECK: [[COERCE:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: call {{.*}} @getRange {{.*}} [[COERCE]]{{.*}}
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[COERCE_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *range_rvalue = @(getRange());
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
diff --git a/test/CodeGenObjC/nsvalue-objc-boxable-ios.m b/test/CodeGenObjC/nsvalue-objc-boxable-ios.m
new file mode 100644
index 000000000000..3443babdc9d0
--- /dev/null
+++ b/test/CodeGenObjC/nsvalue-objc-boxable-ios.m
@@ -0,0 +1,114 @@
+// RUN: %clang_cc1 -I %S/Inputs -triple armv7-apple-ios8.0.0 -emit-llvm -O2 -disable-llvm-optzns -o - %s | FileCheck %s
+
+#import "nsvalue-boxed-expressions-support.h"
+
+// CHECK: [[CLASS:@.*]] = external global %struct._class_t
+// CHECK: [[NSVALUE:@.*]] = {{.*}}[[CLASS]]{{.*}}
+// CHECK: [[RANGE_STR:.*]] = {{.*}}_NSRange=II{{.*}}
+// CHECK: [[METH:@.*]] = private global{{.*}}valueWithBytes:objCType:{{.*}}
+// CHECK: [[VALUE_SEL:@.*]] = {{.*}}[[METH]]{{.*}}
+// CHECK: [[POINT_STR:.*]] = {{.*}}CGPoint=dd{{.*}}
+// CHECK: [[SIZE_STR:.*]] = {{.*}}CGSize=dd{{.*}}
+// CHECK: [[RECT_STR:.*]] = {{.*}}CGRect={CGPoint=dd}{CGSize=dd}}{{.*}}
+// CHECK: [[EDGE_STR:.*]] = {{.*}}NSEdgeInsets=dddd{{.*}}
+
+// CHECK-LABEL: define void @doRange()
+void doRange() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRange* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSRange ns_range = { .location = 0, .length = 42 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ NSValue *range = @(ns_range);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doPoint()
+void doPoint() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGPoint{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGPoint{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGPoint* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGPoint cg_point = { .x = 42, .y = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[POINT_STR]]{{.*}})
+ NSValue *point = @(cg_point);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doSize()
+void doSize() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGSize{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGSize{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGSize* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGSize cg_size = { .width = 42, .height = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[SIZE_STR]]{{.*}})
+ NSValue *size = @(cg_size);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRect()
+void doRect() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGRect{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGRect{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGRect* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGPoint cg_point = { .x = 42, .y = 24 };
+ CGSize cg_size = { .width = 42, .height = 24 };
+ CGRect cg_rect = { .origin = cg_point, .size = cg_size };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[RECT_STR]]{{.*}})
+ NSValue *rect = @(cg_rect);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doNSEdgeInsets()
+void doNSEdgeInsets() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSEdgeInsets ns_edge_insets;
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[EDGE_STR]]{{.*}})
+ NSValue *edge_insets = @(ns_edge_insets);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRangeRValue()
+void doRangeRValue() {
+ // CHECK: [[COERCE:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: call {{.*}} @getRange {{.*}} [[COERCE]]{{.*}}
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[COERCE_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ NSValue *range_rvalue = @(getRange());
+ // CHECK: ret void
+}
+
diff --git a/test/CodeGenObjC/nsvalue-objc-boxable-mac-arc.m b/test/CodeGenObjC/nsvalue-objc-boxable-mac-arc.m
new file mode 100644
index 000000000000..92c91d7d3810
--- /dev/null
+++ b/test/CodeGenObjC/nsvalue-objc-boxable-mac-arc.m
@@ -0,0 +1,130 @@
+// RUN: %clang_cc1 -I %S/Inputs -triple x86_64-apple-macosx -emit-llvm -fobjc-arc -O2 -disable-llvm-optzns -o - %s | FileCheck %s
+
+#import "nsvalue-boxed-expressions-support.h"
+
+// CHECK: [[CLASS:@.*]] = external global %struct._class_t
+// CHECK: [[NSVALUE:@.*]] = {{.*}}[[CLASS]]{{.*}}
+// CHECK: [[RANGE_STR:.*]] = {{.*}}_NSRange=QQ{{.*}}
+// CHECK: [[METH:@.*]] = private global{{.*}}valueWithBytes:objCType:{{.*}}
+// CHECK: [[VALUE_SEL:@.*]] = {{.*}}[[METH]]{{.*}}
+// CHECK: [[POINT_STR:.*]] = {{.*}}_NSPoint=dd{{.*}}
+// CHECK: [[SIZE_STR:.*]] = {{.*}}_NSSize=dd{{.*}}
+// CHECK: [[RECT_STR:.*]] = {{.*}}_NSRect={_NSPoint=dd}{_NSSize=dd}}{{.*}}
+// CHECK: [[EDGE_STR:.*]] = {{.*}}NSEdgeInsets=dddd{{.*}}
+
+// CHECK-LABEL: define void @doRange()
+void doRange() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRange* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSRange ns_range = { .location = 0, .length = 42 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *range = @(ns_range);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doPoint()
+void doPoint() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSPoint{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSPoint{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSPoint* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSPoint ns_point = { .x = 42, .y = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[POINT_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *point = @(ns_point);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doSize()
+void doSize() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSSize{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSSize{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSSize* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSSize ns_size = { .width = 42, .height = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[SIZE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *size = @(ns_size);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRect()
+void doRect() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRect{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRect{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRect* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSPoint ns_point = { .x = 42, .y = 24 };
+ NSSize ns_size = { .width = 42, .height = 24 };
+ NSRect ns_rect = { .origin = ns_point, .size = ns_size };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[RECT_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *rect = @(ns_rect);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doNSEdgeInsets()
+void doNSEdgeInsets() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSEdgeInsets ns_edge_insets;
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[EDGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *edge_insets = @(ns_edge_insets);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRangeRValue()
+void doRangeRValue() {
+ // CHECK: [[COERCE:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[RVAL:%.*]] = call {{.*}} @getRange()
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[COERCE_CAST_PTR:%.*]] = getelementptr {{.*}} [[COERCE_CAST]], {{.*}}
+ // CHECK: [[EXTR_RVAL:%.*]] = extractvalue {{.*}} [[RVAL]]{{.*}}
+ // CHECK: store {{.*}}[[EXTR_RVAL]]{{.*}}[[COERCE_CAST_PTR]]{{.*}}
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[COERCE_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *range_rvalue = @(getRange());
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
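For reference, the boxed-expression syntax these CHECK lines exercise is documented as being equivalent to an explicit +[NSValue valueWithBytes:objCType:] message send, which is exactly the selector and @encode string matched above. A minimal illustrative sketch of that equivalence (not part of the patch; it assumes NSRange is declared objc_boxable as in the nsvalue-boxed-expressions-support.h input header):

    // Illustrative only: roughly what `NSValue *range = @(ns_range);` expands to.
    void doRangeExplicit() {
      NSRange ns_range = { .location = 0, .length = 42 };
      NSValue *range = [NSValue valueWithBytes:&ns_range
                                      objCType:@encode(NSRange)];
      // @encode(NSRange) yields "{_NSRange=QQ}" on LP64, matching RANGE_STR;
      // under ARC, `range` is retained via objc_retainAutoreleasedReturnValue
      // and released when it goes out of scope, as the CHECK lines verify.
      (void)range;
    }
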
diff --git a/test/CodeGenObjC/nsvalue-objc-boxable-mac.m b/test/CodeGenObjC/nsvalue-objc-boxable-mac.m
new file mode 100644
index 000000000000..da0ae7a57108
--- /dev/null
+++ b/test/CodeGenObjC/nsvalue-objc-boxable-mac.m
@@ -0,0 +1,118 @@
+// RUN: %clang_cc1 -I %S/Inputs -triple x86_64-apple-macosx -emit-llvm -O2 -disable-llvm-optzns -o - %s | FileCheck %s
+
+#import "nsvalue-boxed-expressions-support.h"
+
+// CHECK: [[CLASS:@.*]] = external global %struct._class_t
+// CHECK: [[NSVALUE:@.*]] = {{.*}}[[CLASS]]{{.*}}
+// CHECK: [[RANGE_STR:.*]] = {{.*}}_NSRange=QQ{{.*}}
+// CHECK: [[METH:@.*]] = private global{{.*}}valueWithBytes:objCType:{{.*}}
+// CHECK: [[VALUE_SEL:@.*]] = {{.*}}[[METH]]{{.*}}
+// CHECK: [[POINT_STR:.*]] = {{.*}}_NSPoint=dd{{.*}}
+// CHECK: [[SIZE_STR:.*]] = {{.*}}_NSSize=dd{{.*}}
+// CHECK: [[RECT_STR:.*]] = {{.*}}_NSRect={_NSPoint=dd}{_NSSize=dd}}{{.*}}
+// CHECK: [[EDGE_STR:.*]] = {{.*}}NSEdgeInsets=dddd{{.*}}
+
+// CHECK-LABEL: define void @doRange()
+void doRange() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRange* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSRange ns_range = { .location = 0, .length = 42 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ NSValue *range = @(ns_range);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doPoint()
+void doPoint() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSPoint{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSPoint{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSPoint* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSPoint ns_point = { .x = 42, .y = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[POINT_STR]]{{.*}})
+ NSValue *point = @(ns_point);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doSize()
+void doSize() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSSize{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSSize{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSSize* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSSize ns_size = { .width = 42, .height = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[SIZE_STR]]{{.*}})
+ NSValue *size = @(ns_size);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRect()
+void doRect() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRect{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRect{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRect* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSPoint ns_point = { .x = 42, .y = 24 };
+ NSSize ns_size = { .width = 42, .height = 24 };
+ NSRect ns_rect = { .origin = ns_point, .size = ns_size };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[RECT_STR]]{{.*}})
+ NSValue *rect = @(ns_rect);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doNSEdgeInsets()
+void doNSEdgeInsets() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSEdgeInsets ns_edge_insets;
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[EDGE_STR]]{{.*}})
+ NSValue *edge_insets = @(ns_edge_insets);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRangeRValue()
+void doRangeRValue() {
+ // CHECK: [[COERCE:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[RVAL:%.*]] = call {{.*}} @getRange()
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[COERCE_CAST_PTR:%.*]] = getelementptr {{.*}} [[COERCE_CAST]], {{.*}}
+ // CHECK: [[EXTR_RVAL:%.*]] = extractvalue {{.*}} [[RVAL]]{{.*}}
+ // CHECK: store {{.*}}[[EXTR_RVAL]]{{.*}}[[COERCE_CAST_PTR]]{{.*}}
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[COERCE_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ NSValue *range_rvalue = @(getRange());
+ // CHECK: ret void
+}
+
diff --git a/test/CodeGenObjCXX/designated-initializers.mm b/test/CodeGenObjCXX/designated-initializers.mm
new file mode 100644
index 000000000000..41a4271bc8c5
--- /dev/null
+++ b/test/CodeGenObjCXX/designated-initializers.mm
@@ -0,0 +1,36 @@
+// RUN: %clang_cc1 -triple arm64 %s -verify -emit-llvm -o - | FileCheck %s
+// expected-no-diagnostics
+
+// Make sure we don't enter an infinite loop (rdar://21942503)
+
+int vals1[] = {
+ [__objc_yes] = 1,
+ [__objc_no] = 2
+};
+// CHECK: @vals1 = global [2 x i32] [i32 2, i32 1]
+
+int vals2[] = {
+ [true] = 3,
+ [false] = 4
+};
+// CHECK: @vals2 = global [2 x i32] [i32 4, i32 3]
+
+int vals3[] = {
+ [false] = 1,
+ [true] = 2,
+ 5
+};
+// CHECK: @vals3 = global [3 x i32] [i32 1, i32 2, i32 5]
+
+int vals4[2] = {
+ [true] = 5,
+ [false] = 6
+};
+// CHECK: @vals4 = global [2 x i32] [i32 6, i32 5]
+
+int vals5[3] = {
+ [false] = 1,
+ [true] = 2,
+ 6
+};
+// CHECK: @vals5 = global [3 x i32] [i32 1, i32 2, i32 6]
diff --git a/test/CodeGenOpenCL/vector_shufflevector_valid.cl b/test/CodeGenOpenCL/vector_shufflevector_valid.cl
new file mode 100644
index 000000000000..0953c66f58e2
--- /dev/null
+++ b/test/CodeGenOpenCL/vector_shufflevector_valid.cl
@@ -0,0 +1,13 @@
+// RUN: %clang_cc1 -emit-llvm -O0 %s -o - | FileCheck %s
+
+// The shuffle vector mask must always be of i32 vector type.
+// See http://reviews.llvm.org/D10838 and https://llvm.org/bugs/show_bug.cgi?id=23800#c2
+// for more information about a bug where a 64-bit index operand causes the generation
+// of an invalid mask.
+
+typedef unsigned int uint2 __attribute((ext_vector_type(2)));
+
+void vector_shufflevector_valid(void) {
+ //CHECK: {{%.*}} = shufflevector <2 x i32> {{%.*}}, <2 x i32> undef, <2 x i32> <i32 0, i32 undef>
+ (uint2)(((uint2)(0)).s0, 0);
+}
diff --git a/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/backward/.keep b/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/backward/.keep
diff --git a/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32/.keep b/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed/.keep b/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include/.keep b/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include/.keep
diff --git a/test/Driver/Inputs/mingw_clang_tree/mingw32/i686-w64-mingw32/include/.keep b/test/Driver/Inputs/mingw_clang_tree/mingw32/i686-w64-mingw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_clang_tree/mingw32/i686-w64-mingw32/include/.keep
diff --git a/test/Driver/Inputs/mingw_clang_tree/mingw32/include/.keep b/test/Driver/Inputs/mingw_clang_tree/mingw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_clang_tree/mingw32/include/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/backward/.keep b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/backward/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/i686-w64-mingw32/.keep b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/i686-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/i686-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include-fixed/.keep b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include/.keep b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/include/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/include/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include-fixed/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/backward/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/backward/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/mingw32/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/minw32/include/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/minw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/minw32/include/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/i686-w64-mingw32/include/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/i686-w64-mingw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/i686-w64-mingw32/include/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/backward/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/backward/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/i686-w64-mingw32/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/i686-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/i686-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include-fixed/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include/.keep
diff --git a/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed/.keep b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward/.keep b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward/.keep
diff --git a/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32/.keep b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_opensuse_tree/usr/x86_64-w64-mingw32/sys-root/mingw/include/.keep b/test/Driver/Inputs/mingw_opensuse_tree/usr/x86_64-w64-mingw32/sys-root/mingw/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_opensuse_tree/usr/x86_64-w64-mingw32/sys-root/mingw/include/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/86_64-w64-mingw32/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/86_64-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/86_64-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/backward/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/backward/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/x86_64-w64-mingw32/include/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/x86_64-w64-mingw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/x86_64-w64-mingw32/include/.keep
diff --git a/test/Driver/arm-ias-Wa.s b/test/Driver/arm-ias-Wa.s
new file mode 100644
index 000000000000..6eadd3b841d5
--- /dev/null
+++ b/test/Driver/arm-ias-Wa.s
@@ -0,0 +1,64 @@
+// Test that different values of -Wa,-mcpu/-mfpu/-march/-mhwdiv pick the correct ARM target-feature(s).
+// Complete tests for -mcpu/-mfpu/-march/-mhwdiv live in other files.
+
+// CHECK-DUP-CPU: warning: argument unused during compilation: '-mcpu=cortex-a8'
+// CHECK-DUP-FPU: warning: argument unused during compilation: '-mfpu=vfpv3'
+// CHECK-DUP-ARCH: warning: argument unused during compilation: '-march=armv7'
+// CHECK-DUP-HDIV: warning: argument unused during compilation: '-mhwdiv=arm'
+
+// CHECK: "cc1as"
+// ================================================================= CPU
+// RUN: %clang -target arm-linux-gnueabi -Wa,-mcpu=cortex-a15 -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-CPU %s
+// CHECK-CPU: "-target-cpu" "cortex-a15"
+
+// RUN: %clang -target arm -Wa,-mcpu=bogus -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-BOGUS-CPU %s
+// CHECK-BOGUS-CPU: error: {{.*}} does not support '-Wa,-mcpu=bogus'
+
+// RUN: %clang -target arm -mcpu=cortex-a8 -Wa,-mcpu=cortex-a15 -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-DUP-CPU %s
+// CHECK-DUP-CPU: "-target-cpu" "cortex-a15"
+
+// ================================================================= FPU
+// RUN: %clang -target arm-linux-eabi -Wa,-mfpu=neon -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-NEON %s
+// CHECK-NEON: "-target-feature" "+neon"
+
+// RUN: %clang -target arm-linux-eabi -Wa,-mfpu=bogus -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-BOGUS-FPU %s
+// CHECK-BOGUS-FPU: error: {{.*}} does not support '-Wa,-mfpu=bogus'
+
+// RUN: %clang -target arm-linux-eabi -mfpu=vfpv3 -Wa,-mfpu=neon -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-DUP-FPU %s
+// CHECK-DUP-FPU: "-target-feature" "+neon"
+
+// ================================================================= Arch
+// Arch validation only for now, in case we're passing to an external assembler.
+
+// RUN: %clang -target arm -Wa,-march=armbogusv6 -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-BOGUS-ARCH %s
+// CHECK-BOGUS-ARCH: error: {{.*}} does not support '-Wa,-march=armbogusv6'
+
+// RUN: %clang -target arm -march=armv7 -Wa,-march=armv6 -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-DUP-ARCH %s
+
+// ================================================================= HD Div
+// RUN: %clang -target arm -Wa,-mhwdiv=arm -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-ARM %s
+// CHECK-ARM: "-target-feature" "+hwdiv-arm"
+// CHECK-ARM: "-target-feature" "-hwdiv"
+
+// RUN: %clang -target arm -Wa,-mhwdiv=thumb -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-THUMB %s
+// CHECK-THUMB: "-target-feature" "-hwdiv-arm"
+// CHECK-THUMB: "-target-feature" "+hwdiv"
+
+// RUN: %clang -target arm -Wa,-mhwdiv=bogus -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-BOGUS-HDIV %s
+// CHECK-BOGUS-HDIV: error: {{.*}} does not support '-Wa,-mhwdiv=bogus'
+
+// RUN: %clang -target arm -mhwdiv=arm -Wa,-mhwdiv=thumb -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-DUP-HDIV %s
+// CHECK-DUP-HDIV: "-target-feature" "-hwdiv-arm"
+// CHECK-DUP-HDIV: "-target-feature" "+hwdiv"
diff --git a/test/Driver/cuda-options.cu b/test/Driver/cuda-options.cu
index 90bdea4499c8..65d4913a159e 100644
--- a/test/Driver/cuda-options.cu
+++ b/test/Driver/cuda-options.cu
@@ -4,7 +4,7 @@
// REQUIRES: nvptx-registered-target
// Simple compilation case:
-// RUN: %clang -### -target=x86_64-linux-gnu -c %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu -c %s 2>&1 \
// Compile device-side to PTX assembly and make sure we use it on the host side.
// RUN: | FileCheck -check-prefix CUDA-D1 \
// Then compile host side and incorporate device code.
@@ -13,7 +13,7 @@
// RUN: -check-prefix CUDA-NL %s
// Typical compilation + link case:
-// RUN: %clang -### -target=x86_64-linux-gnu %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu %s 2>&1 \
// Compile device-side to PTX assembly and make sure we use it on the host side
// RUN: | FileCheck -check-prefix CUDA-D1 \
// Then compile host side and incorporate device code.
@@ -22,7 +22,7 @@
// RUN: -check-prefix CUDA-L %s
// Verify that -cuda-no-device disables device-side compilation and linking
-// RUN: %clang -### -target=x86_64-linux-gnu --cuda-host-only %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu --cuda-host-only %s 2>&1 \
// Make sure we didn't run device-side compilation.
// RUN: | FileCheck -check-prefix CUDA-ND \
// Then compile host side and make sure we don't attempt to incorporate GPU code.
@@ -31,7 +31,7 @@
// RUN: -check-prefix CUDA-NL %s
// Verify that -cuda-no-host disables host-side compilation and linking
-// RUN: %clang -### -target=x86_64-linux-gnu --cuda-device-only %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu --cuda-device-only %s 2>&1 \
// Compile device-side to PTX assembly
// RUN: | FileCheck -check-prefix CUDA-D1 \
 // Make sure there is no host compilation or linking.
@@ -39,7 +39,7 @@
// Verify that with -S we compile host and device sides to assembly
// and incorporate device code on the host side.
-// RUN: %clang -### -target=x86_64-linux-gnu -S -c %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu -S -c %s 2>&1 \
// Compile device-side to PTX assembly
// RUN: | FileCheck -check-prefix CUDA-D1 \
// Then compile host side and incorporate GPU code.
@@ -49,7 +49,7 @@
 // Verify that the --cuda-gpu-arch option passes the correct GPU
 // architecture info to device compilation.
-// RUN: %clang -### -target=x86_64-linux-gnu --cuda-gpu-arch=sm_35 -c %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=sm_35 -c %s 2>&1 \
// Compile device-side to PTX assembly.
// RUN: | FileCheck -check-prefix CUDA-D1 -check-prefix CUDA-D1-SM35 \
// Then compile host side and incorporate GPU code.
@@ -59,7 +59,7 @@
 // Verify that there is a separate device-side compilation per --cuda-gpu-arch arg
 // and that all results are included on the host side.
-// RUN: %clang -### -target=x86_64-linux-gnu --cuda-gpu-arch=sm_35 --cuda-gpu-arch=sm_30 -c %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=sm_35 --cuda-gpu-arch=sm_30 -c %s 2>&1 \
// Compile both device-sides to PTX assembly
// RUN: | FileCheck \
// RUN: -check-prefix CUDA-D1 -check-prefix CUDA-D1-SM35 \
diff --git a/test/Driver/linux-ld.c b/test/Driver/linux-ld.c
index 5e865a953c7c..a976b388d3cc 100644
--- a/test/Driver/linux-ld.c
+++ b/test/Driver/linux-ld.c
@@ -618,6 +618,13 @@
// CHECK-ARM: "-dynamic-linker" "{{.*}}/lib/ld-linux.so.3"
//
// RUN: %clang %s -### -o %t.o 2>&1 \
+// RUN: --target=arm-linux-gnueabi -mfloat-abi=hard \
+// RUN: | FileCheck --check-prefix=CHECK-ARM-ABIHF %s
+// CHECK-ARM-ABIHF: "{{.*}}ld{{(.exe)?}}"
+// CHECK-ARM-ABIHF: "-m" "armelf_linux_eabi"
+// CHECK-ARM-ABIHF: "-dynamic-linker" "{{.*}}/lib/ld-linux-armhf.so.3"
+//
+// RUN: %clang %s -### -o %t.o 2>&1 \
// RUN: --target=arm-linux-gnueabihf \
// RUN: | FileCheck --check-prefix=CHECK-ARM-HF %s
// CHECK-ARM-HF: "{{.*}}ld{{(.exe)?}}"
diff --git a/test/Driver/mingw.cpp b/test/Driver/mingw.cpp
new file mode 100644
index 000000000000..8dc5b96034ce
--- /dev/null
+++ b/test/Driver/mingw.cpp
@@ -0,0 +1,59 @@
+// RUN: %clang -target i686-windows-gnu -c -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_CLANG_TREE %s
+// CHECK_MINGW_CLANG_TREE: "{{.*}}/Inputs/mingw_clang_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
+// CHECK_MINGW_CLANG_TREE: "{{.*}}/Inputs/mingw_clang_tree/mingw32{{/|\\\\}}include"
+
+
+// RUN: %clang -target i686-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_mingw_org_tree/mingw %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_ORG_TREE %s
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include{{/|\\\\}}c++"
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}mingw32"
+// CHECK_MINGW_ORG_TREE: "{{.*}}{{/|\\\\}}Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include"
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include-fixed"
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}mingw32{{/|\\\\}}include"
+// CHECK_MINGW_ORG_TREE: {{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}include
+
+
+// RUN: %clang -target i686-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_mingw_builds_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_BUILDS_TREE %s
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}i686-w64-mingw32"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}4.9.1{{/|\\\\}}include"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}4.9.1{{/|\\\\}}include-fixed"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
+
+
+// RUN: %clang -target i686-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_msys2_tree/msys64/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_MSYS_TREE %s
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64{{/|\\\\}}mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.9.2"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.9.2{{/|\\\\}}i686-w64-mingw32"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.9.2{{/|\\\\}}backward"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}4.9.2{{/|\\\\}}include"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}4.9.2{{/|\\\\}}include-fixed"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}include"
+
+
+// RUN: %clang -target x86_64-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_opensuse_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_OPENSUSE_TREE %s
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include{{/|\\\\}}c++"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}x86_64-w64-mingw32"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}x86_64-w64-mingw32/sys-root/mingw/include"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include-fixed"
+
+
+// RUN: %clang -target i686-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_arch_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_ARCH_TREE %s
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}5.1.0"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}5.1.0{{/|\\\\}}i686-w64-mingw32"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}5.1.0{{/|\\\\}}backward"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include-fixed"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
+
+
+// RUN: %clang -target x86_64-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_ubuntu_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_UBUNTU_TREE %s
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.8"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.8{{/|\\\\}}x86_64-w64-mingw32"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.8{{/|\\\\}}backward"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}4.8{{/|\\\\}}include"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}4.8{{/|\\\\}}include-fixed"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
diff --git a/test/Headers/pmmintrin.c b/test/Headers/pmmintrin.c
deleted file mode 100644
index 5b7a3a4ef6b9..000000000000
--- a/test/Headers/pmmintrin.c
+++ /dev/null
@@ -1,12 +0,0 @@
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding %s -verify
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding -x c++ %s -verify
-// expected-no-diagnostics
-
-#if defined(i386) || defined(__x86_64__)
-#include <pmmintrin.h>
-
-int __attribute__((__target__(("sse3")))) foo(int a) {
- _mm_mwait(0, 0);
- return 4;
-}
-#endif
diff --git a/test/Headers/x86intrin-2.c b/test/Headers/x86intrin-2.c
deleted file mode 100644
index f98fdbd13a8a..000000000000
--- a/test/Headers/x86intrin-2.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding %s -verify
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding -fno-lax-vector-conversions %s -verify
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding -x c++ %s -verify
-// expected-no-diagnostics
-
-#if defined(i386) || defined(__x86_64__)
-
-// Include the metaheader that includes all x86 intrinsic headers.
-#include <x86intrin.h>
-
-void __attribute__((__target__("mmx"))) mm_empty_wrap(void) {
- _mm_empty();
-}
-
-__m128 __attribute__((__target__("sse"))) mm_add_ss_wrap(__m128 a, __m128 b) {
- return _mm_add_ss(a, b);
-}
-
-__m128d __attribute__((__target__("sse2"))) mm_sqrt_sd_wrap(__m128d a, __m128d b) {
- return _mm_sqrt_sd(a, b);
-}
-
-void __attribute__((__target__("sse3"))) mm_mwait_wrap(int a) {
- _mm_mwait(0, 0);
-}
-
-__m64 __attribute__((__target__("ssse3"))) mm_abs_pi8_wrap(__m64 a) {
- return _mm_abs_pi8(a);
-}
-
-__m128i __attribute__((__target__("sse4.1"))) mm_minpos_epu16_wrap(__m128i v) {
- return _mm_minpos_epu16(v);
-}
-
-unsigned int __attribute__((__target__("sse4.2"))) mm_crc32_u8_wrap(unsigned int c, unsigned char d) {
- return _mm_crc32_u8(c, d);
-}
-
-__m128i __attribute__((__target__("aes"))) mm_aesenc_si128_wrap(__m128i v, __m128i r) {
- return _mm_aesenc_si128(v, r);
-}
-
-__m256d __attribute__((__target__("avx"))) mm256_add_pd_wrap(__m256d a, __m256d b) {
- return _mm256_add_pd(a, b);
-}
-
-__m256i __attribute__((__target__("avx2"))) mm256_abs_epi8_wrap(__m256i a) {
- return _mm256_abs_epi8(a);
-}
-
-unsigned short __attribute__((__target__("bmi"))) tzcnt_u16_wrap(unsigned short x) {
- return __tzcnt_u16(x);
-}
-
-unsigned int __attribute__((__target__("bmi2"))) bzhi_u32_wrap(unsigned int x, unsigned int y) {
- return _bzhi_u32(x, y);
-}
-
-unsigned short __attribute__((__target__("lzcnt"))) lzcnt16_wrap(unsigned short x) {
- return __lzcnt16(x);
-}
-
-__m256d __attribute__((__target__("fma"))) mm256_fmsubadd_pd_wrap(__m256d a, __m256d b, __m256d c) {
- return _mm256_fmsubadd_pd(a, b, c);
-}
-
-__m512i __attribute__((__target__("avx512f"))) mm512_setzero_si512_wrap(void) {
- return _mm512_setzero_si512();
-}
-
-__mmask8 __attribute__((__target__("avx512vl"))) mm_cmpeq_epi32_mask_wrap(__m128i a, __m128i b) {
- return _mm_cmpeq_epi32_mask(a, b);
-}
-
-__v64qi __attribute__((__target__("avx512bw"))) mm512_setzero_qi_wrap(void) {
- return _mm512_setzero_qi();
-}
-
-__m512i __attribute__((__target__("avx512dq"))) mm512_mullo_epi64_wrap(__m512i a, __m512i b) {
- return _mm512_mullo_epi64(a, b);
-}
-
-__mmask16 __attribute__((__target__("avx512vl,avx512bw"))) mm_cmpeq_epi8_mask_wrap(__m128i a, __m128i b) {
- return _mm_cmpeq_epi8_mask(a, b);
-}
-
-__m256i __attribute__((__target__("avx512vl,avx512dq"))) mm256_mullo_epi64_wrap(__m256i a, __m256i b) {
- return _mm256_mullo_epi64(a, b);
-}
-
-int __attribute__((__target__("rdrnd"))) rdrand16_step_wrap(unsigned short *p) {
- return _rdrand16_step(p);
-}
-
-#if defined(__x86_64__)
-unsigned int __attribute__((__target__("fsgsbase"))) readfsbase_u32_wrap(void) {
- return _readfsbase_u32();
-}
-#endif
-
-unsigned int __attribute__((__target__("rtm"))) xbegin_wrap(void) {
- return _xbegin();
-}
-
-__m128i __attribute__((__target__("sha"))) mm_sha1nexte_epu32_wrap(__m128i x, __m128i y) {
- return _mm_sha1nexte_epu32(x, y);
-}
-
-int __attribute__((__target__("rdseed"))) rdseed16_step_wrap(unsigned short *p) {
- return _rdseed16_step(p);
-}
-
-__m128i __attribute__((__target__("sse4a"))) mm_extract_si64_wrap(__m128i x, __m128i y) {
- return _mm_extract_si64(x, y);
-}
-
-__m128 __attribute__((__target__("fma4"))) mm_macc_ps_wrap(__m128 a, __m128 b, __m128 c) {
- return _mm_macc_ps(a, b, c);
-}
-
-__m256 __attribute__((__target__("xop"))) mm256_frcz_ps_wrap(__m256 a) {
- return _mm256_frcz_ps(a);
-}
-
-unsigned int __attribute__((__target__("tbm"))) blcfill_u32_wrap(unsigned int a) {
- return __blcfill_u32(a);
-}
-
-__m128 __attribute__((__target__("f16c"))) mm_cvtph_ps_wrap(__m128i a) {
- return _mm_cvtph_ps(a);
-}
-
-int __attribute__((__target__("rtm"))) xtest_wrap(void) {
- return _xtest();
-}
-
-#endif
diff --git a/test/Headers/x86intrin.c b/test/Headers/x86intrin.c
index 7c15c4816b33..6a1608b756a0 100644
--- a/test/Headers/x86intrin.c
+++ b/test/Headers/x86intrin.c
@@ -5,7 +5,126 @@
#if defined(i386) || defined(__x86_64__)
-// Include the metaheader that includes all x86 intrinsic headers.
+// Pretend to enable all features.
+#ifndef __3dNOW__
+#define __3dNOW__
+#endif
+#ifndef __BMI__
+#define __BMI__
+#endif
+#ifndef __BMI2__
+#define __BMI2__
+#endif
+#ifndef __LZCNT__
+#define __LZCNT__
+#endif
+#ifndef __POPCNT__
+#define __POPCNT__
+#endif
+#ifndef __RDSEED__
+#define __RDSEED__
+#endif
+#ifndef __PRFCHW__
+#define __PRFCHW__
+#endif
+#ifndef __SSE4A__
+#define __SSE4A__
+#endif
+#ifndef __FMA4__
+#define __FMA4__
+#endif
+#ifndef __XOP__
+#define __XOP__
+#endif
+#ifndef __F16C__
+#define __F16C__
+#endif
+#ifndef __MMX__
+#define __MMX__
+#endif
+#ifndef __SSE__
+#define __SSE__
+#endif
+#ifndef __SSE2__
+#define __SSE2__
+#endif
+#ifndef __SSE3__
+#define __SSE3__
+#endif
+#ifndef __SSSE3__
+#define __SSSE3__
+#endif
+#ifndef __SSE4_1__
+#define __SSE4_1__
+#endif
+#ifndef __SSE4_2__
+#define __SSE4_2__
+#endif
+#ifndef __AES__
+#define __AES__
+#endif
+#ifndef __AVX__
+#define __AVX__
+#endif
+#ifndef __AVX2__
+#define __AVX2__
+#endif
+#ifndef __BMI__
+#define __BMI__
+#endif
+#ifndef __BMI2__
+#define __BMI2__
+#endif
+#ifndef __LZCNT__
+#define __LZCNT__
+#endif
+#ifndef __FMA__
+#define __FMA__
+#endif
+#ifndef __RDRND__
+#define __RDRND__
+#endif
+#ifndef __SHA__
+#define __SHA__
+#endif
+#ifndef __ADX__
+#define __ADX__
+#endif
+#ifndef __TBM__
+#define __TBM__
+#endif
+#ifndef __RTM__
+#define __RTM__
+#endif
+#ifndef __PCLMUL__
+#define __PCLMUL__
+#endif
+#ifndef __FSGSBASE__
+#define __FSGSBASE__
+#endif
+#ifndef __AVX512F__
+#define __AVX512F__
+#endif
+#ifndef __AVX512VL__
+#define __AVX512VL__
+#endif
+#ifndef __AVX512BW__
+#define __AVX512BW__
+#endif
+#ifndef __AVX512ER__
+#define __AVX512ER__
+#endif
+#ifndef __AVX512PF__
+#define __AVX512PF__
+#endif
+#ifndef __AVX512DQ__
+#define __AVX512DQ__
+#endif
+#ifndef __AVX512CD__
+#define __AVX512CD__
+#endif
+
+// Now include the metaheader that includes all x86 intrinsic headers.
#include <x86intrin.h>
#endif
diff --git a/test/Lexer/has_attribute_objc_boxable.m b/test/Lexer/has_attribute_objc_boxable.m
new file mode 100644
index 000000000000..e172ecaba2bb
--- /dev/null
+++ b/test/Lexer/has_attribute_objc_boxable.m
@@ -0,0 +1,8 @@
+// RUN: %clang_cc1 -E %s -o - | FileCheck %s
+
+#if __has_attribute(objc_boxable)
+int has_objc_boxable_attribute();
+#endif
+
+// CHECK: has_objc_boxable_attribute
+
diff --git a/test/Lexer/has_feature_boxed_nsvalue_expressions.m b/test/Lexer/has_feature_boxed_nsvalue_expressions.m
new file mode 100644
index 000000000000..8c66bcb3d760
--- /dev/null
+++ b/test/Lexer/has_feature_boxed_nsvalue_expressions.m
@@ -0,0 +1,8 @@
+// RUN: %clang_cc1 -E %s -o - | FileCheck %s
+
+#if __has_feature(objc_boxed_nsvalue_expressions)
+int has_objc_boxed_nsvalue_expressions();
+#endif
+
+// CHECK: has_objc_boxed_nsvalue_expressions
+
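These two Lexer tests cover the preprocessor guards that user code would typically combine before relying on the new boxing support. A hypothetical usage pattern (illustrative only; the struct and function names are made up, and <Foundation/Foundation.h> is assumed for NSValue):

    #import <Foundation/Foundation.h>

    #if __has_attribute(objc_boxable) && __has_feature(objc_boxed_nsvalue_expressions)
    typedef struct __attribute__((objc_boxable)) _MyPair {
      int first, second;
    } MyPair;

    static inline NSValue *BoxMyPair(MyPair p) {
      // The boxed expression lowers to +[NSValue valueWithBytes:objCType:],
      // as the CodeGenObjC tests earlier in this patch verify.
      return @(p);
    }
    #endif
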
diff --git a/test/Misc/cc1as-asm.s b/test/Misc/cc1as-asm.s
new file mode 100644
index 000000000000..af92644073ac
--- /dev/null
+++ b/test/Misc/cc1as-asm.s
@@ -0,0 +1,3 @@
+// Run the cc1as asm output path just to make sure it works.
+// REQUIRES: x86-registered-target
+// RUN: %clang -cc1as -triple x86_64-apple-macosx10.10.0 -filetype asm %s -o /dev/null
diff --git a/test/Modules/pch_container.m b/test/Modules/pch_container.m
index 095245bdc3a4..95ef3edcd668 100644
--- a/test/Modules/pch_container.m
+++ b/test/Modules/pch_container.m
@@ -1,9 +1,11 @@
@import DependsOnModule;
// REQUIRES: x86-registered-target
-// RUN: rm -rf %t-MachO %t-ELF %t-ELF_SPLIT %t-COFF
-// RUN: %clang_cc1 -triple=x86_64-apple-darwin -fmodules -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-MachO -F %S/Inputs %s
-// RUN: %clang_cc1 -triple=x86_64-linux-elf -fmodules -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-ELF -F %S/Inputs %s
-// RUN: %clang_cc1 -triple=x86_64-windows-coff -fmodules -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-COFF -F %S/Inputs %s
+// RUN: rm -rf %t-MachO %t-ELF %t-ELF_SPLIT %t-COFF %t-raw
+// RUN: %clang_cc1 -triple=x86_64-apple-darwin -fmodules -fmodule-format=obj -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-MachO -F %S/Inputs %s
+// RUN: %clang_cc1 -triple=x86_64-linux-elf -fmodules -fmodule-format=obj -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-ELF -F %S/Inputs %s
+// RUN: %clang_cc1 -triple=x86_64-windows-coff -fmodules -fmodule-format=obj -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-COFF -F %S/Inputs %s
+// RUN: %clang_cc1 -triple=x86_64-apple-darwin -fmodules -fmodule-format=raw -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-raw -F %S/Inputs %s
+
// RUN: llvm-objdump -section-headers %t-MachO/DependsOnModule.pcm %t-ELF/DependsOnModule.pcm %t-COFF/DependsOnModule.pcm | FileCheck %s
// CHECK: file format Mach-O 64-bit x86-64
@@ -13,5 +15,6 @@
// CHECK: file format COFF-x86-64
// CHECK: clangast {{[0-9a-f]+}} {{[0-9a-f]+}}
+// RUN: not llvm-objdump -section-headers %t-raw/DependsOnModule.pcm
// RUN: %clang_cc1 -split-dwarf-file t-split.dwo -triple=x86_64-linux-elf -fmodules -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-ELF_SPLIT -F %S/Inputs %s -o %t-split.o
diff --git a/test/OpenMP/for_codegen.cpp b/test/OpenMP/for_codegen.cpp
index 082a0d4d8841..9db157011075 100644
--- a/test/OpenMP/for_codegen.cpp
+++ b/test/OpenMP/for_codegen.cpp
@@ -329,14 +329,9 @@ void test_precond() {
// CHECK: [[A_ADDR:%.+]] = alloca i8,
// CHECK: [[I_ADDR:%.+]] = alloca i8,
char a = 0;
- // CHECK: store i32 0, i32* [[IV_ADDR:%.+]],
- // CHECK: [[A:%.+]] = load i8, i8* [[A_ADDR]],
- // CHECK: [[CONV:%.+]] = sext i8 [[A]] to i32
- // CHECK: [[IV:%.+]] = load i32, i32* [[IV_ADDR]],
- // CHECK: [[MUL:%.+]] = mul nsw i32 [[IV]], 1
- // CHECK: [[ADD:%.+]] = add nsw i32 [[CONV]], [[MUL]]
- // CHECK: [[CONV:%.+]] = trunc i32 [[ADD]] to i8
- // CHECK: store i8 [[CONV]], i8* [[I_ADDR]],
+ // CHECK: store i8 0,
+ // CHECK: store i32
+ // CHECK: store i8
// CHECK: [[A:%.+]] = load i8, i8* [[A_ADDR]],
// CHECK: [[CONV:%.+]] = sext i8 [[A]] to i32
// CHECK: [[CMP:%.+]] = icmp slt i32 [[CONV]], 10
@@ -389,6 +384,7 @@ void for_with_global_lcv() {
// CHECK: store i8 [[I_VAL]], i8* [[K]]
// CHECK-NOT: [[I]]
// CHECK: call void @__kmpc_for_static_fini(
+// CHECK: call void @__kmpc_barrier(
#pragma omp for
for (i = 0; i < 2; ++i) {
k = i;
@@ -410,5 +406,70 @@ void for_with_global_lcv() {
}
}
-#endif // HEADER
+struct Bool {
+ Bool(bool b) : b(b) {}
+ operator bool() const { return b; }
+ const bool b;
+};
+
+template <typename T>
+struct It {
+ It() : p(0) {}
+ It(const It &, int = 0) ;
+ template <typename U>
+ It(U &, int = 0) ;
+ It &operator=(const It &);
+ It &operator=(It &);
+ ~It() {}
+
+ It(T *p) : p(p) {}
+
+ operator T *&() { return p; }
+ operator T *() const { return p; }
+ T *operator->() const { return p; }
+
+ It &operator++() { ++p; return *this; }
+ It &operator--() { --p; return *this; }
+ It &operator+=(unsigned n) { p += n; return *this; }
+ It &operator-=(unsigned n) { p -= n; return *this; }
+
+ T *p;
+};
+
+template <typename T>
+It<T> operator+(It<T> a, typename It<T>::difference_type n) { return a.p + n; }
+
+template <typename T>
+It<T> operator+(typename It<T>::difference_type n, It<T> a) { return a.p + n; }
+
+template <typename T>
+It<T> operator-(It<T> a, typename It<T>::difference_type n) { return a.p - n; }
+
+typedef Bool BoolType;
+
+template <typename T>
+BoolType operator<(It<T> a, It<T> b) { return a.p < b.p; }
+void loop_with_It(It<char> begin, It<char> end) {
+#pragma omp for
+ for (It<char> it = begin; it < end; ++it) {
+ *it = 0;
+ }
+}
+
+// CHECK-LABEL: loop_with_It
+// CHECK: call i32 @__kmpc_global_thread_num(
+// CHECK: call void @__kmpc_for_static_init_8(
+// CHECK: call void @__kmpc_for_static_fini(
+
+void loop_with_stmt_expr() {
+#pragma omp for
+ for (int i = __extension__({float b = 0;b; }); i < __extension__({double c = 1;c; }); i += __extension__({char d = 1; d; }))
+ ;
+}
+// CHECK-LABEL: loop_with_stmt_expr
+// CHECK: call i32 @__kmpc_global_thread_num(
+// CHECK: call void @__kmpc_for_static_init_4(
+// CHECK: call void @__kmpc_for_static_fini(
+
+#endif // HEADER
diff --git a/test/OpenMP/for_loop_messages.cpp b/test/OpenMP/for_loop_messages.cpp
index f425defdf3e2..1a1315507893 100644
--- a/test/OpenMP/for_loop_messages.cpp
+++ b/test/OpenMP/for_loop_messages.cpp
@@ -423,12 +423,12 @@ public:
typedef int difference_type;
typedef std::random_access_iterator_tag iterator_category;
};
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'GoodIter' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
int operator-(GoodIter a, GoodIter b) { return 0; }
// expected-note@+1 3 {{candidate function not viable: requires single argument 'a', but 2 arguments were provided}}
GoodIter operator-(GoodIter a) { return a; }
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'int' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'int' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
GoodIter operator-(GoodIter a, int v) { return GoodIter(); }
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 1st argument}}
@@ -479,7 +479,7 @@ int test_with_random_access_iterator() {
#pragma omp for
for (begin = GoodIter(0); begin < end; ++begin)
++begin;
-// expected-error@+4 {{invalid operands to binary expression ('GoodIter' and 'const Iter0')}}
+// expected-error@+4 {{invalid operands to binary expression ('GoodIter' and 'Iter0')}}
// expected-error@+3 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
#pragma omp parallel
#pragma omp for
@@ -562,7 +562,7 @@ int test_with_random_access_iterator() {
for (Iter1 I = begin1; I >= end1; ++I)
++I;
#pragma omp parallel
-// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+4 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// Initializer is constructor with all default params.
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
diff --git a/test/OpenMP/for_simd_loop_messages.cpp b/test/OpenMP/for_simd_loop_messages.cpp
index 6e68eb8a082d..da7e6843a0c4 100644
--- a/test/OpenMP/for_simd_loop_messages.cpp
+++ b/test/OpenMP/for_simd_loop_messages.cpp
@@ -406,12 +406,12 @@ public:
typedef int difference_type;
typedef std::random_access_iterator_tag iterator_category;
};
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'GoodIter' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
int operator-(GoodIter a, GoodIter b) { return 0; }
// expected-note@+1 3 {{candidate function not viable: requires single argument 'a', but 2 arguments were provided}}
GoodIter operator-(GoodIter a) { return a; }
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'int' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'int' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
GoodIter operator-(GoodIter a, int v) { return GoodIter(); }
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 1st argument}}
@@ -463,7 +463,7 @@ int test_with_random_access_iterator() {
for (begin = GoodIter(0); begin < end; ++begin)
++begin;
#pragma omp parallel
-// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'const Iter0')}}
+// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'Iter0')}}
// expected-error@+2 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
#pragma omp for simd
for (begin = begin0; begin < end; ++begin)
@@ -544,7 +544,7 @@ int test_with_random_access_iterator() {
for (Iter1 I = begin1; I >= end1; ++I)
++I;
#pragma omp parallel
-// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+4 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// Initializer is constructor with all default params.
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
diff --git a/test/OpenMP/openmp_common.c b/test/OpenMP/openmp_common.c
index 3765f4c5dc14..ed55796b3374 100644
--- a/test/OpenMP/openmp_common.c
+++ b/test/OpenMP/openmp_common.c
@@ -7,3 +7,8 @@ void foo() {
#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}
}
+
+typedef struct S {
+#pragma omp parallel for private(j) schedule(static) if (tree1->totleaf > 1024) // expected-error {{unexpected OpenMP directive '#pragma omp parallel for'}}
+} St;
+
diff --git a/test/OpenMP/parallel_copyin_codegen.cpp b/test/OpenMP/parallel_copyin_codegen.cpp
index 281d82ce9705..4aacb809573f 100644
--- a/test/OpenMP/parallel_copyin_codegen.cpp
+++ b/test/OpenMP/parallel_copyin_codegen.cpp
@@ -4,6 +4,14 @@
// RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -std=c++11 -DLAMBDA -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -fblocks -DBLOCKS -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=ARRAY %s
+
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck %s -check-prefix=TLS-CHECK
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple %itanium_abi_triple -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -x c++ -triple %itanium_abi_triple -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TLS-CHECK
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=TLS-LAMBDA %s
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=TLS-BLOCKS %s
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DARRAY -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck -check-prefix=TLS-ARRAY %s
+// REQUIRES: tls
// expected-no-diagnostics
#ifndef ARRAY
#ifndef HEADER
@@ -25,7 +33,9 @@ struct S {
// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
-
+// TLS-CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
+// TLS-CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
+// TLS-CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[T_VAR:@.+]] = internal global i{{[0-9]+}} 1122,
// CHECK-DAG: [[VEC:@.+]] = internal global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 1, i{{[0-9]+}} 2],
@@ -35,6 +45,15 @@ struct S {
// CHECK-DAG: [[TMAIN_VEC:@.+]] = linkonce_odr global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 3, i{{[0-9]+}} 3],
// CHECK-DAG: [[TMAIN_S_ARR:@.+]] = linkonce_odr global [2 x [[S_INT_TY]]] zeroinitializer,
// CHECK-DAG: [[TMAIN_VAR:@.+]] = linkonce_odr global [[S_INT_TY]] zeroinitializer,
+// TLS-CHECK-DAG: [[T_VAR:@.+]] = internal thread_local global i{{[0-9]+}} 1122,
+// TLS-CHECK-DAG: [[VEC:@.+]] = internal thread_local global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 1, i{{[0-9]+}} 2],
+// TLS-CHECK-DAG: [[S_ARR:@.+]] = internal thread_local global [2 x [[S_FLOAT_TY]]] zeroinitializer,
+// TLS-CHECK-DAG: [[VAR:@.+]] = internal thread_local global [[S_FLOAT_TY]] zeroinitializer,
+// TLS-CHECK-DAG: [[TMAIN_T_VAR:@.+]] = linkonce_odr thread_local global i{{[0-9]+}} 333,
+// TLS-CHECK-DAG: [[TMAIN_VEC:@.+]] = linkonce_odr thread_local global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 3, i{{[0-9]+}} 3],
+// TLS-CHECK-DAG: [[TMAIN_S_ARR:@.+]] = linkonce_odr thread_local global [2 x [[S_INT_TY]]] zeroinitializer,
+// TLS-CHECK-DAG: [[TMAIN_VAR:@.+]] = linkonce_odr thread_local global [[S_INT_TY]] zeroinitializer,
+
template <typename T>
T tmain() {
S<T> test;
@@ -59,12 +78,28 @@ int main() {
// LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
// LAMBDA-LABEL: @main
// LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]](
+ // TLS-LAMBDA: [[G_CAPTURE_TY:%.+]] = type { i{{[0-9]+}}* }
+ // TLS-LAMBDA: [[G:@.+]] = {{.*}}thread_local {{.*}}global i{{[0-9]+}} 1212,
+ // TLS-LAMBDA-LABEL: @main
+ // TLS-LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]](
[&]() {
// LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
// LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8*
+
+ // TLS-LAMBDA-DAG: [[G_CPY_ADDR:%.+]] = getelementptr inbounds [[G_CAPTURE_TY]], [[G_CAPTURE_TY]]* [[G_CAPTURE:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
+ // TLS-LAMBDA-DAG: [[G_CPY_VAL:%.+]] = call i{{[0-9]+}}* [[G_CTOR:@.+]]()
+ // TLS-LAMBDA: store i{{[0-9]+}}* [[G_CPY_VAL]], i{{[0-9]+}}** [[G_CPY_ADDR]]
+ // TLS-LAMBDA: [[G_CAPTURE_CAST:%.+]] = bitcast [[G_CAPTURE_TY]]* [[G_CAPTURE]] to i8*
+ // TLS-LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* [[G_CAPTURE_CAST]]
+
+ // TLS-LAMBDA: define {{.*}}i{{[0-9]+}}* [[G_CTOR]]() {
+ // TLS-LAMBDA: ret i{{[0-9]+}}* [[G]]
+ // TLS-LAMBDA: }
+
#pragma omp parallel copyin(g)
{
// LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
+ // TLS-LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, [[G_CAPTURE_TY]]* [[ARG:%.+]])
// threadprivate_g = g;
// LAMBDA: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
@@ -76,14 +111,31 @@ int main() {
// LAMBDA: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
// LAMBDA: [[DONE]]
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_FIELD:%.+]] = getelementptr inbounds [[G_CAPTURE_TY]], [[G_CAPTURE_TY]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_SRC:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_CAPTURE_FIELD]]
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_SRCC:%.+]] = ptrtoint i{{[0-9]+}}* [[G_CAPTURE_SRC]] to i{{[0-9]+}}
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_DSTC:%.+]] = ptrtoint i{{[0-9]+}}* [[G_CAPTURE_DST]] to i{{[0-9]+}}
+ // TLS-LAMBDA: icmp ne i{{[0-9]+}} {{%.+}}, {{%.+}}
+ // TLS-LAMBDA: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+ // TLS-LAMBDA: [[NOT_MASTER]]
+ // TLS-LAMBDA: load i{{[0-9]+}}, i{{[0-9]+}}* [[G_CAPTURE_SRC]],
+ // TLS-LAMBDA: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* [[G_CAPTURE_DST]],
+ // TLS-LAMBDA: [[DONE]]
+
// LAMBDA: call {{.*}}i32 @__kmpc_cancel_barrier(
+ // TLS-LAMBDA: call {{.*}}i32 @__kmpc_cancel_barrier(
g = 1;
// LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}*
+ // TLS-LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}*
[&]() {
// LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
// LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
g = 2;
// LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
+
+ // TLS-LAMBDA: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-LAMBDA: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_CAPTURE_DST]]
}();
}
}();
@@ -92,12 +144,28 @@ int main() {
// BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
// BLOCKS-LABEL: @main
// BLOCKS: call {{.*}}void {{%.+}}(i8
+
+ // TLS-BLOCKS: [[G_CAPTURE_TY:%.+]] = type { i{{[0-9]+}}* }
+ // TLS-BLOCKS: [[G:@.+]] = {{.*}}thread_local {{.*}}global i{{[0-9]+}} 1212,
+ // TLS-BLOCKS-LABEL: @main
+ // TLS-BLOCKS: call {{.*}}void {{%.+}}(i8
^{
// BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
// BLOCKS: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8*
+
+ // TLS-BLOCKS-DAG: [[G_CPY_ADDR:%.+]] = getelementptr inbounds [[G_CAPTURE_TY]], [[G_CAPTURE_TY]]* [[G_CAPTURE:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
+ // TLS-BLOCKS-DAG: [[G_CPY_VAL:%.+]] = call i{{[0-9]+}}* [[G_CTOR:@.+]]()
+ // TLS-BLOCKS: store i{{[0-9]+}}* [[G_CPY_VAL]], i{{[0-9]+}}** [[G_CPY_ADDR]]
+ // TLS-BLOCKS: [[G_CAPTURE_CAST:%.+]] = bitcast [[G_CAPTURE_TY]]* [[G_CAPTURE]] to i8*
+ // TLS-BLOCKS: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* [[G_CAPTURE_CAST]]
+
+ // TLS-BLOCKS: define {{.*}}i{{[0-9]+}}* [[G_CTOR]]() {
+ // TLS-BLOCKS: ret i{{[0-9]+}}* [[G]]
+ // TLS-BLOCKS: }
#pragma omp parallel copyin(g)
{
// BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
+ // TLS-BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, [[G_CAPTURE_TY]]* [[ARG:%.+]])
// threadprivate_g = g;
// BLOCKS: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
@@ -109,19 +177,44 @@ int main() {
// BLOCKS: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
// BLOCKS: [[DONE]]
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_FIELD:%.+]] = getelementptr inbounds [[G_CAPTURE_TY]], [[G_CAPTURE_TY]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_SRC:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_CAPTURE_FIELD]]
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_SRCC:%.+]] = ptrtoint i{{[0-9]+}}* [[G_CAPTURE_SRC]] to i{{[0-9]+}}
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_DSTC:%.+]] = ptrtoint i{{[0-9]+}}* [[G_CAPTURE_DST]] to i{{[0-9]+}}
+ // TLS-BLOCKS: icmp ne i{{[0-9]+}} {{%.+}}, {{%.+}}
+ // TLS-BLOCKS: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+ // TLS-BLOCKS: [[NOT_MASTER]]
+ // TLS-BLOCKS: load i{{[0-9]+}}, i{{[0-9]+}}* [[G_CAPTURE_SRC]],
+ // TLS-BLOCKS: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* [[G_CAPTURE_DST]],
+ // TLS-BLOCKS: [[DONE]]
+
// BLOCKS: call {{.*}}i32 @__kmpc_cancel_barrier(
+ // TLS-BLOCKS: call {{.*}}i32 @__kmpc_cancel_barrier(
g = 1;
// BLOCKS: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}*
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS: call {{.*}}void {{%.+}}(i8
+
+ // TLS-BLOCKS: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-BLOCKS: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_CAPTURE_DST]]
+ // TLS-BLOCKS-NOT: [[G]]{{[[^:word:]]}}
+ // TLS-BLOCKS: call {{.*}}void {{%.+}}(i8
^{
// BLOCKS: define {{.+}} void {{@.+}}(i8*
+ // TLS-BLOCKS: define {{.+}} void {{@.+}}(i8*
g = 2;
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
// BLOCKS: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}*
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS: ret
+
+ // TLS-BLOCKS-NOT: [[G]]{{[[^:word:]]}}
+ // TLS-BLOCKS: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-BLOCKS: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_CAPTURE_DST]]
+ // TLS-BLOCKS-NOT: [[G]]{{[[^:word:]]}}
+ // TLS-BLOCKS: ret
}();
}
}();
@@ -153,12 +246,34 @@ int main() {
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
-//
+
+// TLS-CHECK-LABEL: @main
+// TLS-CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
+// TLS-CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* [[TEST]], [[S_FLOAT_TY]]*
+// TLS-CHECK-DAG: store i{{[0-9]+}}* [[T_VAR]], i{{[0-9]+}}** [[T_VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: store [2 x i{{[0-9]+}}]* [[VEC]], [2 x i{{[0-9]+}}]** [[VEC_FIELD:%.+]],
+// TLS-CHECK-DAG: store [2 x [[S_FLOAT_TY]]]* [[S_ARR]], [2 x [[S_FLOAT_TY]]]** [[S_ARR_FIELD:%.+]],
+// TLS-CHECK-DAG: store [[S_FLOAT_TY]]* [[VAR]], [[S_FLOAT_TY]]** [[VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: [[T_VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[VEC_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[S_ARR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[MAIN_MICROTASK:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// TLS-CHECK-DAG: store i{{[0-9]+}}* [[T_VAR]], i{{[0-9]+}}** [[T_VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: [[T_VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[MAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// TLS-CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
+// TLS-CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
+// TLS-CHECK: ret
+
// CHECK: define internal {{.*}}void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: define internal {{.*}}void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// TLS-CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+
// threadprivate_t_var = t_var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[T_VAR]]
// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
@@ -168,10 +283,24 @@ int main() {
// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR]],
// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load i32*, i32** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_LONG:%.+]] = ptrtoint i32* [[MASTER_REF]] to i{{[0-9]+}}
+// TLS-CHECK: icmp ne i{{[0-9]+}} [[MASTER_LONG]], ptrtoint (i{{[0-9]+}}* [[T_VAR]] to i{{[0-9]+}})
+// TLS-CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// TLS-CHECK: [[NOT_MASTER]]
+// TLS-CHECK: [[MASTER_VAL:%.+]] = load i32, i32* [[MASTER_REF]]
+// TLS-CHECK: store i32 [[MASTER_VAL]], i32* [[T_VAR]]
+
// threadprivate_vec = vec;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[VEC]]
// CHECK: call void @llvm.memcpy{{.*}}(i8* %{{.+}}, i8* bitcast ([2 x i{{[0-9]+}}]* [[VEC]] to i8*),
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [2 x i32]*, [2 x i32]** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_CAST:%.+]] = bitcast [2 x i32]* [[MASTER_REF]] to i8*
+// TLS-CHECK: call void @llvm.memcpy{{.*}}(i8* bitcast ([2 x i{{[0-9]+}}]* [[VEC]] to i8*), i8* [[MASTER_CAST]]
+
// threadprivate_s_arr = s_arr;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[S_ARR]]
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* {{%.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
@@ -182,19 +311,43 @@ int main() {
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_CAST:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[MASTER_REF]] to [[S_FLOAT_TY]]*
+// TLS-CHECK-DAG: [[S_ARR_SRC_BEGIN:%.+]] = phi [[S_FLOAT_TY]]* {{.*}}[[MASTER_CAST]]
+// TLS-CHECK-DAG: [[S_ARR_DST_BEGIN:%.+]] = phi [[S_FLOAT_TY]]* {{.*}}getelementptr inbounds ([2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0)
+// TLS-CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
+// TLS-CHECK-DAG: [[S_ARR_SRC_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_SRC_BEGIN]], i{{[0-9]+}} 1
+// TLS-CHECK-DAG: [[S_ARR_DST_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_DST_BEGIN]], i{{[0-9]+}} 1
+// TLS-CHECK: icmp eq [[S_FLOAT_TY]]* [[S_ARR_DST_END]], getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* getelementptr inbounds ([2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0), i{{[0-9]+}} 2)
+// TLS-CHECK: br i1 %{{.*}}, label %[[ARR_DONE:.+]], label {{.*}}
+// TLS-CHECK: [[ARR_DONE]]
+
// threadprivate_var = var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[VAR]]
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{%.+}}, [[S_FLOAT_TY]]* {{.*}}[[VAR]])
// CHECK: [[DONE]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[MASTER_FIELD]]
+// TLS-CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{.*}}[[VAR]], [[S_FLOAT_TY]]* {{.*}}[[MASTER_REF]])
+
// CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
+// TLS-CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// TLS-CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// TLS-CHECK: ret void
+
// CHECK: define internal {{.*}}void [[MAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: define internal {{.*}}void [[MAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// TLS-CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+
// threadprivate_t_var = t_var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[T_VAR]]
// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
@@ -205,9 +358,24 @@ int main() {
// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
// CHECK: [[DONE]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load i32*, i32** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_LONG:%.+]] = ptrtoint i32* [[MASTER_REF]] to i{{[0-9]+}}
+// TLS-CHECK: icmp ne i{{[0-9]+}} [[MASTER_LONG]], ptrtoint (i{{[0-9]+}}* [[T_VAR]] to i{{[0-9]+}})
+// TLS-CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// TLS-CHECK: [[NOT_MASTER]]
+// TLS-CHECK: [[MASTER_VAL:%.+]] = load i32, i32* [[MASTER_REF]]
+// TLS-CHECK: store i32 [[MASTER_VAL]], i32* [[T_VAR]]
+// TLS-CHECK: [[DONE]]
+
// CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
+// TLS-CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// TLS-CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// TLS-CHECK: ret void
+
// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[TEST]], [[S_INT_TY]]*
@@ -215,11 +383,30 @@ int main() {
// CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
+
+// TLS-CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
+// TLS-CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
+// TLS-CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[TEST]], [[S_INT_TY]]*
+// TLS-CHECK-DAG: store i{{[0-9]+}}* [[TMAIN_T_VAR]], i{{[0-9]+}}** [[T_VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: store [2 x i{{[0-9]+}}]* [[TMAIN_VEC]], [2 x i{{[0-9]+}}]** [[VEC_FIELD:%.+]],
+// TLS-CHECK-DAG: store [2 x [[S_INT_TY]]]* [[TMAIN_S_ARR]], [2 x [[S_INT_TY]]]** [[S_ARR_FIELD:%.+]],
+// TLS-CHECK-DAG: store [[S_INT_TY]]* [[TMAIN_VAR]], [[S_INT_TY]]** [[VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: [[T_VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[VEC_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[S_ARR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// TLS-CHECK-DAG: store i{{[0-9]+}}* [[TMAIN_T_VAR]], i{{[0-9]+}}** [[T_VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: [[T_VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
//
// CHECK: define internal {{.*}}void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+//
+// TLS-CHECK: define internal {{.*}}void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// TLS-CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// threadprivate_t_var = t_var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_T_VAR]]
@@ -230,10 +417,24 @@ int main() {
// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[TMAIN_T_VAR]],
// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load i32*, i32** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_LONG:%.+]] = ptrtoint i32* [[MASTER_REF]] to i{{[0-9]+}}
+// TLS-CHECK: icmp ne i{{[0-9]+}} [[MASTER_LONG]], ptrtoint (i{{[0-9]+}}* [[TMAIN_T_VAR]] to i{{[0-9]+}})
+// TLS-CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// TLS-CHECK: [[NOT_MASTER]]
+// TLS-CHECK: [[MASTER_VAL:%.+]] = load i32, i32* [[MASTER_REF]]
+// TLS-CHECK: store i32 [[MASTER_VAL]], i32* [[TMAIN_T_VAR]]
+
// threadprivate_vec = vec;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_VEC]]
// CHECK: call {{.*}}void @llvm.memcpy{{.*}}(i8* %{{.+}}, i8* bitcast ([2 x i{{[0-9]+}}]* [[TMAIN_VEC]] to i8*),
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [2 x i32]*, [2 x i32]** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_CAST:%.+]] = bitcast [2 x i32]* [[MASTER_REF]] to i8*
+// TLS-CHECK: call void @llvm.memcpy{{.*}}(i8* bitcast ([2 x i{{[0-9]+}}]* [[TMAIN_VEC]] to i8*), i8* [[MASTER_CAST]]
+
// threadprivate_s_arr = s_arr;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_S_ARR]]
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* {{%.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
@@ -244,19 +445,43 @@ int main() {
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_CAST:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[MASTER_REF]] to [[S_INT_TY]]*
+// TLS-CHECK-DAG: [[S_ARR_SRC_BEGIN:%.+]] = phi [[S_INT_TY]]* {{.*}}[[MASTER_CAST]]
+// TLS-CHECK-DAG: [[S_ARR_DST_BEGIN:%.+]] = phi [[S_INT_TY]]* {{.*}}getelementptr inbounds ([2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[TMAIN_S_ARR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0)
+// TLS-CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
+// TLS-CHECK-DAG: [[S_ARR_SRC_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_SRC_BEGIN]], i{{[0-9]+}} 1
+// TLS-CHECK-DAG: [[S_ARR_DST_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_DST_BEGIN]], i{{[0-9]+}} 1
+// TLS-CHECK: icmp eq [[S_INT_TY]]* [[S_ARR_DST_END]], getelementptr ([[S_INT_TY]], [[S_INT_TY]]* getelementptr inbounds ([2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[TMAIN_S_ARR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0), i{{[0-9]+}} 2)
+// TLS-CHECK: br i1 %{{.*}}, label %[[ARR_DONE:.+]], label {{.*}}
+// TLS-CHECK: [[ARR_DONE]]
+
// threadprivate_var = var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_VAR]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{%.+}}, [[S_INT_TY]]* {{.*}}[[TMAIN_VAR]])
// CHECK: [[DONE]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[MASTER_FIELD]]
+// TLS-CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{.*}}[[TMAIN_VAR]], [[S_INT_TY]]* {{.*}}[[MASTER_REF]])
+
// CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
+// TLS-CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// TLS-CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// TLS-CHECK: ret void
+
// CHECK: define internal {{.*}}void [[TMAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: define internal {{.*}}void [[TMAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// TLS-CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+
// threadprivate_t_var = t_var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_T_VAR]]
// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
@@ -267,12 +492,30 @@ int main() {
// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
// CHECK: [[DONE]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load i32*, i32** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_LONG:%.+]] = ptrtoint i32* [[MASTER_REF]] to i{{[0-9]+}}
+// TLS-CHECK: icmp ne i{{[0-9]+}} [[MASTER_LONG]], ptrtoint (i{{[0-9]+}}* [[TMAIN_T_VAR]] to i{{[0-9]+}})
+// TLS-CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// TLS-CHECK: [[NOT_MASTER]]
+// TLS-CHECK: [[MASTER_VAL:%.+]] = load i32, i32* [[MASTER_REF]]
+// TLS-CHECK: store i32 [[MASTER_VAL]], i32* [[TMAIN_T_VAR]]
+// TLS-CHECK: [[DONE]]
+
// CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
+// TLS-CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// TLS-CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// TLS-CHECK: ret void
+
#endif
#else
// ARRAY-LABEL: array_func
+// TLS-ARRAY: [[ARR_CAPTURETY:%.+]] = type { [2 x {{.*}}]*, [2 x {{.*}}]* }
+// TLS-ARRAY-LABEL: array_func
+
struct St {
int a, b;
St() : a(0), b(0) {}
@@ -287,6 +530,18 @@ void array_func() {
// ARRAY: call i8* @__kmpc_threadprivate_cached(
// ARRAY: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* bitcast ([2 x i32]* @{{.+}} to i8*), i64 8, i32 4, i1 false)
// ARRAY: call dereferenceable(8) %struct.St* @{{.+}}(%struct.St* %{{.+}}, %struct.St* dereferenceable(8) %{{.+}})
+
+// TLS-ARRAY-DAG: [[ARR_CAP1:%.+]] = getelementptr inbounds [[ARR_CAPTURETY]], [[ARR_CAPTURETY]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
+// TLS-ARRAY-DAG: [[ARR_CAP2:%.+]] = getelementptr inbounds [[ARR_CAPTURETY]], [[ARR_CAPTURETY]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 1
+// TLS-ARRAY-DAG: store [2 x {{.*}}]* @{{.*}}, [2 x {{.*}}]** [[ARR_CAP1]]
+// TLS-ARRAY-DAG: store [2 x {{.*}}]* @{{.*}}, [2 x {{.*}}]** [[ARR_CAP2]]
+// TLS-ARRAY: @__kmpc_fork_call(
+// TLS-ARRAY-DAG: call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast ([2 x i32]* @{{.+}} to i8*), i8* [[REF:%.+]], i64 8, i32 4, i1 false)
+// TLS-ARRAY-DAG: [[REF]] = bitcast [2 x i32]* [[REFT:%.+]] to i8*
+// TLS-ARRAY-DAG: [[REFT]] = load [2 x i32]*, [2 x i32]** [[FIELDADDR:%.+]],
+// TLS-ARRAY-DAG: [[FIELDADDR]] = getelementptr inbounds [[ARR_CAPTURETY]], [[ARR_CAPTURETY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}}
+// TLS-ARRAY: call dereferenceable(8) %struct.St* @{{.+}}(%struct.St* %{{.+}}, %struct.St* dereferenceable(8) %{{.+}})
+
#pragma omp threadprivate(a, s)
#pragma omp parallel copyin(a, s)
;
diff --git a/test/OpenMP/parallel_for_loop_messages.cpp b/test/OpenMP/parallel_for_loop_messages.cpp
index 09a15e2dc337..7d14a3c9b74c 100644
--- a/test/OpenMP/parallel_for_loop_messages.cpp
+++ b/test/OpenMP/parallel_for_loop_messages.cpp
@@ -353,12 +353,12 @@ public:
typedef int difference_type;
typedef std::random_access_iterator_tag iterator_category;
};
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'GoodIter' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
int operator-(GoodIter a, GoodIter b) { return 0; }
// expected-note@+1 3 {{candidate function not viable: requires single argument 'a', but 2 arguments were provided}}
GoodIter operator-(GoodIter a) { return a; }
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'int' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'int' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
GoodIter operator-(GoodIter a, int v) { return GoodIter(); }
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 1st argument}}
@@ -401,7 +401,7 @@ int test_with_random_access_iterator() {
#pragma omp parallel for
for (begin = GoodIter(0); begin < end; ++begin)
++begin;
-// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'const Iter0')}}
+// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'Iter0')}}
// expected-error@+2 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
#pragma omp parallel for
for (begin = begin0; begin < end; ++begin)
@@ -467,7 +467,7 @@ int test_with_random_access_iterator() {
#pragma omp parallel for
for (Iter1 I = begin1; I >= end1; ++I)
++I;
-// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+4 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// Initializer is constructor with all default params.
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
diff --git a/test/OpenMP/parallel_for_simd_loop_messages.cpp b/test/OpenMP/parallel_for_simd_loop_messages.cpp
index 0473b248a69d..85d8574ba9f4 100644
--- a/test/OpenMP/parallel_for_simd_loop_messages.cpp
+++ b/test/OpenMP/parallel_for_simd_loop_messages.cpp
@@ -353,12 +353,12 @@ public:
typedef int difference_type;
typedef std::random_access_iterator_tag iterator_category;
};
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'GoodIter' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
int operator-(GoodIter a, GoodIter b) { return 0; }
// expected-note@+1 3 {{candidate function not viable: requires single argument 'a', but 2 arguments were provided}}
GoodIter operator-(GoodIter a) { return a; }
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'int' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'int' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
GoodIter operator-(GoodIter a, int v) { return GoodIter(); }
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 1st argument}}
@@ -401,7 +401,7 @@ int test_with_random_access_iterator() {
#pragma omp parallel for simd
for (begin = GoodIter(0); begin < end; ++begin)
++begin;
-// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'const Iter0')}}
+// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'Iter0')}}
// expected-error@+2 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
#pragma omp parallel for simd
for (begin = begin0; begin < end; ++begin)
@@ -467,7 +467,7 @@ int test_with_random_access_iterator() {
#pragma omp parallel for simd
for (Iter1 I = begin1; I >= end1; ++I)
++I;
-// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+4 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// Initializer is constructor with all default params.
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
diff --git a/test/OpenMP/simd_linear_messages.cpp b/test/OpenMP/simd_linear_messages.cpp
index 78f905fd9460..6fac26290b4e 100644
--- a/test/OpenMP/simd_linear_messages.cpp
+++ b/test/OpenMP/simd_linear_messages.cpp
@@ -156,6 +156,7 @@ namespace C {
using A::x;
}
+int f;
int main(int argc, char **argv) {
double darr[100];
// expected-note@+1 {{in instantiation of function template specialization 'test_template<-4, double, int>' requested here}}
@@ -167,6 +168,8 @@ int main(int argc, char **argv) {
S5 g(5); // expected-note {{'g' defined here}}
int i;
int &j = i; // expected-note {{'j' defined here}}
+ #pragma omp simd linear(f) linear(f) // expected-error {{linear variable cannot be linear}} expected-note {{defined as linear}}
+ for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear // expected-error {{expected '(' after 'linear'}}
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
diff --git a/test/OpenMP/simd_loop_messages.cpp b/test/OpenMP/simd_loop_messages.cpp
index fe8e70a9b8a0..bd7dd5bf52d1 100644
--- a/test/OpenMP/simd_loop_messages.cpp
+++ b/test/OpenMP/simd_loop_messages.cpp
@@ -456,7 +456,7 @@ int test_with_random_access_iterator() {
++I;
// Initializer is constructor with all default params.
- // expected-error@+4 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+ // expected-error@+4 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+3 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
#pragma omp simd
diff --git a/test/PCH/objc_boxable.m b/test/PCH/objc_boxable.m
new file mode 100644
index 000000000000..2cd9c23d5417
--- /dev/null
+++ b/test/PCH/objc_boxable.m
@@ -0,0 +1,17 @@
+// Test objc_boxable update record
+
+// RUN: %clang_cc1 -x objective-c %S/objc_boxable_record.h -emit-pch -o %t1
+// RUN: %clang_cc1 -x objective-c %S/objc_boxable_record_attr.h -include-pch %t1 -emit-pch -o %t2
+// RUN: %clang_cc1 %s -include-pch %t2 -fsyntax-only -verify
+
+// expected-no-diagnostics
+
+__attribute__((objc_root_class))
+@interface NSValue
++ (NSValue *)valueWithBytes:(const void *)bytes objCType:(const char *)type;
+@end
+
+void doStuff(struct boxable b) {
+ id v = @(b);
+}
+
diff --git a/test/PCH/objc_boxable_record.h b/test/PCH/objc_boxable_record.h
new file mode 100644
index 000000000000..db2a3a8fc561
--- /dev/null
+++ b/test/PCH/objc_boxable_record.h
@@ -0,0 +1,5 @@
+// used with objc_boxable.m test
+struct boxable {
+ int dummy;
+};
+
diff --git a/test/PCH/objc_boxable_record_attr.h b/test/PCH/objc_boxable_record_attr.h
new file mode 100644
index 000000000000..96a1559846ad
--- /dev/null
+++ b/test/PCH/objc_boxable_record_attr.h
@@ -0,0 +1,3 @@
+// used with objc_boxable.m test
+typedef struct __attribute((objc_boxable)) boxable boxable;
+
diff --git a/test/Preprocessor/predefined-arch-macros.c b/test/Preprocessor/predefined-arch-macros.c
index 5126ef3d94cf..14fc49478af5 100644
--- a/test/Preprocessor/predefined-arch-macros.c
+++ b/test/Preprocessor/predefined-arch-macros.c
@@ -1748,3 +1748,12 @@
// RUN: | FileCheck %s -check-prefix=CHECK_SYSTEMZ_HTM
//
// CHECK_SYSTEMZ_HTM: #define __HTM__ 1
+//
+// RUN: %clang -fzvector -E -dM %s -o - 2>&1 \
+// RUN: -target s390x-unknown-linux \
+// RUN: | FileCheck %s -check-prefix=CHECK_SYSTEMZ_ZVECTOR
+// RUN: %clang -mzvector -E -dM %s -o - 2>&1 \
+// RUN: -target s390x-unknown-linux \
+// RUN: | FileCheck %s -check-prefix=CHECK_SYSTEMZ_ZVECTOR
+//
+// CHECK_SYSTEMZ_ZVECTOR: #define __VEC__ 10301
diff --git a/test/Sema/dllimport.c b/test/Sema/dllimport.c
index 3ca1baa299c1..d81aecd604c7 100644
--- a/test/Sema/dllimport.c
+++ b/test/Sema/dllimport.c
@@ -143,7 +143,7 @@ __declspec(dllimport) void redecl3(); // expected-note{{previous declaration is
void redecl4(); // expected-note{{previous declaration is here}}
void useRedecl4() { redecl4(); }
-__declspec(dllimport) void redecl4(); // expected-error{{redeclaration of 'redecl4' cannot add 'dllimport' attribute}}
+__declspec(dllimport) void redecl4(); // expected-warning{{redeclaration of 'redecl4' should not add 'dllimport' attribute}}
// Allow with a warning if the decl hasn't been used yet.
void redecl5(); // expected-note{{previous declaration is here}}
diff --git a/test/Sema/typo-correction.c b/test/Sema/typo-correction.c
index ff43064d5b75..4ef50570899c 100644
--- a/test/Sema/typo-correction.c
+++ b/test/Sema/typo-correction.c
@@ -49,3 +49,9 @@ extern double cabs(_Complex double z);
void fn1() {
cabs(errij); // expected-error {{use of undeclared identifier 'errij'}}
}
+
+extern long afunction(int); // expected-note {{'afunction' declared here}}
+void fn2() {
+ f(THIS_IS_AN_ERROR, // expected-error {{use of undeclared identifier 'THIS_IS_AN_ERROR'}}
+ afunction(afunction_)); // expected-error {{use of undeclared identifier 'afunction_'; did you mean 'afunction'?}}
+}
diff --git a/test/Sema/zvector.c b/test/Sema/zvector.c
new file mode 100644
index 000000000000..5220a629cfe1
--- /dev/null
+++ b/test/Sema/zvector.c
@@ -0,0 +1,1009 @@
+// RUN: %clang_cc1 -triple s390x-linux-gnu -fzvector \
+// RUN: -fno-lax-vector-conversions -W -Wall -Wconversion \
+// RUN: -Werror -fsyntax-only -verify %s
+
+vector signed char sc, sc2;
+vector unsigned char uc, uc2;
+vector bool char bc, bc2;
+
+vector signed short ss, ss2;
+vector unsigned short us, us2;
+vector bool short bs, bs2;
+
+vector signed int si, si2;
+vector unsigned int ui, ui2;
+vector bool int bi, bi2;
+
+vector signed long long sl, sl2;
+vector unsigned long long ul, ul2;
+vector bool long long bl, bl2;
+
+vector double fd, fd2;
+
+vector long ll; // expected-error {{cannot use 'long' with '__vector'}}
+vector float ff; // expected-error {{cannot use 'float' with '__vector'}}
+
+signed char sc_scalar;
+unsigned char uc_scalar;
+
+signed short ss_scalar;
+unsigned short us_scalar;
+
+signed int si_scalar;
+unsigned int ui_scalar;
+
+signed long sl_scalar;
+unsigned long ul_scalar;
+
+double fd_scalar;
+
+void foo(void)
+{
+ // -------------------------------------------------------------------------
+ // Test assignment.
+ // -------------------------------------------------------------------------
+
+ sc = sc2;
+ uc = uc2;
+ bc = bc2;
+
+ ss = ss2;
+ us = us2;
+ bs = bs2;
+
+ si = si2;
+ ui = ui2;
+ bi = bi2;
+
+ sl = sl2;
+ ul = ul2;
+ bl = bl2;
+ fd = fd2;
+
+ sc = uc2; // expected-error {{incompatible type}}
+ sc = bc2; // expected-error {{incompatible type}}
+ uc = sc2; // expected-error {{incompatible type}}
+ uc = bc2; // expected-error {{incompatible type}}
+ bc = sc2; // expected-error {{incompatible type}}
+ bc = uc2; // expected-error {{incompatible type}}
+
+ sc = sc_scalar; // expected-error {{incompatible type}}
+ sc = uc_scalar; // expected-error {{incompatible type}}
+ uc = sc_scalar; // expected-error {{incompatible type}}
+ uc = uc_scalar; // expected-error {{incompatible type}}
+ bc = sc_scalar; // expected-error {{incompatible type}}
+ bc = uc_scalar; // expected-error {{incompatible type}}
+
+ sc = ss2; // expected-error {{incompatible type}}
+ sc = si2; // expected-error {{incompatible type}}
+ sc = sl2; // expected-error {{incompatible type}}
+ sc = fd2; // expected-error {{incompatible type}}
+
+ ss = sc2; // expected-error {{incompatible type}}
+ si = sc2; // expected-error {{incompatible type}}
+ sl = sc2; // expected-error {{incompatible type}}
+ fd = sc2; // expected-error {{incompatible type}}
+
+ uc = us2; // expected-error {{incompatible type}}
+ uc = ui2; // expected-error {{incompatible type}}
+ uc = ul2; // expected-error {{incompatible type}}
+ uc = fd2; // expected-error {{incompatible type}}
+
+ us = uc2; // expected-error {{incompatible type}}
+ ui = uc2; // expected-error {{incompatible type}}
+ ul = uc2; // expected-error {{incompatible type}}
+ fd = uc2; // expected-error {{incompatible type}}
+
+ bc = us2; // expected-error {{incompatible type}}
+ bc = ui2; // expected-error {{incompatible type}}
+ bc = ul2; // expected-error {{incompatible type}}
+ bc = fd2; // expected-error {{incompatible type}}
+
+ bs = bc2; // expected-error {{incompatible type}}
+ bi = bc2; // expected-error {{incompatible type}}
+ bl = bc2; // expected-error {{incompatible type}}
+ fd = bc2; // expected-error {{incompatible type}}
+
+ // -------------------------------------------------------------------------
+ // Test casts to same element width.
+ // -------------------------------------------------------------------------
+
+ sc = (vector signed char)bc2;
+ bc = (vector bool char)uc2;
+ uc = (vector unsigned char)sc2;
+
+ ss = (vector signed short)bs2;
+ bs = (vector bool short)us2;
+ us = (vector unsigned short)ss2;
+
+ si = (vector signed int)bi2;
+ bi = (vector bool int)ui2;
+ ui = (vector unsigned int)si2;
+
+ sl = (vector signed long long)bl2;
+ bl = (vector bool long long)ul2;
+ ul = (vector unsigned long long)fd2;
+ fd = (vector double)sl2;
+
+ // -------------------------------------------------------------------------
+ // Test casts to different element width.
+ // -------------------------------------------------------------------------
+
+ sc = (vector signed char)bs2;
+ bc = (vector bool char)us2;
+ uc = (vector unsigned char)fd2;
+
+ ss = (vector signed short)bi2;
+ bs = (vector bool short)ui2;
+ us = (vector unsigned short)fd2;
+
+ si = (vector signed int)bl2;
+ bi = (vector bool int)ul2;
+ ui = (vector unsigned int)fd2;
+
+ sl = (vector signed long long)bc2;
+ bl = (vector bool long long)uc2;
+ ul = (vector unsigned long long)sc2;
+ fd = (vector double)sc2;
+
+ // -------------------------------------------------------------------------
+ // Test ++.
+ // -------------------------------------------------------------------------
+
+ ++sc2;
+ ++uc2;
+ ++bc2; // expected-error {{cannot increment}}
+
+ ++ss2;
+ ++us2;
+ ++bs2; // expected-error {{cannot increment}}
+
+ ++si2;
+ ++ui2;
+ ++bi2; // expected-error {{cannot increment}}
+
+ ++sl2;
+ ++ul2;
+ ++bl2; // expected-error {{cannot increment}}
+
+ ++fd2;
+
+ sc++;
+ uc++;
+ bc++; // expected-error {{cannot increment}}
+
+ ss++;
+ us++;
+ bs++; // expected-error {{cannot increment}}
+
+ si++;
+ ui++;
+ bi++; // expected-error {{cannot increment}}
+
+ sl++;
+ ul++;
+ bl++; // expected-error {{cannot increment}}
+
+ fd++;
+
+ // -------------------------------------------------------------------------
+ // Test --.
+ // -------------------------------------------------------------------------
+
+ --sc2;
+ --uc2;
+ --bc2; // expected-error {{cannot decrement}}
+
+ --ss2;
+ --us2;
+ --bs2; // expected-error {{cannot decrement}}
+
+ --si2;
+ --ui2;
+ --bi2; // expected-error {{cannot decrement}}
+
+ --sl2;
+ --ul2;
+ --bl2; // expected-error {{cannot decrement}}
+
+ --fd2;
+
+ sc--;
+ uc--;
+ bc--; // expected-error {{cannot decrement}}
+
+ ss--;
+ us--;
+ bs--; // expected-error {{cannot decrement}}
+
+ si--;
+ ui--;
+ bi--; // expected-error {{cannot decrement}}
+
+ sl--;
+ ul--;
+ bl--; // expected-error {{cannot decrement}}
+
+ fd--;
+
+ // -------------------------------------------------------------------------
+ // Test unary +.
+ // -------------------------------------------------------------------------
+
+ sc = +sc2;
+ uc = +uc2;
+ bc = +bc2; // expected-error {{invalid argument type}}
+
+ ss = +ss2;
+ us = +us2;
+ bs = +bs2; // expected-error {{invalid argument type}}
+
+ si = +si2;
+ ui = +ui2;
+ bi = +bi2; // expected-error {{invalid argument type}}
+
+ sl = +sl2;
+ ul = +ul2;
+ bl = +bl2; // expected-error {{invalid argument type}}
+
+ fd = +fd2;
+
+ sc = +si2; // expected-error {{assigning to}}
+ ui = +si2; // expected-error {{assigning to}}
+ ui = +bi2; // expected-error {{invalid argument type}}
+
+ // -------------------------------------------------------------------------
+ // Test unary -.
+ // -------------------------------------------------------------------------
+
+ sc = -sc2;
+ uc = -uc2;
+ bc = -bc2; // expected-error {{invalid argument type}}
+
+ ss = -ss2;
+ us = -us2;
+ bs = -bs2; // expected-error {{invalid argument type}}
+
+ si = -si2;
+ ui = -ui2;
+ bi = -bi2; // expected-error {{invalid argument type}}
+
+ sl = -sl2;
+ ul = -ul2;
+ bl = -bl2; // expected-error {{invalid argument type}}
+
+ fd = -fd2;
+
+ sc = -si2; // expected-error {{assigning to}}
+ ui = -si2; // expected-error {{assigning to}}
+ ui = -bi2; // expected-error {{invalid argument type}}
+
+ // -------------------------------------------------------------------------
+ // Test ~.
+ // -------------------------------------------------------------------------
+
+ sc = ~sc2;
+ uc = ~uc2;
+ bc = ~bc2;
+
+ ss = ~ss2;
+ us = ~us2;
+ bs = ~bs2;
+
+ si = ~si2;
+ ui = ~ui2;
+ bi = ~bi2;
+
+ sl = ~sl2;
+ ul = ~ul2;
+ bl = ~bl2;
+
+ fd = ~fd2; // expected-error {{invalid argument}}
+
+ sc = ~si2; // expected-error {{assigning to}}
+ ui = ~si2; // expected-error {{assigning to}}
+ ui = ~bi2; // expected-error {{assigning to}}
+
+ // -------------------------------------------------------------------------
+ // Test binary +.
+ // -------------------------------------------------------------------------
+
+ sc = sc + sc2;
+ sc = sc + uc2; // expected-error {{can't convert}}
+ sc = uc + sc2; // expected-error {{can't convert}}
+ sc = sc + bc2;
+ sc = bc + sc2;
+
+ uc = uc + uc2;
+ uc = sc + uc2; // expected-error {{can't convert}}
+ uc = uc + sc2; // expected-error {{can't convert}}
+ uc = bc + uc2;
+ uc = uc + bc2;
+
+ bc = bc + bc2; // expected-error {{invalid operands}}
+ bc = bc + uc2; // expected-error {{incompatible type}}
+ bc = uc + bc2; // expected-error {{incompatible type}}
+ bc = bc + sc2; // expected-error {{incompatible type}}
+ bc = sc + bc2; // expected-error {{incompatible type}}
+
+ sc = sc + sc_scalar; // expected-error {{can't convert}}
+ sc = sc + uc_scalar; // expected-error {{can't convert}}
+ sc = sc_scalar + sc; // expected-error {{can't convert}}
+ sc = uc_scalar + sc; // expected-error {{can't convert}}
+ uc = uc + sc_scalar; // expected-error {{can't convert}}
+ uc = uc + uc_scalar; // expected-error {{can't convert}}
+ uc = sc_scalar + uc; // expected-error {{can't convert}}
+ uc = uc_scalar + uc; // expected-error {{can't convert}}
+
+ ss = ss + ss2;
+ us = us + us2;
+ bs = bs + bs2; // expected-error {{invalid operands}}
+
+ si = si + si2;
+ ui = ui + ui2;
+ bi = bi + bi2; // expected-error {{invalid operands}}
+
+ sl = sl + sl2;
+ ul = ul + ul2;
+ bl = bl + bl2; // expected-error {{invalid operands}}
+
+ fd = fd + fd2;
+ fd = fd + ul2; // expected-error {{can't convert}}
+ fd = sl + fd2; // expected-error {{can't convert}}
+
+ sc += sc2;
+ sc += uc2; // expected-error {{can't convert}}
+ sc += bc2;
+
+ uc += uc2;
+ uc += sc2; // expected-error {{can't convert}}
+ uc += bc2;
+
+ bc += bc2; // expected-error {{invalid operands}}
+ bc += sc2; // expected-error {{can't convert}}
+ bc += uc2; // expected-error {{can't convert}}
+
+ sc += ss2; // expected-error {{can't convert}}
+ sc += si2; // expected-error {{can't convert}}
+ sc += sl2; // expected-error {{can't convert}}
+ sc += fd2; // expected-error {{can't convert}}
+
+ sc += sc_scalar; // expected-error {{can't convert}}
+ sc += uc_scalar; // expected-error {{can't convert}}
+ uc += sc_scalar; // expected-error {{can't convert}}
+ uc += uc_scalar; // expected-error {{can't convert}}
+
+ ss += ss2;
+ us += us2;
+ bs += bs2; // expected-error {{invalid operands}}
+
+ si += si2;
+ ui += ui2;
+ bi += bi2; // expected-error {{invalid operands}}
+
+ sl += sl2;
+ ul += ul2;
+ bl += bl2; // expected-error {{invalid operands}}
+
+ fd += fd2;
+
+ // -------------------------------------------------------------------------
+ // Test that binary + rules apply to binary - too.
+ // -------------------------------------------------------------------------
+
+ sc = sc - sc2;
+ uc = uc - uc2;
+ bc = bc - bc2; // expected-error {{invalid operands}}
+
+ sc = uc - sc2; // expected-error {{can't convert}}
+ sc = sc - bc2;
+ uc = bc - uc2;
+
+ sc -= sc2;
+ uc -= uc2;
+ bc -= bc2; // expected-error {{invalid operands}}
+
+ sc -= uc2; // expected-error {{can't convert}}
+ uc -= bc2;
+ bc -= sc2; // expected-error {{can't convert}}
+
+ ss -= ss2;
+ us -= us2;
+ bs -= bs2; // expected-error {{invalid operands}}
+
+ si -= si2;
+ ui -= ui2;
+ bi -= bi2; // expected-error {{invalid operands}}
+
+ sl -= sl2;
+ ul -= ul2;
+ bl -= bl2; // expected-error {{invalid operands}}
+
+ fd -= fd2;
+
+ // -------------------------------------------------------------------------
+ // Test that binary + rules apply to * too. 64-bit integer multiplication
+ // is not required by the spec and so isn't tested here.
+ // -------------------------------------------------------------------------
+
+ sc = sc * sc2;
+ uc = uc * uc2;
+ bc = bc * bc2; // expected-error {{invalid operands}}
+
+ sc = uc * sc2; // expected-error {{can't convert}}
+ sc = sc * bc2; // expected-error {{can't convert}}
+ uc = bc * uc2; // expected-error {{can't convert}}
+
+ sc *= sc2;
+ uc *= uc2;
+ bc *= bc2; // expected-error {{invalid operands}}
+
+ sc *= uc2; // expected-error {{can't convert}}
+ uc *= bc2; // expected-error {{can't convert}}
+ bc *= sc2; // expected-error {{can't convert}}
+
+ ss *= ss2;
+ us *= us2;
+ bs *= bs2; // expected-error {{invalid operands}}
+
+ si *= si2;
+ ui *= ui2;
+ bi *= bi2; // expected-error {{invalid operands}}
+
+ sl *= sl2;
+ ul *= ul2;
+ bl *= bl2; // expected-error {{invalid operands}}
+
+ fd *= fd2;
+
+ // -------------------------------------------------------------------------
+ // Test that * rules apply to / too.
+ // -------------------------------------------------------------------------
+
+ sc = sc / sc2;
+ uc = uc / uc2;
+ bc = bc / bc2; // expected-error {{invalid operands}}
+
+ sc = uc / sc2; // expected-error {{can't convert}}
+ sc = sc / bc2; // expected-error {{can't convert}}
+ uc = bc / uc2; // expected-error {{can't convert}}
+
+ sc /= sc2;
+ uc /= uc2;
+ bc /= bc2; // expected-error {{invalid operands}}
+
+ sc /= uc2; // expected-error {{can't convert}}
+ uc /= bc2; // expected-error {{can't convert}}
+ bc /= sc2; // expected-error {{can't convert}}
+
+ ss /= ss2;
+ us /= us2;
+ bs /= bs2; // expected-error {{invalid operands}}
+
+ si /= si2;
+ ui /= ui2;
+ bi /= bi2; // expected-error {{invalid operands}}
+
+ sl /= sl2;
+ ul /= ul2;
+ bl /= bl2; // expected-error {{invalid operands}}
+
+ fd /= fd2;
+
+ // -------------------------------------------------------------------------
+ // Test that / rules apply to % too, except that doubles are not allowed.
+ // -------------------------------------------------------------------------
+
+ sc = sc % sc2;
+ uc = uc % uc2;
+ bc = bc % bc2; // expected-error {{invalid operands}}
+
+ sc = uc % sc2; // expected-error {{can't convert}}
+ sc = sc % bc2; // expected-error {{can't convert}}
+ uc = bc % uc2; // expected-error {{can't convert}}
+
+ sc %= sc2;
+ uc %= uc2;
+ bc %= bc2; // expected-error {{invalid operands}}
+
+ sc %= uc2; // expected-error {{can't convert}}
+ uc %= bc2; // expected-error {{can't convert}}
+ bc %= sc2; // expected-error {{can't convert}}
+
+ ss %= ss2;
+ us %= us2;
+ bs %= bs2; // expected-error {{invalid operands}}
+
+ si %= si2;
+ ui %= ui2;
+ bi %= bi2; // expected-error {{invalid operands}}
+
+ sl %= sl2;
+ ul %= ul2;
+ bl %= bl2; // expected-error {{invalid operands}}
+
+ fd %= fd2; // expected-error {{invalid operands}}
+
+ // -------------------------------------------------------------------------
+ // Test &.
+ // -------------------------------------------------------------------------
+
+ sc = sc & sc2;
+ sc = sc & uc2; // expected-error {{can't convert}}
+ sc = uc & sc2; // expected-error {{can't convert}}
+ sc = sc & bc2;
+ sc = bc & sc2;
+
+ uc = uc & uc2;
+ uc = sc & uc2; // expected-error {{can't convert}}
+ uc = uc & sc2; // expected-error {{can't convert}}
+ uc = bc & uc2;
+ uc = uc & bc2;
+
+ bc = bc & bc2;
+ bc = bc & uc2; // expected-error {{incompatible type}}
+ bc = uc & bc2; // expected-error {{incompatible type}}
+ bc = bc & sc2; // expected-error {{incompatible type}}
+ bc = sc & bc2; // expected-error {{incompatible type}}
+
+ fd = fd & fd2; // expected-error {{invalid operands}}
+ fd = bl & fd2; // expected-error {{invalid operands}}
+ fd = fd & bl2; // expected-error {{invalid operands}}
+ fd = fd & sl2; // expected-error {{invalid operands}}
+ fd = fd & ul2; // expected-error {{invalid operands}}
+
+ sc &= sc2;
+ sc &= uc2; // expected-error {{can't convert}}
+ sc &= bc2;
+
+ uc &= uc2;
+ uc &= sc2; // expected-error {{can't convert}}
+ uc &= bc2;
+
+ bc &= bc2;
+ bc &= sc2; // expected-error {{can't convert}}
+ bc &= uc2; // expected-error {{can't convert}}
+
+ sc &= ss2; // expected-error {{can't convert}}
+ sc &= si2; // expected-error {{can't convert}}
+ sc &= sl2; // expected-error {{can't convert}}
+ sc &= fd2; // expected-error {{invalid operands}}
+
+ us &= bc2; // expected-error {{can't convert}}
+ ui &= bc2; // expected-error {{can't convert}}
+ ul &= bc2; // expected-error {{can't convert}}
+ fd &= bc2; // expected-error {{invalid operands}}
+
+ ss &= ss2;
+ us &= us2;
+ bs &= bs2;
+
+ si &= si2;
+ ui &= ui2;
+ bi &= bi2;
+
+ sl &= sl2;
+ ul &= ul2;
+ bl &= bl2;
+
+ // -------------------------------------------------------------------------
+ // Test that & rules apply to | too.
+ // -------------------------------------------------------------------------
+
+ sc = sc | sc2;
+ sc = sc | uc2; // expected-error {{can't convert}}
+ sc = sc | bc2;
+
+ uc = uc | uc2;
+ uc = sc | uc2; // expected-error {{can't convert}}
+ uc = bc | uc2;
+
+ bc = bc | bc2;
+ bc = uc | bc2; // expected-error {{incompatible type}}
+ bc = bc | sc2; // expected-error {{incompatible type}}
+
+ fd = fd | fd2; // expected-error {{invalid operands}}
+ fd = bl | fd2; // expected-error {{invalid operands}}
+
+ ss |= ss2;
+ us |= us2;
+ bs |= bs2;
+
+ si |= si2;
+ ui |= ui2;
+ bi |= bi2;
+
+ sl |= sl2;
+ ul |= ul2;
+ bl |= bl2;
+
+ fd |= bl2; // expected-error {{invalid operands}}
+ fd |= fd2; // expected-error {{invalid operands}}
+
+ // -------------------------------------------------------------------------
+ // Test that & rules apply to ^ too.
+ // -------------------------------------------------------------------------
+
+ sc = sc ^ sc2;
+ sc = sc ^ uc2; // expected-error {{can't convert}}
+ sc = sc ^ bc2;
+
+ uc = uc ^ uc2;
+ uc = sc ^ uc2; // expected-error {{can't convert}}
+ uc = bc ^ uc2;
+
+ bc = bc ^ bc2;
+ bc = uc ^ bc2; // expected-error {{incompatible type}}
+ bc = bc ^ sc2; // expected-error {{incompatible type}}
+
+ fd = fd ^ fd2; // expected-error {{invalid operands}}
+ fd = bl ^ fd2; // expected-error {{invalid operands}}
+
+ ss ^= ss2;
+ us ^= us2;
+ bs ^= bs2;
+
+ si ^= si2;
+ ui ^= ui2;
+ bi ^= bi2;
+
+ sl ^= sl2;
+ ul ^= ul2;
+ bl ^= bl2;
+
+ fd ^= bl2; // expected-error {{invalid operands}}
+ fd ^= fd2; // expected-error {{invalid operands}}
+
+ // -------------------------------------------------------------------------
+ // Test <<.
+ // -------------------------------------------------------------------------
+
+ sc = sc << sc2;
+ sc = sc << uc2;
+ sc = uc << sc2; // expected-error {{incompatible type}}
+ sc = sc << bc2; // expected-error {{invalid operands}}
+ sc = bc << sc2; // expected-error {{invalid operands}}
+
+ uc = uc << uc2;
+ uc = sc << uc2; // expected-error {{assigning to}}
+ uc = uc << sc2;
+ uc = bc << uc2; // expected-error {{invalid operands}}
+ uc = uc << bc2; // expected-error {{invalid operands}}
+
+ bc = bc << bc2; // expected-error {{invalid operands}}
+ bc = bc << uc2; // expected-error {{invalid operands}}
+ bc = uc << bc2; // expected-error {{invalid operands}}
+ bc = bc << sc2; // expected-error {{invalid operands}}
+ bc = sc << bc2; // expected-error {{invalid operands}}
+
+ sc = sc << 1;
+ sc = sc << 1.0f; // expected-error {{integer is required}}
+ sc = sc << sc_scalar;
+ sc = sc << uc_scalar;
+ sc = sc << ss_scalar;
+ sc = sc << us_scalar;
+ sc = sc << si_scalar;
+ sc = sc << ui_scalar;
+ sc = sc << sl_scalar;
+ sc = sc << ul_scalar;
+ sc = sc_scalar << sc; // expected-error {{first operand is not a vector}}
+ sc = uc_scalar << sc; // expected-error {{first operand is not a vector}}
+ uc = uc << sc_scalar;
+ uc = uc << uc_scalar;
+ uc = sc_scalar << uc; // expected-error {{first operand is not a vector}}
+ uc = uc_scalar << uc; // expected-error {{first operand is not a vector}}
+
+ ss = ss << ss2;
+ ss = ss << ss_scalar;
+ us = us << us2;
+ us = us << us_scalar;
+ bs = bs << bs2; // expected-error {{invalid operands}}
+
+ si = si << si2;
+ si = si << si_scalar;
+ ui = ui << ui2;
+ ui = ui << ui_scalar;
+ bi = bi << bi2; // expected-error {{invalid operands}}
+
+ sl = sl << sl2;
+ sl = sl << sl_scalar;
+ ul = ul << ul2;
+ ul = ul << ul_scalar;
+ bl = bl << bl2; // expected-error {{invalid operands}}
+
+ fd = fd << fd2; // expected-error {{integer is required}}
+ fd = fd << ul2; // expected-error {{integer is required}}
+ fd = sl << fd2; // expected-error {{integer is required}}
+
+ sc <<= sc2;
+ sc <<= uc2;
+ sc <<= bc2; // expected-error {{invalid operands}}
+ sc <<= sc_scalar;
+
+ uc <<= uc2;
+ uc <<= sc2;
+ uc <<= bc2; // expected-error {{invalid operands}}
+ uc <<= uc_scalar;
+
+ bc <<= bc2; // expected-error {{invalid operands}}
+ bc <<= sc2; // expected-error {{invalid operands}}
+ bc <<= uc2; // expected-error {{invalid operands}}
+
+ sc <<= ss2; // expected-error {{vector operands do not have the same number of elements}}
+ sc <<= si2; // expected-error {{vector operands do not have the same number of elements}}
+ sc <<= sl2; // expected-error {{vector operands do not have the same number of elements}}
+ sc <<= fd2; // expected-error {{integer is required}}
+
+ ss <<= ss2;
+ ss <<= ss_scalar;
+ us <<= us2;
+ us <<= us_scalar;
+ bs <<= bs2; // expected-error {{invalid operands}}
+
+ si <<= si2;
+ si <<= si_scalar;
+ ui <<= ui2;
+ ui <<= ui_scalar;
+ bi <<= bi2; // expected-error {{invalid operands}}
+
+ sl <<= sl2;
+ sl <<= sl_scalar;
+ ul <<= ul2;
+ ul <<= ul_scalar;
+ bl <<= bl2; // expected-error {{invalid operands}}
+
+ fd <<= fd2; // expected-error {{integer is required}}
+
+ // -------------------------------------------------------------------------
+ // Test >>.
+ // -------------------------------------------------------------------------
+
+ sc = sc >> sc2;
+ sc = sc >> uc2;
+ sc = uc >> sc2; // expected-error {{incompatible type}}
+ sc = sc >> bc2; // expected-error {{invalid operands}}
+ sc = bc >> sc2; // expected-error {{invalid operands}}
+
+ uc = uc >> uc2;
+ uc = sc >> uc2; // expected-error {{assigning to}}
+ uc = uc >> sc2;
+ uc = bc >> uc2; // expected-error {{invalid operands}}
+ uc = uc >> bc2; // expected-error {{invalid operands}}
+
+ bc = bc >> bc2; // expected-error {{invalid operands}}
+ bc = bc >> uc2; // expected-error {{invalid operands}}
+ bc = uc >> bc2; // expected-error {{invalid operands}}
+ bc = bc >> sc2; // expected-error {{invalid operands}}
+ bc = sc >> bc2; // expected-error {{invalid operands}}
+
+ sc = sc >> 1;
+ sc = sc >> 1.0f; // expected-error {{integer is required}}
+ sc = sc >> sc_scalar;
+ sc = sc >> uc_scalar;
+ sc = sc >> ss_scalar;
+ sc = sc >> us_scalar;
+ sc = sc >> si_scalar;
+ sc = sc >> ui_scalar;
+ sc = sc >> sl_scalar;
+ sc = sc >> ul_scalar;
+ sc = sc_scalar >> sc; // expected-error {{first operand is not a vector}}
+ sc = uc_scalar >> sc; // expected-error {{first operand is not a vector}}
+ uc = uc >> sc_scalar;
+ uc = uc >> uc_scalar;
+ uc = sc_scalar >> uc; // expected-error {{first operand is not a vector}}
+ uc = uc_scalar >> uc; // expected-error {{first operand is not a vector}}
+
+ ss = ss >> ss2;
+ ss = ss >> ss_scalar;
+ us = us >> us2;
+ us = us >> us_scalar;
+ bs = bs >> bs2; // expected-error {{invalid operands}}
+
+ si = si >> si2;
+ si = si >> si_scalar;
+ ui = ui >> ui2;
+ ui = ui >> ui_scalar;
+ bi = bi >> bi2; // expected-error {{invalid operands}}
+
+ sl = sl >> sl2;
+ sl = sl >> sl_scalar;
+ ul = ul >> ul2;
+ ul = ul >> ul_scalar;
+ bl = bl >> bl2; // expected-error {{invalid operands}}
+
+ fd = fd >> fd2; // expected-error {{integer is required}}
+ fd = fd >> ul2; // expected-error {{integer is required}}
+ fd = sl >> fd2; // expected-error {{integer is required}}
+
+ sc >>= sc2;
+ sc >>= uc2;
+ sc >>= bc2; // expected-error {{invalid operands}}
+ sc >>= sc_scalar;
+
+ uc >>= uc2;
+ uc >>= sc2;
+ uc >>= bc2; // expected-error {{invalid operands}}
+ uc >>= uc_scalar;
+
+ bc >>= bc2; // expected-error {{invalid operands}}
+ bc >>= sc2; // expected-error {{invalid operands}}
+ bc >>= uc2; // expected-error {{invalid operands}}
+
+ sc >>= ss2; // expected-error {{vector operands do not have the same number of elements}}
+ sc >>= si2; // expected-error {{vector operands do not have the same number of elements}}
+ sc >>= sl2; // expected-error {{vector operands do not have the same number of elements}}
+ sc >>= fd2; // expected-error {{integer is required}}
+
+ ss >>= ss2;
+ ss >>= ss_scalar;
+ us >>= us2;
+ us >>= us_scalar;
+ bs >>= bs2; // expected-error {{invalid operands}}
+
+ si >>= si2;
+ si >>= si_scalar;
+ ui >>= ui2;
+ ui >>= ui_scalar;
+ bi >>= bi2; // expected-error {{invalid operands}}
+
+ sl >>= sl2;
+ sl >>= sl_scalar;
+ ul >>= ul2;
+ ul >>= ul_scalar;
+ bl >>= bl2; // expected-error {{invalid operands}}
+
+ fd >>= fd2; // expected-error {{integer is required}}
+
+ // -------------------------------------------------------------------------
+ // Test ==.
+ // -------------------------------------------------------------------------
+
+ (void)(sc == sc2);
+ (void)(uc == uc2);
+ (void)(bc == bc2);
+
+ (void)(sc == uc); // expected-error {{can't convert}}
+ (void)(sc == bc);
+
+ (void)(uc == sc); // expected-error {{can't convert}}
+ (void)(uc == bc);
+
+ (void)(bc == sc);
+ (void)(bc == uc);
+
+ (void)(ss == ss2);
+ (void)(us == us2);
+ (void)(bs == bs2);
+
+ (void)(si == si2);
+ (void)(ui == ui2);
+ (void)(bi == bi2);
+
+ (void)(sl == sl2);
+ (void)(ul == ul2);
+ (void)(bl == bl2);
+ (void)(fd == fd2);
+
+ (void)(fd == ul); // expected-error {{can't convert}}
+ (void)(ul == fd); // expected-error {{can't convert}}
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to != too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc != sc2);
+ (void)(uc != uc2);
+ (void)(bc != bc2);
+
+ (void)(sc != uc); // expected-error {{can't convert}}
+ (void)(sc != bc);
+
+ (void)(ss != ss2);
+ (void)(us != us2);
+ (void)(bs != bs2);
+
+ (void)(si != si2);
+ (void)(ui != ui2);
+ (void)(bi != bi2);
+
+ (void)(sl != sl2);
+ (void)(ul != ul2);
+ (void)(bl != bl2);
+ (void)(fd != fd2);
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to <= too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc <= sc2);
+ (void)(uc <= uc2);
+ (void)(bc <= bc2);
+
+ (void)(sc <= uc); // expected-error {{can't convert}}
+ (void)(sc <= bc);
+
+ (void)(ss <= ss2);
+ (void)(us <= us2);
+ (void)(bs <= bs2);
+
+ (void)(si <= si2);
+ (void)(ui <= ui2);
+ (void)(bi <= bi2);
+
+ (void)(sl <= sl2);
+ (void)(ul <= ul2);
+ (void)(bl <= bl2);
+ (void)(fd <= fd2);
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to >= too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc >= sc2);
+ (void)(uc >= uc2);
+ (void)(bc >= bc2);
+
+ (void)(sc >= uc); // expected-error {{can't convert}}
+ (void)(sc >= bc);
+
+ (void)(ss >= ss2);
+ (void)(us >= us2);
+ (void)(bs >= bs2);
+
+ (void)(si >= si2);
+ (void)(ui >= ui2);
+ (void)(bi >= bi2);
+
+ (void)(sl >= sl2);
+ (void)(ul >= ul2);
+ (void)(bl >= bl2);
+ (void)(fd >= fd2);
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to < too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc < sc2);
+ (void)(uc < uc2);
+ (void)(bc < bc2);
+
+ (void)(sc < uc); // expected-error {{can't convert}}
+ (void)(sc < bc);
+
+ (void)(ss < ss2);
+ (void)(us < us2);
+ (void)(bs < bs2);
+
+ (void)(si < si2);
+ (void)(ui < ui2);
+ (void)(bi < bi2);
+
+ (void)(sl < sl2);
+ (void)(ul < ul2);
+ (void)(bl < bl2);
+ (void)(fd < fd2);
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to > too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc > sc2);
+ (void)(uc > uc2);
+ (void)(bc > bc2);
+
+ (void)(sc > uc); // expected-error {{can't convert}}
+ (void)(sc > bc);
+
+ (void)(ss > ss2);
+ (void)(us > us2);
+ (void)(bs > bs2);
+
+ (void)(si > si2);
+ (void)(ui > ui2);
+ (void)(bi > bi2);
+
+ (void)(sl > sl2);
+ (void)(ul > ul2);
+ (void)(bl > bl2);
+ (void)(fd > fd2);
+}
diff --git a/test/SemaCXX/warn-pessmizing-move.cpp b/test/SemaCXX/warn-pessmizing-move.cpp
index 6b49944f2878..71e99dca007b 100644
--- a/test/SemaCXX/warn-pessmizing-move.cpp
+++ b/test/SemaCXX/warn-pessmizing-move.cpp
@@ -23,10 +23,6 @@ A test1(A a1) {
return a1;
return a2;
return std::move(a1);
- // expected-warning@-1{{prevents copy elision}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(a2);
// expected-warning@-1{{prevents copy elision}}
// expected-note@-2{{remove std::move call}}
@@ -46,10 +42,6 @@ B test2(A a1, B b1) {
return b1;
return b2;
return std::move(b1);
- // expected-warning@-1{{prevents copy elision}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(b2);
// expected-warning@-1{{prevents copy elision}}
// expected-note@-2{{remove std::move call}}
@@ -163,9 +155,10 @@ A test7() {
#define wrap1(x) x
#define wrap2(x) x
-// Macro test. Since the std::move call is outside the macro, it is
+// Macro test. Since the std::move call is outside the macro, it is
// safe to suggest a fix-it.
-A test8(A a) {
+A test8() {
+ A a;
return std::move(a);
// expected-warning@-1{{prevents copy elision}}
// expected-note@-2{{remove std::move call}}
@@ -184,7 +177,8 @@ A test8(A a) {
}
#define test9 \
- A test9(A a) { \
+ A test9() { \
+ A a; \
return std::move(a); \
}
@@ -196,8 +190,40 @@ test9
#define return_a return std::move(a)
// Macro test.  The std::move call is inside the macro, so no fix-it is suggested.
-A test10(A a) {
+A test10() {
+ A a;
return_a;
// expected-warning@-1{{prevents copy elision}}
// CHECK-NOT: fix-it
}
+
+namespace templates {
+ struct A {};
+ struct B { B(A); };
+
+ // Warn once here since the type is not dependent.
+ template <typename T>
+ A test1() {
+ A a;
+ return std::move(a);
+ // expected-warning@-1{{prevents copy elision}}
+ // expected-note@-2{{remove std::move call}}
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:12-[[@LINE-3]]:22}:""
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:23-[[@LINE-4]]:24}:""
+ }
+ void run_test1() {
+ test1<A>();
+ test1<B>();
+ }
+
+  // T1 and T2 may not be the same, so the warning may not always apply.
+ template <typename T1, typename T2>
+ T1 test2() {
+ T2 t;
+ return std::move(t);
+ }
+ void run_test2() {
+ test2<A, A>();
+ test2<B, A>();
+ }
+}
diff --git a/test/SemaCXX/warn-redundant-move.cpp b/test/SemaCXX/warn-redundant-move.cpp
index feb9e6f408d9..abfb001fa8e6 100644
--- a/test/SemaCXX/warn-redundant-move.cpp
+++ b/test/SemaCXX/warn-redundant-move.cpp
@@ -1,5 +1,6 @@
// RUN: %clang_cc1 -fsyntax-only -Wredundant-move -std=c++11 -verify %s
// RUN: %clang_cc1 -fsyntax-only -Wredundant-move -std=c++11 -fdiagnostics-parseable-fixits %s 2>&1 | FileCheck %s
+// RUN: %clang_cc1 -fsyntax-only -std=c++11 %s -ast-dump | FileCheck %s --check-prefix=CHECK-AST
// definitions for std::move
namespace std {
@@ -12,6 +13,7 @@ template <class T> typename remove_reference<T>::type &&move(T &&t);
}
}
+// test1 and test2 should not warn until after implementation of DR1579.
struct A {};
struct B : public A {};
@@ -20,15 +22,7 @@ A test1(B b1) {
return b1;
return b2;
return std::move(b1);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(b2);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
}
struct C {
@@ -46,25 +40,9 @@ C test2(A a1, B b1) {
return b2;
return std::move(a1);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(a2);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(b1);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(b2);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
}
// Copy of tests above with types changed to reference types.
@@ -90,3 +68,49 @@ C test4(A& a1, B& b1) {
return std::move(b1);
return std::move(b2);
}
+
+// PR23819, case 2
+struct D {};
+D test5(D d) {
+ return d;
+ // Verify the implicit move from the AST dump
+ // CHECK-AST: ReturnStmt{{.*}}line:[[@LINE-2]]
+ // CHECK-AST-NEXT: CXXConstructExpr{{.*}}struct D{{.*}}void (struct D &&)
+ // CHECK-AST-NEXT: ImplicitCastExpr
+ // CHECK-AST-NEXT: DeclRefExpr{{.*}}ParmVar{{.*}}'d'
+
+ return std::move(d);
+ // expected-warning@-1{{redundant move in return statement}}
+ // expected-note@-2{{remove std::move call here}}
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:21-[[@LINE-4]]:22}:""
+}
+
+namespace templates {
+ struct A {};
+ struct B { B(A); };
+
+ // Warn once here since the type is not dependent.
+ template <typename T>
+ A test1(A a) {
+ return std::move(a);
+ // expected-warning@-1{{redundant move in return statement}}
+ // expected-note@-2{{remove std::move call here}}
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:12-[[@LINE-3]]:22}:""
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:23-[[@LINE-4]]:24}:""
+ }
+ void run_test1() {
+ test1<A>(A());
+ test1<B>(A());
+ }
+
+  // T1 and T2 may not be the same, so the warning may not always apply.
+ template <typename T1, typename T2>
+ T1 test2(T2 t) {
+ return std::move(t);
+ }
+ void run_test2() {
+ test2<A, A>(A());
+ test2<B, A>(A());
+ }
+}
diff --git a/test/SemaObjC/circular-container.m b/test/SemaObjC/circular-container.m
index 1a2a24e8985a..1f06595f77be 100644
--- a/test/SemaObjC/circular-container.m
+++ b/test/SemaObjC/circular-container.m
@@ -144,3 +144,64 @@ void checkNSMutableOrderedSet() {
[s replaceObjectAtIndex:0 withObject:s]; // expected-warning {{adding 's' to 's' might cause circular dependency in container}}
}
+// Test subclassing
+
+@interface FootableSet : NSMutableSet
+@end
+
+@implementation FootableSet
+- (void)meth {
+ [super addObject:self]; // expected-warning {{adding 'self' to 'super' might cause circular dependency in container}}
+ [super addObject:nil]; // no-warning
+ [self addObject:self]; // expected-warning {{adding 'self' to 'self' might cause circular dependency in container}}
+}
+@end
+
+@interface FootableArray : NSMutableArray
+@end
+
+@implementation FootableArray
+- (void)meth {
+ [super addObject:self]; // expected-warning {{adding 'self' to 'super' might cause circular dependency in container}}
+ [super addObject:nil]; // no-warning
+ [self addObject:self]; // expected-warning {{adding 'self' to 'self' might cause circular dependency in container}}
+}
+@end
+
+@interface FootableDictionary : NSMutableDictionary
+@end
+
+@implementation FootableDictionary
+- (void)meth {
+ [super setObject:self forKey:@"key"]; // expected-warning {{adding 'self' to 'super' might cause circular dependency in container}}
+ [super setObject:nil forKey:@"key"]; // no-warning
+ [self setObject:self forKey:@"key"]; // expected-warning {{adding 'self' to 'self' might cause circular dependency in container}}
+}
+@end
+
+
+void subclassingNSMutableArray() {
+  FootableArray *a = nil; // expected-note 5 {{'a' declared here}}
+
+ [a addObject:a]; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+ [a insertObject:a atIndex:0]; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+ [a replaceObjectAtIndex:0 withObject:a]; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+ [a setObject:a atIndexedSubscript:0]; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+ a[0] = a; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+}
+
+void subclassingNSMutableDictionary() {
+ FootableDictionary *d = nil; // expected-note 4 {{'d' declared here}}
+
+ [d setObject:d forKey:@"key"]; // expected-warning {{adding 'd' to 'd' might cause circular dependency in container}}
+ [d setObject:d forKeyedSubscript:@"key"]; // expected-warning {{adding 'd' to 'd' might cause circular dependency in container}}
+ [d setValue:d forKey:@"key"]; // expected-warning {{adding 'd' to 'd' might cause circular dependency in container}}
+ d[@"key"] = d; // expected-warning {{adding 'd' to 'd' might cause circular dependency in container}}
+}
+
+void subclassingNSMutableSet() {
+ FootableSet *s = nil; // expected-note {{'s' declared here}}
+
+ [s addObject:s]; // expected-warning {{adding 's' to 's' might cause circular dependency in container}}
+}
+
diff --git a/test/SemaObjC/comptypes-9.m b/test/SemaObjC/comptypes-9.m
index cc6932dcdbf3..7064f167aaf3 100644
--- a/test/SemaObjC/comptypes-9.m
+++ b/test/SemaObjC/comptypes-9.m
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 -fsyntax-only %s
-// FIXME: This test case tests the patch applied in: http://lists.cs.uiuc.edu/pipermail/cfe-commits/Week-of-Mon-20080602/006017.html
+// FIXME: This test case tests the patch applied in: http://lists.llvm.org/pipermail/cfe-commits/Week-of-Mon-20080602/006017.html
// Eventually that logic should be treated as an extension.
typedef signed char BOOL;
diff --git a/test/SemaObjC/objc-boxed-expressions-nsvalue.m b/test/SemaObjC/objc-boxed-expressions-nsvalue.m
new file mode 100644
index 000000000000..1599f28c7944
--- /dev/null
+++ b/test/SemaObjC/objc-boxed-expressions-nsvalue.m
@@ -0,0 +1,101 @@
+// RUN: %clang_cc1 -fsyntax-only -triple x86_64-apple-macosx10.9 -verify %s
+
+#define BOXABLE __attribute__((objc_boxable))
+
+typedef struct BOXABLE _NSPoint {
+ int dummy;
+} NSPoint;
+
+typedef struct BOXABLE _NSSize {
+ int dummy;
+} NSSize;
+
+typedef struct BOXABLE _NSRect {
+ int dummy;
+} NSRect;
+
+typedef struct BOXABLE _CGPoint {
+ int dummy;
+} CGPoint;
+
+typedef struct BOXABLE _CGSize {
+ int dummy;
+} CGSize;
+
+typedef struct BOXABLE _CGRect {
+ int dummy;
+} CGRect;
+
+typedef struct BOXABLE _NSRange {
+ int dummy;
+} NSRange;
+
+struct _NSEdgeInsets {
+ int dummy;
+};
+
+typedef struct BOXABLE _NSEdgeInsets NSEdgeInsets;
+
+typedef struct _SomeStruct {
+ double d;
+} SomeStruct;
+
+typedef union BOXABLE _BoxableUnion {
+ int dummy;
+} BoxableUnion;
+
+void checkNSValueDiagnostic() {
+ NSRect rect;
+ id value = @(rect); // expected-error{{NSValue must be available to use Objective-C boxed expressions}}
+}
+
+@interface NSValue
++ (NSValue *)valueWithBytes:(const void *)value objCType:(const char *)type;
+@end
+
+int main() {
+ NSPoint ns_point;
+ id ns_point_value = @(ns_point);
+
+ NSSize ns_size;
+ id ns_size_value = @(ns_size);
+
+ NSRect ns_rect;
+ id ns_rect_value = @(ns_rect);
+
+ CGPoint cg_point;
+ id cg_point_value = @(cg_point);
+
+ CGSize cg_size;
+ id cg_size_value = @(cg_size);
+
+ CGRect cg_rect;
+ id cg_rect_value = @(cg_rect);
+
+ NSRange ns_range;
+ id ns_range_value = @(ns_range);
+
+ NSEdgeInsets edge_insets;
+ id edge_insets_object = @(edge_insets);
+
+ BoxableUnion boxable_union;
+ id boxed_union = @(boxable_union);
+
+ SomeStruct s;
+ id err = @(s); // expected-error{{illegal type 'SomeStruct' (aka 'struct _SomeStruct') used in a boxed expression}}
+}
+
+CGRect getRect() {
+ CGRect r;
+ return r;
+}
+
+SomeStruct getSomeStruct() {
+ SomeStruct s;
+ return s;
+}
+
+void rvalue() {
+ id rv_rect = @(getRect());
+ id rv_some_struct = @(getSomeStruct()); // expected-error {{illegal type 'SomeStruct' (aka 'struct _SomeStruct') used in a boxed expression}}
+}
diff --git a/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm b/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
new file mode 100644
index 000000000000..375b9555fcaf
--- /dev/null
+++ b/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
@@ -0,0 +1,118 @@
+// RUN: %clang_cc1 -fsyntax-only -triple x86_64-apple-macosx10.9 -verify %s
+
+#define BOXABLE __attribute__((objc_boxable))
+
+typedef struct BOXABLE _NSPoint {
+ int dummy;
+} NSPoint;
+
+typedef struct BOXABLE _NSSize {
+ int dummy;
+} NSSize;
+
+typedef struct BOXABLE _NSRect {
+ int dummy;
+} NSRect;
+
+typedef struct BOXABLE _CGPoint {
+ int dummy;
+} CGPoint;
+
+typedef struct BOXABLE _CGSize {
+ int dummy;
+} CGSize;
+
+typedef struct BOXABLE _CGRect {
+ int dummy;
+} CGRect;
+
+typedef struct BOXABLE _NSRange {
+ int dummy;
+} NSRange;
+
+typedef struct BOXABLE _NSEdgeInsets {
+ int dummy;
+} NSEdgeInsets;
+
+typedef struct BOXABLE _NSEdgeInsets NSEdgeInsets;
+
+typedef struct _SomeStruct {
+ double d;
+} SomeStruct;
+
+struct BOXABLE NonTriviallyCopyable {
+ double d;
+ NonTriviallyCopyable() {}
+ NonTriviallyCopyable(const NonTriviallyCopyable &obj) {}
+};
+
+void checkNSValueDiagnostic() {
+ NSRect rect;
+ id value = @(rect); // expected-error{{NSValue must be available to use Objective-C boxed expressions}}
+}
+
+@interface NSValue
++ (NSValue *)valueWithBytes:(const void *)value objCType:(const char *)type;
+@end
+
+int main() {
+ NSPoint ns_point;
+ id ns_point_value = @(ns_point);
+
+ NSSize ns_size;
+ id ns_size_value = @(ns_size);
+
+ NSRect ns_rect;
+ id ns_rect_value = @(ns_rect);
+
+ CGPoint cg_point;
+ id cg_point_value = @(cg_point);
+
+ CGSize cg_size;
+ id cg_size_value = @(cg_size);
+
+ CGRect cg_rect;
+ id cg_rect_value = @(cg_rect);
+
+ NSRange ns_range;
+ id ns_range_value = @(ns_range);
+
+ NSEdgeInsets edge_insets;
+ id edge_insets_object = @(edge_insets);
+
+ SomeStruct s;
+ id err = @(s); // expected-error{{illegal type 'SomeStruct' (aka '_SomeStruct') used in a boxed expression}}
+
+ NonTriviallyCopyable ntc;
+ id ntcErr = @(ntc); // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
+}
+
+CGRect getRect() {
+ CGRect r;
+ return r;
+}
+
+SomeStruct getSomeStruct() {
+ SomeStruct s;
+ return s;
+}
+
+void rvalue() {
+ id rv_rect = @(getRect());
+ id rv_some_struct = @(getSomeStruct()); // expected-error {{illegal type 'SomeStruct' (aka '_SomeStruct') used in a boxed expression}}
+}
+
+template <class T> id box(T value) { return @(value); } // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
+void test_template_1(NSRect rect, NonTriviallyCopyable ntc) {
+ id x = box(rect);
+ id y = box(ntc); // expected-note{{in instantiation of function template specialization 'box<NonTriviallyCopyable>' requested here}}
+}
+
+template <unsigned i> id boxRect(NSRect rect) { return @(rect); }
+template <unsigned i> id boxNTC(NonTriviallyCopyable ntc) { return @(ntc); } // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
+void test_template_2(NSRect rect, NonTriviallyCopyable ntc) {
+ id x = boxRect<0>(rect);
+ id y = boxNTC<0>(ntc);
+}
+
+
diff --git a/test/SemaTemplate/crash-unparsed-exception.cpp b/test/SemaTemplate/crash-unparsed-exception.cpp
new file mode 100644
index 000000000000..1226b35deb70
--- /dev/null
+++ b/test/SemaTemplate/crash-unparsed-exception.cpp
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify -fcxx-exceptions -fexceptions %s
+
+struct A {
+ virtual ~A();
+};
+template <class>
+struct B {};
+struct C {
+ template <typename>
+ struct D {
+ ~D() throw();
+ };
+ struct E : A {
+ D<int> d; //expected-error{{exception specification is not available until end of class definition}}
+ };
+ B<int> b; //expected-note{{in instantiation of template class 'B<int>' requested here}}
+};
diff --git a/tools/arcmt-test/arcmt-test.cpp b/tools/arcmt-test/arcmt-test.cpp
index c269e017f759..7c8e46aee067 100644
--- a/tools/arcmt-test/arcmt-test.cpp
+++ b/tools/arcmt-test/arcmt-test.cpp
@@ -132,7 +132,7 @@ static bool checkForMigration(StringRef resourcesPath,
return false;
arcmt::checkForManualIssues(CI, CI.getFrontendOpts().Inputs[0],
- std::make_shared<RawPCHContainerOperations>(),
+ std::make_shared<PCHContainerOperations>(),
Diags->getClient());
return Diags->getClient()->getNumErrors() > 0;
}
@@ -171,8 +171,8 @@ static bool performTransformations(StringRef resourcesPath,
if (!origCI.getLangOpts()->ObjC1)
return false;
- MigrationProcess migration(
- origCI, std::make_shared<RawPCHContainerOperations>(), DiagClient);
+ MigrationProcess migration(origCI, std::make_shared<PCHContainerOperations>(),
+ DiagClient);
std::vector<TransformFn>
transforms = arcmt::getAllTransformations(origCI.getLangOpts()->getGC(),
diff --git a/tools/c-index-test/c-index-test.c b/tools/c-index-test/c-index-test.c
index 980f341824fa..eeeb832cd873 100644
--- a/tools/c-index-test/c-index-test.c
+++ b/tools/c-index-test/c-index-test.c
@@ -255,6 +255,17 @@ static int parse_remapped_files_with_try(int try_idx,
if (ret)
return ret;
+ if (num_unsaved_files_no_try_idx == 0) {
+ *unsaved_files = unsaved_files_try_idx;
+ *num_unsaved_files = num_unsaved_files_try_idx;
+ return 0;
+ }
+ if (num_unsaved_files_try_idx == 0) {
+ *unsaved_files = unsaved_files_no_try_idx;
+ *num_unsaved_files = num_unsaved_files_no_try_idx;
+ return 0;
+ }
+
*num_unsaved_files = num_unsaved_files_no_try_idx + num_unsaved_files_try_idx;
*unsaved_files
= (struct CXUnsavedFile *)realloc(unsaved_files_no_try_idx,
diff --git a/tools/clang-check/CMakeLists.txt b/tools/clang-check/CMakeLists.txt
index d1572cb16639..04151a8e0331 100644
--- a/tools/clang-check/CMakeLists.txt
+++ b/tools/clang-check/CMakeLists.txt
@@ -11,7 +11,6 @@ add_clang_executable(clang-check
target_link_libraries(clang-check
clangAST
clangBasic
- clangCodeGen
clangDriver
clangFrontend
clangRewriteFrontend
diff --git a/tools/clang-check/ClangCheck.cpp b/tools/clang-check/ClangCheck.cpp
index 2682e0fcd83f..a9934c978d30 100644
--- a/tools/clang-check/ClangCheck.cpp
+++ b/tools/clang-check/ClangCheck.cpp
@@ -160,8 +160,7 @@ int main(int argc, const char **argv) {
CommonOptionsParser OptionsParser(argc, argv, ClangCheckCategory);
ClangTool Tool(OptionsParser.getCompilations(),
- OptionsParser.getSourcePathList(),
- std::make_shared<clang::ObjectFilePCHContainerOperations>());
+ OptionsParser.getSourcePathList());
// Clear adjusters because -fsyntax-only is inserted by the default chain.
Tool.clearArgumentsAdjusters();
diff --git a/tools/driver/cc1_main.cpp b/tools/driver/cc1_main.cpp
index 65f845ddc458..8240561236b2 100644
--- a/tools/driver/cc1_main.cpp
+++ b/tools/driver/cc1_main.cpp
@@ -65,10 +65,14 @@ void initializePollyPasses(llvm::PassRegistry &Registry);
#endif
int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
- std::unique_ptr<CompilerInstance> Clang(new CompilerInstance(
- std::make_shared<ObjectFilePCHContainerOperations>()));
+ std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ // Register the support for object-file-wrapped Clang modules.
+ auto PCHOps = Clang->getPCHContainerOperations();
+ PCHOps->registerWriter(llvm::make_unique<ObjectFilePCHContainerWriter>());
+ PCHOps->registerReader(llvm::make_unique<ObjectFilePCHContainerReader>());
+
// Initialize targets first, so that --version shows registered targets.
llvm::InitializeAllTargets();
llvm::InitializeAllTargetMCs();
diff --git a/tools/driver/cc1as_main.cpp b/tools/driver/cc1as_main.cpp
index aa92541953fa..fd0fbb4c5a26 100644
--- a/tools/driver/cc1as_main.cpp
+++ b/tools/driver/cc1as_main.cpp
@@ -406,6 +406,9 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
Failed = Parser->Run(Opts.NoInitialTextSection);
}
+ // Close Streamer first.
+ // It might have a reference to the output stream.
+ Str.reset();
// Close the output stream early.
BOS.reset();
FDOS.reset();
diff --git a/tools/libclang/CIndex.cpp b/tools/libclang/CIndex.cpp
index 6e48bab9f509..8225a6c44293 100644
--- a/tools/libclang/CIndex.cpp
+++ b/tools/libclang/CIndex.cpp
@@ -2884,8 +2884,8 @@ CXIndex clang_createIndex(int excludeDeclarationsFromPCH,
llvm::InitializeAllAsmPrinters();
llvm::InitializeAllAsmParsers();
- CIndexer *CIdxr =
- new CIndexer(std::make_shared<ObjectFilePCHContainerOperations>());
+ CIndexer *CIdxr = new CIndexer();
+
if (excludeDeclarationsFromPCH)
CIdxr->setOnlyLocalDecls();
if (displayDiagnostics)
@@ -2954,8 +2954,8 @@ enum CXErrorCode clang_createTranslationUnit2(CXIndex CIdx,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
CompilerInstance::createDiagnostics(new DiagnosticOptions());
std::unique_ptr<ASTUnit> AU = ASTUnit::LoadFromASTFile(
- ast_filename, CXXIdx->getPCHContainerOperations(), Diags, FileSystemOpts,
- CXXIdx->getOnlyLocalDecls(), None,
+ ast_filename, CXXIdx->getPCHContainerOperations()->getRawReader(), Diags,
+ FileSystemOpts, CXXIdx->getOnlyLocalDecls(), None,
/*CaptureDiagnostics=*/true,
/*AllowPCHWithCompilerErrors=*/true,
/*UserFilesAreVolatile=*/true);
diff --git a/tools/libclang/CIndexer.h b/tools/libclang/CIndexer.h
index 8a306cde67ca..bf55301d618e 100644
--- a/tools/libclang/CIndexer.h
+++ b/tools/libclang/CIndexer.h
@@ -44,7 +44,7 @@ class CIndexer {
public:
CIndexer(std::shared_ptr<PCHContainerOperations> PCHContainerOps =
- std::make_shared<RawPCHContainerOperations>())
+ std::make_shared<PCHContainerOperations>())
: OnlyLocalDecls(false), DisplayDiagnostics(false),
Options(CXGlobalOpt_None), PCHContainerOps(PCHContainerOps) {}
diff --git a/tools/libclang/CMakeLists.txt b/tools/libclang/CMakeLists.txt
index fb292090565d..06db70e61ad5 100644
--- a/tools/libclang/CMakeLists.txt
+++ b/tools/libclang/CMakeLists.txt
@@ -40,7 +40,6 @@ set(SOURCES
set(LIBS
clangAST
clangBasic
- clangCodeGen
clangFrontend
clangIndex
clangLex
diff --git a/unittests/ASTMatchers/ASTMatchersTest.h b/unittests/ASTMatchers/ASTMatchersTest.h
index d79a3175b476..403a305db244 100644
--- a/unittests/ASTMatchers/ASTMatchersTest.h
+++ b/unittests/ASTMatchers/ASTMatchersTest.h
@@ -80,7 +80,7 @@ testing::AssertionResult matchesConditionally(
Args.push_back("-frtti");
Args.push_back("-fexceptions");
if (!runToolOnCodeWithArgs(Factory->create(), Code, Args, Filename,
- std::make_shared<RawPCHContainerOperations>(),
+ std::make_shared<PCHContainerOperations>(),
VirtualMappedFiles)) {
return testing::AssertionFailure() << "Parsing error in \"" << Code << "\"";
}
diff --git a/utils/TableGen/ClangAttrEmitter.cpp b/utils/TableGen/ClangAttrEmitter.cpp
index 5dc33a000a42..f2aa4002b99f 100644
--- a/utils/TableGen/ClangAttrEmitter.cpp
+++ b/utils/TableGen/ClangAttrEmitter.cpp
@@ -326,7 +326,8 @@ namespace {
OS << " " << getLowerName() << "Length = S.size();\n";
OS << " this->" << getLowerName() << " = new (C, 1) char ["
<< getLowerName() << "Length];\n";
- OS << " std::memcpy(this->" << getLowerName() << ", S.data(), "
+ OS << " if (!S.empty())\n";
+ OS << " std::memcpy(this->" << getLowerName() << ", S.data(), "
<< getLowerName() << "Length);\n";
OS << " }";
}
@@ -337,7 +338,8 @@ namespace {
OS << "A->get" << getUpperName() << "()";
}
void writeCtorBody(raw_ostream &OS) const override {
- OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
+ OS << " if (!" << getUpperName() << ".empty())\n";
+ OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
<< ".data(), " << getLowerName() << "Length);";
}
void writeCtorInitializers(raw_ostream &OS) const override {
diff --git a/www/analyzer/checker_dev_manual.html b/www/analyzer/checker_dev_manual.html
index c2606f92ee5e..c674a74284b4 100644
--- a/www/analyzer/checker_dev_manual.html
+++ b/www/analyzer/checker_dev_manual.html
@@ -28,7 +28,7 @@ and refer to this page for additional information on writing a checker. The stat
part of the Clang project, so consult <a href="http://clang.llvm.org/hacking.html">Hacking on Clang</a>
and <a href="http://llvm.org/docs/ProgrammersManual.html">LLVM Programmer's Manual</a>
for developer guidelines and send your questions and proposals to
-<a href=http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev>cfe-dev mailing list</a>.
+<a href=http://lists.llvm.org/mailman/listinfo/cfe-dev>cfe-dev mailing list</a>.
</p>
<ul>
@@ -592,10 +592,10 @@ up-to-date documentation about the APIs available in Clang. Relevant entries
have been linked throughout this page. Also of use is the
<a href="http://llvm.org/doxygen">LLVM doxygen</a>, when dealing with classes
from LLVM.
-<li> The <a href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev">
+<li> The <a href="http://lists.llvm.org/mailman/listinfo/cfe-dev">
cfe-dev mailing list</a>. This is the primary mailing list used for
discussion of Clang development (including static code analysis). The
-<a href="http://lists.cs.uiuc.edu/pipermail/cfe-dev">archive</a> also contains
+<a href="http://lists.llvm.org/pipermail/cfe-dev">archive</a> also contains
a lot of information.
<li> The "Building a Checker in 24 hours" presentation given at the <a
href="http://llvm.org/devmtg/2012-11">November 2012 LLVM Developer's
diff --git a/www/analyzer/installation.html b/www/analyzer/installation.html
index 2eb7937ee6dd..54a5da51adcf 100644
--- a/www/analyzer/installation.html
+++ b/www/analyzer/installation.html
@@ -39,7 +39,7 @@ weeks old.</p>
we need volunteers who are willing to help provide such regular builds.
If you wish to help contribute regular builds of the analyzer on other
platforms, please email the <a
-href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev">Clang
+href="http://lists.llvm.org/mailman/listinfo/cfe-dev">Clang
Developers' mailing list</a>.</p>
<h3>Using Packaged Builds</h3>
diff --git a/www/analyzer/menu.html.incl b/www/analyzer/menu.html.incl
index c487160c4ae0..ce24834eb164 100644
--- a/www/analyzer/menu.html.incl
+++ b/www/analyzer/menu.html.incl
@@ -34,8 +34,8 @@
<li>
Mailing Lists
<ul>
- <li><a href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev">cfe-dev</a></li>
- <li><a href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits">cfe-commits</a></li>
+ <li><a href="http://lists.llvm.org/mailman/listinfo/cfe-dev">cfe-dev</a></li>
+ <li><a href="http://lists.llvm.org/mailman/listinfo/cfe-commits">cfe-commits</a></li>
</ul>
</li>
</ul>
diff --git a/www/analyzer/open_projects.html b/www/analyzer/open_projects.html
index 4c3429da1f96..f354015c3f32 100644
--- a/www/analyzer/open_projects.html
+++ b/www/analyzer/open_projects.html
@@ -19,7 +19,7 @@
power. Most of the projects listed here are infrastructure-related so this list
is an addition to the <a href="potential_checkers.html">potential checkers
list</a>. If you are interested in tackling one of these, please send an email
-to the <a href=http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev>cfe-dev
+to the <a href=http://lists.llvm.org/mailman/listinfo/cfe-dev>cfe-dev
mailing list</a> to notify other members of the community.</p>
<ul>
diff --git a/www/cxx_dr_status.html b/www/cxx_dr_status.html
index c8ac750aa72e..81b893aeb568 100644
--- a/www/cxx_dr_status.html
+++ b/www/cxx_dr_status.html
@@ -28,7 +28,7 @@
<!--*************************************************************************-->
<h1>C++ Defect Report Support in Clang</h1>
<!--*************************************************************************-->
-<p>Last updated: $Date: 2015-07-06 03:45:27 +0200 (Mon, 06 Jul 2015) $</p>
+<p>Last updated: $Date: 2015-07-15 01:57:14 +0200 (Wed, 15 Jul 2015) $</p>
<h2 id="cxxdr">C++ defect report implementation status</h2>
@@ -1811,7 +1811,7 @@ of class templates</td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#295">295</a></td>
<td>CD1</td>
<td>cv-qualifiers on function types</td>
- <td class="svn" align="center">SVN</td>
+ <td class="full" align="center">Clang 3.7</td>
</tr>
<tr id="296">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#296">296</a></td>
@@ -4681,7 +4681,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#777">777</a></td>
<td>CD2</td>
<td>Default arguments and parameter packs</td>
- <td class="svn" align="center">SVN</td>
+ <td class="full" align="center">Clang 3.7</td>
</tr>
<tr id="778">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#778">778</a></td>
@@ -5503,7 +5503,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#948">948</a></td>
<td>C++11</td>
<td><TT>constexpr</TT> in <I>condition</I>s</td>
- <td class="svn" align="center">SVN</td>
+ <td class="full" align="center">Clang 3.7</td>
</tr>
<tr class="open" id="949">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#949">949</a></td>
@@ -8617,7 +8617,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1467">1467</a></td>
<td>DR</td>
<td>List-initialization of aggregate from same-type object</td>
- <td class="svn" align="center">SVN (C++11 onwards)</td>
+ <td class="full" align="center">Clang 3.7 (C++11 onwards)</td>
</tr>
<tr class="open" id="1468">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1468">1468</a></td>
@@ -8755,7 +8755,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1490">1490</a></td>
<td>DR</td>
<td>List-initialization from a string literal</td>
- <td class="svn" align="center">SVN (C++11 onwards)</td>
+ <td class="full" align="center">Clang 3.7 (C++11 onwards)</td>
</tr>
<tr id="1491">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1491">1491</a></td>
@@ -9349,7 +9349,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1589">1589</a></td>
<td>DR</td>
<td>Ambiguous ranking of list-initialization sequences</td>
- <td class="svn" align="center">SVN (C++11 onwards)</td>
+ <td class="full" align="center">Clang 3.7 (C++11 onwards)</td>
</tr>
<tr class="open" id="1590">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1590">1590</a></td>
@@ -9601,7 +9601,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1631">1631</a></td>
<td>DR</td>
<td>Incorrect overload resolution for single-element <I>initializer-list</I></td>
- <td class="svn" align="center">SVN (C++11 onwards)</td>
+ <td class="full" align="center">Clang 3.7 (C++11 onwards)</td>
</tr>
<tr class="open" id="1632">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1632">1632</a></td>
@@ -10303,7 +10303,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1748">1748</a></td>
<td>DR</td>
<td>Placement new with a null pointer</td>
- <td class="svn" align="center">SVN</td>
+ <td class="full" align="center">Clang 3.7</td>
</tr>
<tr id="1749">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_closed.html#1749">1749</a></td>
@@ -10351,7 +10351,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1756">1756</a></td>
<td>DR</td>
<td>Direct-list-initialization of a non-class object</td>
- <td class="svn" align="center">SVN (C++11 onwards)</td>
+ <td class="full" align="center">Clang 3.7 (C++11 onwards)</td>
</tr>
<tr id="1757">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1757">1757</a></td>
@@ -10363,7 +10363,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1758">1758</a></td>
<td>DR</td>
<td>Explicit conversion in copy/move list initialization</td>
- <td class="svn" align="center">SVN (C++11 onwards)</td>
+ <td class="full" align="center">Clang 3.7 (C++11 onwards)</td>
</tr>
<tr id="1759">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1759">1759</a></td>
@@ -11227,7 +11227,7 @@ and <I>POD class</I></td>
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1902">1902</a></td>
<td>DR</td>
<td>What makes a conversion &#8220;otherwise ill-formed&#8221;?</td>
- <td class="svn" align="center">SVN</td>
+ <td class="full" align="center">Clang 3.7</td>
</tr>
<tr class="open" id="1903">
<td><a href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1903">1903</a></td>
diff --git a/www/demo/index.cgi b/www/demo/index.cgi
index 901b009dbdea..8ea9cc7196af 100644
--- a/www/demo/index.cgi
+++ b/www/demo/index.cgi
@@ -19,7 +19,7 @@ if ( !-d $ROOT ) { mkdir( $ROOT, 0777 ); }
my $LOGFILE = "$ROOT/log.txt";
my $FORM_URL = 'index.cgi';
my $MAILADDR = 'sabre@nondot.org';
-my $CONTACT_ADDRESS = 'Questions or comments? Email the <a href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">LLVMdev mailing list</a>.';
+my $CONTACT_ADDRESS = 'Questions or comments? Email the <a href="http://lists.llvm.org/mailman/listinfo/llvm-dev">LLVM-dev mailing list</a>.';
my $LOGO_IMAGE_URL = 'cathead.png';
my $TIMEOUTAMOUNT = 20;
$ENV{'LD_LIBRARY_PATH'} = '/home/vadve/shared/localtools/fc1/lib/';
diff --git a/www/get_involved.html b/www/get_involved.html
index 06fee594200a..fdd6b8f33c8c 100644
--- a/www/get_involved.html
+++ b/www/get_involved.html
@@ -33,10 +33,10 @@ has its own mailing lists because the communities have people with different
interests. The two clang lists are:</p>
<ul>
-<li><a href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits">cfe-commits
+<li><a href="http://lists.llvm.org/mailman/listinfo/cfe-commits">cfe-commits
</a> - This list is for patch submission/discussion.</li>
-<li><a href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev">cfe-dev</a> -
+<li><a href="http://lists.llvm.org/mailman/listinfo/cfe-dev">cfe-dev</a> -
This list is for everything else Clang related (questions and answers, design
discussions, etc).</li>
@@ -45,17 +45,17 @@ discussions, etc).</li>
<p>If you are interested in clang only, these two lists should be all
you need. If you are interested in the LLVM optimizer and code generator,
please consider signing up for <a
-href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">llvmdev</a> and <a
-href="http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits">llvm-commits</a>
+href="http://lists.llvm.org/mailman/listinfo/llvm-dev">llvm-dev</a> and <a
+href="http://lists.llvm.org/mailman/listinfo/llvm-commits">llvm-commits</a>
as well.</p>
<p>The best way to talk with other developers on the project is through the <a
-href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev">cfe-dev mailing
+href="http://lists.llvm.org/mailman/listinfo/cfe-dev">cfe-dev mailing
list</a>. The clang mailing list is a very friendly place and we welcome
newcomers. In addition to the cfe-dev list, a significant amount of design
discussion takes place on the <a
-href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits">cfe-commits mailing
+href="http://lists.llvm.org/mailman/listinfo/cfe-commits">cfe-commits mailing
list</a>. All of these lists have archives, so you can browse through previous
discussions or follow the list development on the web if you prefer.</p>
diff --git a/www/make_cxx_dr_status b/www/make_cxx_dr_status
index db316353e0d7..bb3007f432a5 100755
--- a/www/make_cxx_dr_status
+++ b/www/make_cxx_dr_status
@@ -102,10 +102,10 @@ def availability(issue):
if status == 'unknown':
avail = 'Unknown'
avail_style = ' class="none"'
- elif status == '3.7':
+ elif status == '3.8':
avail = 'SVN'
avail_style = ' class="svn"'
- elif status in ('3.1', '3.2', '3.3', '3.4', '3.5', '3.6'):
+ elif status in ('3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7'):
avail = 'Clang %s' % status
avail_style = ' class="full"'
elif status == 'yes':
diff --git a/www/menu.html.incl b/www/menu.html.incl
index 390f7940195c..abbd2c1cc16a 100644
--- a/www/menu.html.incl
+++ b/www/menu.html.incl
@@ -33,9 +33,9 @@
<div class="submenu">
<label>Communication</label>
- <a href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-users">cfe-users List</a>
- <a href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev">cfe-dev List</a>
- <a href="http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits">cfe-commits List</a>
+ <a href="http://lists.llvm.org/mailman/listinfo/cfe-users">cfe-users List</a>
+ <a href="http://lists.llvm.org/mailman/listinfo/cfe-dev">cfe-dev List</a>
+ <a href="http://lists.llvm.org/mailman/listinfo/cfe-commits">cfe-commits List</a>
<a href="http://llvm.org/bugs/">Bug Reports</a>
<a href="http://planet.clang.org/">Planet Clang</a>
<a href="irc://irc.oftc.net/llvm">IRC: irc.oftc.net#llvm</a>