Diffstat (limited to 'test')
-rw-r--r--  test/Analysis/dead-stores.m | 2
-rw-r--r--  test/CodeGen/builtins-ppc-altivec.c | 146
-rw-r--r--  test/CodeGen/builtins-systemz-zvector-error.c | 576
-rw-r--r--  test/CodeGen/builtins-systemz-zvector.c | 2967
-rw-r--r--  test/CodeGen/integer-overflow.c | 7
-rw-r--r--  test/CodeGen/le32-regparm.c | 1
-rw-r--r--  test/CodeGen/long_double_fp128.cpp | 22
-rw-r--r--  test/CodeGen/palignr.c | 4
-rw-r--r--  test/CodeGen/x86_64-fp128.c | 115
-rw-r--r--  test/CodeGen/zvector.c | 2798
-rw-r--r--  test/CodeGenCXX/dllimport-rtti.cpp | 13
-rw-r--r--  test/CodeGenCXX/thunks.cpp | 17
-rw-r--r--  test/CodeGenObjC/Inputs/nsvalue-boxed-expressions-support.h | 63
-rw-r--r--  test/CodeGenObjC/nsvalue-objc-boxable-ios-arc.m | 126
-rw-r--r--  test/CodeGenObjC/nsvalue-objc-boxable-ios.m | 114
-rw-r--r--  test/CodeGenObjC/nsvalue-objc-boxable-mac-arc.m | 130
-rw-r--r--  test/CodeGenObjC/nsvalue-objc-boxable-mac.m | 118
-rw-r--r--  test/CodeGenObjCXX/designated-initializers.mm | 36
-rw-r--r--  test/CodeGenOpenCL/vector_shufflevector_valid.cl | 13
-rw-r--r--  test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_clang_tree/mingw32/i686-w64-mingw32/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_clang_tree/mingw32/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/i686-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_mingw_org_tree/mingw/minw32/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/i686-w64-mingw32/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/i686-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_opensuse_tree/usr/x86_64-w64-mingw32/sys-root/mingw/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/86_64-w64-mingw32/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/backward/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include/.keep | 0
-rw-r--r--  test/Driver/Inputs/mingw_ubuntu_tree/usr/x86_64-w64-mingw32/include/.keep | 0
-rw-r--r--  test/Driver/arm-ias-Wa.s | 64
-rw-r--r--  test/Driver/cuda-options.cu | 14
-rw-r--r--  test/Driver/linux-ld.c | 7
-rw-r--r--  test/Driver/mingw.cpp | 59
-rw-r--r--  test/Headers/pmmintrin.c | 12
-rw-r--r--  test/Headers/x86intrin-2.c | 137
-rw-r--r--  test/Headers/x86intrin.c | 121
-rw-r--r--  test/Lexer/has_attribute_objc_boxable.m | 8
-rw-r--r--  test/Lexer/has_feature_boxed_nsvalue_expressions.m | 8
-rw-r--r--  test/Misc/cc1as-asm.s | 3
-rw-r--r--  test/Modules/pch_container.m | 11
-rw-r--r--  test/OpenMP/for_codegen.cpp | 79
-rw-r--r--  test/OpenMP/for_loop_messages.cpp | 8
-rw-r--r--  test/OpenMP/for_simd_loop_messages.cpp | 8
-rw-r--r--  test/OpenMP/openmp_common.c | 5
-rw-r--r--  test/OpenMP/parallel_copyin_codegen.cpp | 259
-rw-r--r--  test/OpenMP/parallel_for_loop_messages.cpp | 8
-rw-r--r--  test/OpenMP/parallel_for_simd_loop_messages.cpp | 8
-rw-r--r--  test/OpenMP/simd_linear_messages.cpp | 3
-rw-r--r--  test/OpenMP/simd_loop_messages.cpp | 2
-rw-r--r--  test/PCH/objc_boxable.m | 17
-rw-r--r--  test/PCH/objc_boxable_record.h | 5
-rw-r--r--  test/PCH/objc_boxable_record_attr.h | 3
-rw-r--r--  test/Preprocessor/predefined-arch-macros.c | 9
-rw-r--r--  test/Sema/dllimport.c | 2
-rw-r--r--  test/Sema/typo-correction.c | 6
-rw-r--r--  test/Sema/zvector.c | 1009
-rw-r--r--  test/SemaCXX/warn-pessmizing-move.cpp | 50
-rw-r--r--  test/SemaCXX/warn-redundant-move.cpp | 72
-rw-r--r--  test/SemaObjC/circular-container.m | 61
-rw-r--r--  test/SemaObjC/comptypes-9.m | 2
-rw-r--r--  test/SemaObjC/objc-boxed-expressions-nsvalue.m | 101
-rw-r--r--  test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm | 118
-rw-r--r--  test/SemaTemplate/crash-unparsed-exception.cpp | 17
82 files changed, 9333 insertions, 231 deletions
diff --git a/test/Analysis/dead-stores.m b/test/Analysis/dead-stores.m
index 13b28dcf5374..9989efae2e8e 100644
--- a/test/Analysis/dead-stores.m
+++ b/test/Analysis/dead-stores.m
@@ -28,7 +28,7 @@ extern NSString *NSAlignmentBinding;
// This test case was reported as a false positive due to a bug in the
// LiveVariables <-> deadcode.DeadStores interplay. We should not flag a warning
// here. The test case was reported in:
-// http://lists.cs.uiuc.edu/pipermail/cfe-dev/2008-July/002157.html
+// http://lists.llvm.org/pipermail/cfe-dev/2008-July/002157.html
void DeadStoreTest(NSObject *anObject) {
NSArray *keys;
if ((keys = [anObject exposedBindings]) && // no-warning
diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c
index 8e8216b10111..32166b50f985 100644
--- a/test/CodeGen/builtins-ppc-altivec.c
+++ b/test/CodeGen/builtins-ppc-altivec.c
@@ -3307,81 +3307,225 @@ void test6() {
/* vec_sld */
res_vsc = vec_sld(vsc, vsc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_sld(vuc, vuc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_sld(vs, vs, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_sld(vus, vus, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vbs = vec_sld(vbs, vbs, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
res_vp = vec_sld(vp, vp, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_sld(vi, vi, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_sld(vui, vui, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vbi = vec_sld(vbi, vbi, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: xor <16 x i8>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
res_vf = vec_sld(vf, vf, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vsc = vec_vsldoi(vsc, vsc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_vsldoi(vuc, vuc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_vsldoi(vs, vs, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_vsldoi(vus, vus, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vp = vec_vsldoi(vp, vp, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_vsldoi(vi, vi, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_vsldoi(vui, vui, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vf = vec_vsldoi(vf, vf, 0);
-// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_sll */
diff --git a/test/CodeGen/builtins-systemz-zvector-error.c b/test/CodeGen/builtins-systemz-zvector-error.c
new file mode 100644
index 000000000000..8d5380dac161
--- /dev/null
+++ b/test/CodeGen/builtins-systemz-zvector-error.c
@@ -0,0 +1,576 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
+// RUN: -fzvector -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -fsyntax-only -verify %s
+
+#include <vecintrin.h>
+
+volatile vector signed char vsc;
+volatile vector signed short vss;
+volatile vector signed int vsi;
+volatile vector signed long long vsl;
+volatile vector unsigned char vuc;
+volatile vector unsigned short vus;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector bool char vbc;
+volatile vector bool short vbs;
+volatile vector bool int vbi;
+volatile vector bool long long vbl;
+volatile vector double vd;
+
+volatile signed char sc;
+volatile signed short ss;
+volatile signed int si;
+volatile signed long long sl;
+volatile unsigned char uc;
+volatile unsigned short us;
+volatile unsigned int ui;
+volatile unsigned long long ul;
+volatile double d;
+
+const void * volatile cptr;
+const signed char * volatile cptrsc;
+const signed short * volatile cptrss;
+const signed int * volatile cptrsi;
+const signed long long * volatile cptrsl;
+const unsigned char * volatile cptruc;
+const unsigned short * volatile cptrus;
+const unsigned int * volatile cptrui;
+const unsigned long long * volatile cptrul;
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+void * volatile ptr;
+signed char * volatile ptrsc;
+signed short * volatile ptrss;
+signed int * volatile ptrsi;
+signed long long * volatile ptrsl;
+unsigned char * volatile ptruc;
+unsigned short * volatile ptrus;
+unsigned int * volatile ptrui;
+unsigned long long * volatile ptrul;
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile unsigned int len;
+volatile int idx;
+int cc;
+
+void test_core(void) {
+ len = __lcbb(cptr, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+ len = __lcbb(cptr, 200); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+ len = __lcbb(cptr, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+ len = __lcbb(cptr, 8192); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+
+ vsl = vec_permi(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsl = vec_permi(vsl, vsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsl = vec_permi(vsl, vsl, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vul = vec_permi(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vul = vec_permi(vul, vul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vul = vec_permi(vul, vul, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbl = vec_permi(vbl, vbl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbl = vec_permi(vbl, vbl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbl = vec_permi(vbl, vbl, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vd = vec_permi(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vd = vec_permi(vd, vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vd = vec_permi(vd, vd, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+
+ vsi = vec_gather_element(vsi, vui, cptrsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_gather_element(vsi, vui, cptrsi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_gather_element(vsi, vui, cptrsi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vui = vec_gather_element(vui, vui, cptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_gather_element(vui, vui, cptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_gather_element(vui, vui, cptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_gather_element(vbi, vui, cptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_gather_element(vbi, vui, cptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_gather_element(vbi, vui, cptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vsl = vec_gather_element(vsl, vul, cptrsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_gather_element(vsl, vul, cptrsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_gather_element(vsl, vul, cptrsl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vul = vec_gather_element(vul, vul, cptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_gather_element(vul, vul, cptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_gather_element(vul, vul, cptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_gather_element(vbl, vul, cptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_gather_element(vbl, vul, cptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_gather_element(vbl, vul, cptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vd = vec_gather_element(vd, vul, cptrd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_gather_element(vd, vul, cptrd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_gather_element(vd, vul, cptrd, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+
+ vec_scatter_element(vsi, vui, ptrsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vsi, vui, ptrsi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vsi, vui, ptrsi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vui, vui, ptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vui, vui, ptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vui, vui, ptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vbi, vui, ptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vbi, vui, ptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vbi, vui, ptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vsl, vul, ptrsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vsl, vul, ptrsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vsl, vul, ptrsl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vul, vul, ptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vul, vul, ptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vul, vul, ptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vbl, vul, ptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vbl, vul, ptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vbl, vul, ptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vd, vul, ptrd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vd, vul, ptrd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vd, vul, ptrd, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+
+ vsc = vec_load_bndry(cptrsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsc = vec_load_bndry(cptrsc, 200); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsc = vec_load_bndry(cptrsc, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsc = vec_load_bndry(cptrsc, 8192); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vuc = vec_load_bndry(cptruc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vss = vec_load_bndry(cptrss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vus = vec_load_bndry(cptrus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsi = vec_load_bndry(cptrsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vui = vec_load_bndry(cptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsl = vec_load_bndry(cptrsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vul = vec_load_bndry(cptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+
+ vuc = vec_genmask(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+
+ vuc = vec_genmasks_8(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vuc = vec_genmasks_8(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vuc = vec_genmasks_8(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_genmasks_16(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_genmasks_16(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_genmasks_16(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_genmasks_32(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_genmasks_32(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_genmasks_32(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_genmasks_64(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_genmasks_64(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_genmasks_64(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+
+ vsc = vec_splat(vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_splat(vsc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_splat(vsc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_splat(vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vuc = vec_splat(vuc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vuc = vec_splat(vuc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vbc = vec_splat(vbc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vbc = vec_splat(vbc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vbc = vec_splat(vbc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vss = vec_splat(vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vss = vec_splat(vss, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vss = vec_splat(vss, 8); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vus = vec_splat(vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vus = vec_splat(vus, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vus = vec_splat(vus, 8); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vbs = vec_splat(vbs, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vbs = vec_splat(vbs, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vbs = vec_splat(vbs, 8); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vsi = vec_splat(vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_splat(vsi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_splat(vsi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vui = vec_splat(vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_splat(vui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_splat(vui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_splat(vbi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_splat(vbi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_splat(vbi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vsl = vec_splat(vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_splat(vsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_splat(vsl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vul = vec_splat(vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_splat(vul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_splat(vul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_splat(vbl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_splat(vbl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_splat(vbl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vd = vec_splat(vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_splat(vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_splat(vd, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+
+ vsc = vec_splat_s8(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vuc = vec_splat_u8(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vss = vec_splat_s16(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_splat_u16(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vsi = vec_splat_s32(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_splat_u32(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vsl = vec_splat_s64(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_splat_u64(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+}
+
+void test_integer(void) {
+ vsc = vec_rl_mask(vsc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vuc = vec_rl_mask(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vss = vec_rl_mask(vss, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vus = vec_rl_mask(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vsi = vec_rl_mask(vsi, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vui = vec_rl_mask(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vsl = vec_rl_mask(vsl, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vul = vec_rl_mask(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+
+ vsc = vec_sld(vsc, vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_sld(vsc, vsc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_sld(vsc, vsc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_sld(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_sld(vuc, vuc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_sld(vuc, vuc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vss = vec_sld(vss, vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vus = vec_sld(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsi = vec_sld(vsi, vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vui = vec_sld(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsl = vec_sld(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vul = vec_sld(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vd = vec_sld(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+
+ vsc = vec_sldw(vsc, vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsc = vec_sldw(vsc, vsc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsc = vec_sldw(vsc, vsc, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vuc = vec_sldw(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vuc = vec_sldw(vuc, vuc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vuc = vec_sldw(vuc, vuc, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vss = vec_sldw(vss, vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vus = vec_sldw(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_sldw(vsi, vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vui = vec_sldw(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsl = vec_sldw(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vul = vec_sldw(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vd = vec_sldw(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+}
+
+void test_float(void) {
+ vd = vec_ctd(vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vsl, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vul, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+
+ vsl = vec_ctsl(vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vsl = vec_ctsl(vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vsl = vec_ctsl(vd, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vul = vec_ctul(vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vul = vec_ctul(vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vul = vec_ctul(vd, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+
+ vbl = vec_fp_test_data_class(vd, idx, &cc); // expected-error {{must be a constant integer}}
+ vbl = vec_fp_test_data_class(vd, -1, &cc); // expected-error {{should be a value from 0 to 4095}}
+ vbl = vec_fp_test_data_class(vd, 4096, &cc); // expected-error {{should be a value from 0 to 4095}}
+}
diff --git a/test/CodeGen/builtins-systemz-zvector.c b/test/CodeGen/builtins-systemz-zvector.c
new file mode 100644
index 000000000000..6d554af44e93
--- /dev/null
+++ b/test/CodeGen/builtins-systemz-zvector.c
@@ -0,0 +1,2967 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+
+#include <vecintrin.h>
+
+volatile vector signed char vsc;
+volatile vector signed short vss;
+volatile vector signed int vsi;
+volatile vector signed long long vsl;
+volatile vector unsigned char vuc;
+volatile vector unsigned short vus;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector bool char vbc;
+volatile vector bool short vbs;
+volatile vector bool int vbi;
+volatile vector bool long long vbl;
+volatile vector double vd;
+
+volatile signed char sc;
+volatile signed short ss;
+volatile signed int si;
+volatile signed long long sl;
+volatile unsigned char uc;
+volatile unsigned short us;
+volatile unsigned int ui;
+volatile unsigned long long ul;
+volatile double d;
+
+const void * volatile cptr;
+const signed char * volatile cptrsc;
+const signed short * volatile cptrss;
+const signed int * volatile cptrsi;
+const signed long long * volatile cptrsl;
+const unsigned char * volatile cptruc;
+const unsigned short * volatile cptrus;
+const unsigned int * volatile cptrui;
+const unsigned long long * volatile cptrul;
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+void * volatile ptr;
+signed char * volatile ptrsc;
+signed short * volatile ptrss;
+signed int * volatile ptrsi;
+signed long long * volatile ptrsl;
+unsigned char * volatile ptruc;
+unsigned short * volatile ptrus;
+unsigned int * volatile ptrui;
+unsigned long long * volatile ptrul;
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile unsigned int len;
+volatile int idx;
+int cc;
+
+void test_core(void) {
+ len = __lcbb(cptr, 64);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 0)
+ len = __lcbb(cptr, 128);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 1)
+ len = __lcbb(cptr, 256);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 2)
+ len = __lcbb(cptr, 512);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 3)
+ len = __lcbb(cptr, 1024);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 4)
+ len = __lcbb(cptr, 2048);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 5)
+ len = __lcbb(cptr, 4096);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 6)
+
+ sc = vec_extract(vsc, idx);
+ // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}}
+ uc = vec_extract(vuc, idx);
+ // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}}
+ uc = vec_extract(vbc, idx);
+ // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}}
+ ss = vec_extract(vss, idx);
+ // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}}
+ us = vec_extract(vus, idx);
+ // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}}
+ us = vec_extract(vbs, idx);
+ // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}}
+ si = vec_extract(vsi, idx);
+ // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}}
+ ui = vec_extract(vui, idx);
+ // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}}
+ ui = vec_extract(vbi, idx);
+ // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}}
+ sl = vec_extract(vsl, idx);
+ // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}}
+ ul = vec_extract(vul, idx);
+ // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}}
+ ul = vec_extract(vbl, idx);
+ // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}}
+ d = vec_extract(vd, idx);
+ // CHECK: extractelement <2 x double> %{{.*}}, i32 %{{.*}}
+
+ vsc = vec_insert(sc, vsc, idx);
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}}
+ vuc = vec_insert(uc, vuc, idx);
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}}
+ vuc = vec_insert(uc, vbc, idx);
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}}
+ vss = vec_insert(ss, vss, idx);
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}}
+ vus = vec_insert(us, vus, idx);
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}}
+ vus = vec_insert(us, vbs, idx);
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}}
+ vsi = vec_insert(si, vsi, idx);
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}
+ vui = vec_insert(ui, vui, idx);
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}
+ vui = vec_insert(ui, vbi, idx);
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}
+ vsl = vec_insert(sl, vsl, idx);
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}}
+ vul = vec_insert(ul, vul, idx);
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}}
+ vul = vec_insert(ul, vbl, idx);
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}}
+ vd = vec_insert(d, vd, idx);
+ // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 %{{.*}}
+
+ vsc = vec_promote(sc, idx);
+ // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 %{{.*}}
+ vuc = vec_promote(uc, idx);
+ // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 %{{.*}}
+ vss = vec_promote(ss, idx);
+ // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 %{{.*}}
+ vus = vec_promote(us, idx);
+ // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 %{{.*}}
+ vsi = vec_promote(si, idx);
+ // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 %{{.*}}
+ vui = vec_promote(ui, idx);
+ // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 %{{.*}}
+ vsl = vec_promote(sl, idx);
+ // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 %{{.*}}
+ vul = vec_promote(ul, idx);
+ // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 %{{.*}}
+ vd = vec_promote(d, idx);
+ // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 %{{.*}}
+
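+  // vec_insert_and_zero loads a single scalar into the element targeted by
+  // the VLLEZ instruction (byte 7, halfword 3, word 1, doubleword 0) and
+  // clears the remaining elements, hence the mostly-zero base vectors.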
+ vsc = vec_insert_and_zero(cptrsc);
+ // CHECK: insertelement <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i8 %{{.*}}, i32 7
+ vuc = vec_insert_and_zero(cptruc);
+ // CHECK: insertelement <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i8 %{{.*}}, i32 7
+ vss = vec_insert_and_zero(cptrss);
+ // CHECK: insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0>, i16 %{{.*}}, i32 3
+ vus = vec_insert_and_zero(cptrus);
+ // CHECK: insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0>, i16 %{{.*}}, i32 3
+ vsi = vec_insert_and_zero(cptrsi);
+ // CHECK: insertelement <4 x i32> <i32 0, i32 undef, i32 0, i32 0>, i32 %{{.*}}, i32 1
+ vui = vec_insert_and_zero(cptrui);
+ // CHECK: insertelement <4 x i32> <i32 0, i32 undef, i32 0, i32 0>, i32 %{{.*}}, i32 1
+ vsl = vec_insert_and_zero(cptrsl);
+ // CHECK: insertelement <2 x i64> <i64 undef, i64 0>, i64 %{{.*}}, i32 0
+ vul = vec_insert_and_zero(cptrul);
+ // CHECK: insertelement <2 x i64> <i64 undef, i64 0>, i64 %{{.*}}, i32 0
+ vd = vec_insert_and_zero(cptrd);
+ // CHECK: insertelement <2 x double> <double undef, double 0.000000e+00>, double %{{.*}}, i32 0
+
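+  // vec_perm picks arbitrary bytes from the concatenation of its two vector
+  // operands; every element type funnels into the same byte-wise vperm
+  // intrinsic.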
+ vsc = vec_perm(vsc, vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_perm(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_perm(vbc, vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_perm(vss, vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_perm(vus, vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_perm(vbs, vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_perm(vsi, vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_perm(vui, vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_perm(vbi, vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_perm(vsl, vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_perm(vul, vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_perm(vbl, vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_perm(vd, vd, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
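+  // vec_permi: the 2-bit immediate selecting doubleword halves is re-encoded
+  // for vpdi (0 -> 0, 1 -> 1, 2 -> 4, 3 -> 5), as the constant operands
+  // below show.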
+ vsl = vec_permi(vsl, vsl, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vsl = vec_permi(vsl, vsl, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vsl = vec_permi(vsl, vsl, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vsl = vec_permi(vsl, vsl, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+ vul = vec_permi(vul, vul, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vul = vec_permi(vul, vul, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vul = vec_permi(vul, vul, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vul = vec_permi(vul, vul, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+ vbl = vec_permi(vbl, vbl, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vbl = vec_permi(vbl, vbl, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vbl = vec_permi(vbl, vbl, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vbl = vec_permi(vbl, vbl, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+ vd = vec_permi(vd, vd, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vd = vec_permi(vd, vd, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vd = vec_permi(vd, vd, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vd = vec_permi(vd, vd, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+
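+  // vec_sel is a plain bitwise select, so no particular IR shape is pinned
+  // down here; these calls only verify that every overload compiles.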
+ vsc = vec_sel(vsc, vsc, vuc);
+ vsc = vec_sel(vsc, vsc, vbc);
+ vuc = vec_sel(vuc, vuc, vuc);
+ vuc = vec_sel(vuc, vuc, vbc);
+ vbc = vec_sel(vbc, vbc, vuc);
+ vbc = vec_sel(vbc, vbc, vbc);
+ vss = vec_sel(vss, vss, vus);
+ vss = vec_sel(vss, vss, vbs);
+ vus = vec_sel(vus, vus, vus);
+ vus = vec_sel(vus, vus, vbs);
+ vbs = vec_sel(vbs, vbs, vus);
+ vbs = vec_sel(vbs, vbs, vbs);
+ vsi = vec_sel(vsi, vsi, vui);
+ vsi = vec_sel(vsi, vsi, vbi);
+ vui = vec_sel(vui, vui, vui);
+ vui = vec_sel(vui, vui, vbi);
+ vbi = vec_sel(vbi, vbi, vui);
+ vbi = vec_sel(vbi, vbi, vbi);
+ vsl = vec_sel(vsl, vsl, vul);
+ vsl = vec_sel(vsl, vsl, vbl);
+ vul = vec_sel(vul, vul, vul);
+ vul = vec_sel(vul, vul, vbl);
+ vbl = vec_sel(vbl, vbl, vul);
+ vbl = vec_sel(vbl, vbl, vbl);
+ vd = vec_sel(vd, vd, vul);
+ vd = vec_sel(vd, vd, vbl);
+
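+  // vec_gather_element and vec_scatter_element take a constant element index
+  // (0-3 for the 4-element types, 0-1 for the 2-element ones); the calls
+  // below cover each valid index as compile-only coverage.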
+ vsi = vec_gather_element(vsi, vui, cptrsi, 0);
+ vsi = vec_gather_element(vsi, vui, cptrsi, 1);
+ vsi = vec_gather_element(vsi, vui, cptrsi, 2);
+ vsi = vec_gather_element(vsi, vui, cptrsi, 3);
+ vui = vec_gather_element(vui, vui, cptrui, 0);
+ vui = vec_gather_element(vui, vui, cptrui, 1);
+ vui = vec_gather_element(vui, vui, cptrui, 2);
+ vui = vec_gather_element(vui, vui, cptrui, 3);
+ vbi = vec_gather_element(vbi, vui, cptrui, 0);
+ vbi = vec_gather_element(vbi, vui, cptrui, 1);
+ vbi = vec_gather_element(vbi, vui, cptrui, 2);
+ vbi = vec_gather_element(vbi, vui, cptrui, 3);
+ vsl = vec_gather_element(vsl, vul, cptrsl, 0);
+ vsl = vec_gather_element(vsl, vul, cptrsl, 1);
+ vul = vec_gather_element(vul, vul, cptrul, 0);
+ vul = vec_gather_element(vul, vul, cptrul, 1);
+ vbl = vec_gather_element(vbl, vul, cptrul, 0);
+ vbl = vec_gather_element(vbl, vul, cptrul, 1);
+ vd = vec_gather_element(vd, vul, cptrd, 0);
+ vd = vec_gather_element(vd, vul, cptrd, 1);
+
+ vec_scatter_element(vsi, vui, ptrsi, 0);
+ vec_scatter_element(vsi, vui, ptrsi, 1);
+ vec_scatter_element(vsi, vui, ptrsi, 2);
+ vec_scatter_element(vsi, vui, ptrsi, 3);
+ vec_scatter_element(vui, vui, ptrui, 0);
+ vec_scatter_element(vui, vui, ptrui, 1);
+ vec_scatter_element(vui, vui, ptrui, 2);
+ vec_scatter_element(vui, vui, ptrui, 3);
+ vec_scatter_element(vbi, vui, ptrui, 0);
+ vec_scatter_element(vbi, vui, ptrui, 1);
+ vec_scatter_element(vbi, vui, ptrui, 2);
+ vec_scatter_element(vbi, vui, ptrui, 3);
+ vec_scatter_element(vsl, vul, ptrsl, 0);
+ vec_scatter_element(vsl, vul, ptrsl, 1);
+ vec_scatter_element(vul, vul, ptrul, 0);
+ vec_scatter_element(vul, vul, ptrul, 1);
+ vec_scatter_element(vbl, vul, ptrul, 0);
+ vec_scatter_element(vbl, vul, ptrul, 1);
+ vec_scatter_element(vd, vul, ptrd, 0);
+ vec_scatter_element(vd, vul, ptrd, 1);
+
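+  // The vec_xld2/vec_xlw4 loads and vec_xstd2/vec_xstw4 stores address a
+  // vector at a byte offset from a base pointer; they are exercised for
+  // compilation only.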
+ vsc = vec_xld2(idx, cptrsc);
+ vuc = vec_xld2(idx, cptruc);
+ vss = vec_xld2(idx, cptrss);
+ vus = vec_xld2(idx, cptrus);
+ vsi = vec_xld2(idx, cptrsi);
+ vui = vec_xld2(idx, cptrui);
+ vsl = vec_xld2(idx, cptrsl);
+ vul = vec_xld2(idx, cptrul);
+ vd = vec_xld2(idx, cptrd);
+
+ vsc = vec_xlw4(idx, cptrsc);
+ vuc = vec_xlw4(idx, cptruc);
+ vss = vec_xlw4(idx, cptrss);
+ vus = vec_xlw4(idx, cptrus);
+ vsi = vec_xlw4(idx, cptrsi);
+ vui = vec_xlw4(idx, cptrui);
+
+ vec_xstd2(vsc, idx, ptrsc);
+ vec_xstd2(vuc, idx, ptruc);
+ vec_xstd2(vss, idx, ptrss);
+ vec_xstd2(vus, idx, ptrus);
+ vec_xstd2(vsi, idx, ptrsi);
+ vec_xstd2(vui, idx, ptrui);
+ vec_xstd2(vsl, idx, ptrsl);
+ vec_xstd2(vul, idx, ptrul);
+ vec_xstd2(vd, idx, ptrd);
+
+ vec_xstw4(vsc, idx, ptrsc);
+ vec_xstw4(vuc, idx, ptruc);
+ vec_xstw4(vss, idx, ptrss);
+ vec_xstw4(vus, idx, ptrus);
+ vec_xstw4(vsi, idx, ptrsi);
+ vec_xstw4(vui, idx, ptrui);
+
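+  // vec_load_bndry uses the same boundary encoding as __lcbb above: constant
+  // boundaries of 64 through 4096 bytes map to immediates 0-6.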
+ vsc = vec_load_bndry(cptrsc, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vuc = vec_load_bndry(cptruc, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vss = vec_load_bndry(cptrss, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vus = vec_load_bndry(cptrus, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vsi = vec_load_bndry(cptrsi, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vui = vec_load_bndry(cptrui, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vsl = vec_load_bndry(cptrsl, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vul = vec_load_bndry(cptrul, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vd = vec_load_bndry(cptrd, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vsc = vec_load_bndry(cptrsc, 128);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 1)
+ vsc = vec_load_bndry(cptrsc, 256);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 2)
+ vsc = vec_load_bndry(cptrsc, 512);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 3)
+ vsc = vec_load_bndry(cptrsc, 1024);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 4)
+ vsc = vec_load_bndry(cptrsc, 2048);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 5)
+ vsc = vec_load_bndry(cptrsc, 4096);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 6)
+
+ vsc = vec_load_len(cptrsc, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vuc = vec_load_len(cptruc, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vss = vec_load_len(cptrss, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vus = vec_load_len(cptrus, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vsi = vec_load_len(cptrsi, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vui = vec_load_len(cptrui, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vsl = vec_load_len(cptrsl, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vul = vec_load_len(cptrul, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vd = vec_load_len(cptrd, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+
+ vec_store_len(vsc, ptrsc, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vuc, ptruc, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vss, ptrss, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vus, ptrus, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vsi, ptrsi, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vui, ptrui, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vsl, ptrsl, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vul, ptrul, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vd, ptrd, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+
+ vsl = vec_load_pair(sl, sl);
+ vul = vec_load_pair(ul, ul);
+
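+  // With constant arguments, vec_genmask and vec_genmasks_* fold to vector
+  // constants instead of intrinsic calls.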
+ vuc = vec_genmask(0);
+ // CHECK: <16 x i8> zeroinitializer
+ vuc = vec_genmask(0x8000);
+ // CHECK: <16 x i8> <i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+ vuc = vec_genmask(0xffff);
+ // CHECK: <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+
+ vuc = vec_genmasks_8(0, 7);
+ // CHECK: <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ vuc = vec_genmasks_8(1, 4);
+ // CHECK: <16 x i8> <i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120>
+ vuc = vec_genmasks_8(6, 2);
+ // CHECK: <16 x i8> <i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29>
+ vus = vec_genmasks_16(0, 15);
+ // CHECK: <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ vus = vec_genmasks_16(2, 11);
+ // CHECK: <8 x i16> <i16 16368, i16 16368, i16 16368, i16 16368, i16 16368, i16 16368, i16 16368, i16 16368>
+ vus = vec_genmasks_16(9, 2);
+ // CHECK: <8 x i16> <i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065>
+ vui = vec_genmasks_32(0, 31);
+ // CHECK: <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ vui = vec_genmasks_32(7, 20);
+ // CHECK: <4 x i32> <i32 33552384, i32 33552384, i32 33552384, i32 33552384>
+ vui = vec_genmasks_32(25, 4);
+ // CHECK: <4 x i32> <i32 -134217601, i32 -134217601, i32 -134217601, i32 -134217601>
+ vul = vec_genmasks_64(0, 63);
+ // CHECK: <2 x i64> <i64 -1, i64 -1>
+ vul = vec_genmasks_64(3, 40);
+ // CHECK: <2 x i64> <i64 2305843009205305344, i64 2305843009205305344>
+ vul = vec_genmasks_64(30, 11);
+ // CHECK: <2 x i64> <i64 -4503582447501313, i64 -4503582447501313>
+
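+  // vec_splat with a constant element index becomes a shufflevector with a
+  // constant splat mask; index 0 prints as zeroinitializer.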
+ vsc = vec_splat(vsc, 0);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vsc = vec_splat(vsc, 15);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ vuc = vec_splat(vuc, 0);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vuc = vec_splat(vuc, 15);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ vbc = vec_splat(vbc, 0);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vbc = vec_splat(vbc, 15);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ vss = vec_splat(vss, 0);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vss = vec_splat(vss, 7);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ vus = vec_splat(vus, 0);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vus = vec_splat(vus, 7);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ vbs = vec_splat(vbs, 0);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vbs = vec_splat(vbs, 7);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ vsi = vec_splat(vsi, 0);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vsi = vec_splat(vsi, 3);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ vui = vec_splat(vui, 0);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vui = vec_splat(vui, 3);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ vbi = vec_splat(vbi, 0);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vbi = vec_splat(vbi, 3);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ vsl = vec_splat(vsl, 0);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vsl = vec_splat(vsl, 1);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ vul = vec_splat(vul, 0);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vul = vec_splat(vul, 1);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ vbl = vec_splat(vbl, 0);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vbl = vec_splat(vbl, 1);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ vd = vec_splat(vd, 0);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+ vd = vec_splat(vd, 1);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+
+ vsc = vec_splat_s8(-128);
+ // CHECK: <16 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
+ vsc = vec_splat_s8(127);
+ // CHECK: <16 x i8> <i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127>
+ vuc = vec_splat_u8(1);
+ // CHECK: <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ vuc = vec_splat_u8(254);
+ // CHECK: <16 x i8> <i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2>
+ vss = vec_splat_s16(-32768);
+ // CHECK: <8 x i16> <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
+ vss = vec_splat_s16(32767);
+ // CHECK: <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>
+ vus = vec_splat_u16(1);
+ // CHECK: <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ vus = vec_splat_u16(65534);
+ // CHECK: <8 x i16> <i16 -2, i16 -2, i16 -2, i16 -2, i16 -2, i16 -2, i16 -2, i16 -2>
+ vsi = vec_splat_s32(-32768);
+ // CHECK: <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ vsi = vec_splat_s32(32767);
+ // CHECK: <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+ vui = vec_splat_u32(-32768);
+ // CHECK: <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ vui = vec_splat_u32(32767);
+ // CHECK: <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+ vsl = vec_splat_s64(-32768);
+ // CHECK: <2 x i64> <i64 -32768, i64 -32768>
+ vsl = vec_splat_s64(32767);
+ // CHECK: <2 x i64> <i64 32767, i64 32767>
+ vul = vec_splat_u64(-32768);
+ // CHECK: <2 x i64> <i64 -32768, i64 -32768>
+ vul = vec_splat_u64(32767);
+ // CHECK: <2 x i64> <i64 32767, i64 32767>
+
+ vsc = vec_splats(sc);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vuc = vec_splats(uc);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vss = vec_splats(ss);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vus = vec_splats(us);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vsi = vec_splats(si);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vui = vec_splats(ui);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vsl = vec_splats(sl);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vul = vec_splats(ul);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vd = vec_splats(d);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+
+ vsl = vec_extend_s64(vsc);
+ vsl = vec_extend_s64(vss);
+ vsl = vec_extend_s64(vsi);
+
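+  // vec_mergeh and vec_mergel interleave the high and low halves of the two
+  // operands, mirroring the VMRH/VMRL family of instructions.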
+  vsc = vec_mergeh(vsc, vsc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+  vuc = vec_mergeh(vuc, vuc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+  vbc = vec_mergeh(vbc, vbc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+  vss = vec_mergeh(vss, vss);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+  vus = vec_mergeh(vus, vus);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+  vbs = vec_mergeh(vbs, vbs);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+  vsi = vec_mergeh(vsi, vsi);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+  vui = vec_mergeh(vui, vui);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+  vbi = vec_mergeh(vbi, vbi);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+  vsl = vec_mergeh(vsl, vsl);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
+  vul = vec_mergeh(vul, vul);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
+  vbl = vec_mergeh(vbl, vbl);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
+  vd = vec_mergeh(vd, vd);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
+
+  vsc = vec_mergel(vsc, vsc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  vuc = vec_mergel(vuc, vuc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  vbc = vec_mergel(vbc, vbc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  vss = vec_mergel(vss, vss);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  vus = vec_mergel(vus, vus);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  vbs = vec_mergel(vbs, vbs);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  vsi = vec_mergel(vsi, vsi);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+  vui = vec_mergel(vui, vui);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+  vbi = vec_mergel(vbi, vbi);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+  vsl = vec_mergel(vsl, vsl);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  vul = vec_mergel(vul, vul);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  vbl = vec_mergel(vbl, vbl);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  vd = vec_mergel(vd, vd);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
+
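+  // vec_pack truncates each element to half its width; on this big-endian
+  // target that keeps the odd-numbered narrow subelements, hence the
+  // 1, 3, 5, ... shuffle masks.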
+ vsc = vec_pack(vss, vss);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ vuc = vec_pack(vus, vus);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ vbc = vec_pack(vbs, vbs);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ vss = vec_pack(vsi, vsi);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ vus = vec_pack(vui, vui);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ vbs = vec_pack(vbi, vbi);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ vsi = vec_pack(vsl, vsl);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ vui = vec_pack(vul, vul);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ vbi = vec_pack(vbl, vbl);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+
+ vsc = vec_packs(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vpksh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vuc = vec_packs(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vss = vec_packs(vsi, vsi);
+ // CHECK: call <8 x i16> @llvm.s390.vpksf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vus = vec_packs(vui, vui);
+ // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsi = vec_packs(vsl, vsl);
+ // CHECK: call <4 x i32> @llvm.s390.vpksg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vui = vec_packs(vul, vul);
+ // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vsc = vec_packs_cc(vss, vss, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpkshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vuc = vec_packs_cc(vus, vus, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpklshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vss = vec_packs_cc(vsi, vsi, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpksfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vus = vec_packs_cc(vui, vui, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpklsfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsi = vec_packs_cc(vsl, vsl, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpksgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vui = vec_packs_cc(vul, vul, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpklsgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vuc = vec_packsu(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vuc = vec_packsu(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_packsu(vsi, vsi);
+ // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vus = vec_packsu(vui, vui);
+ // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_packsu(vsl, vsl);
+ // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vui = vec_packsu(vul, vul);
+ // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vuc = vec_packsu_cc(vus, vus, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpklshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_packsu_cc(vui, vui, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpklsfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_packsu_cc(vul, vul, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpklsgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vss = vec_unpackh(vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vuphb(<16 x i8> %{{.*}})
+ vus = vec_unpackh(vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vuplhb(<16 x i8> %{{.*}})
+ vbs = vec_unpackh(vbc);
+ // CHECK: call <8 x i16> @llvm.s390.vuphb(<16 x i8> %{{.*}})
+ vsi = vec_unpackh(vss);
+ // CHECK: call <4 x i32> @llvm.s390.vuphh(<8 x i16> %{{.*}})
+ vui = vec_unpackh(vus);
+ // CHECK: call <4 x i32> @llvm.s390.vuplhh(<8 x i16> %{{.*}})
+ vbi = vec_unpackh(vbs);
+ // CHECK: call <4 x i32> @llvm.s390.vuphh(<8 x i16> %{{.*}})
+ vsl = vec_unpackh(vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vuphf(<4 x i32> %{{.*}})
+ vul = vec_unpackh(vui);
+ // CHECK: call <2 x i64> @llvm.s390.vuplhf(<4 x i32> %{{.*}})
+ vbl = vec_unpackh(vbi);
+ // CHECK: call <2 x i64> @llvm.s390.vuphf(<4 x i32> %{{.*}})
+
+ vss = vec_unpackl(vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vuplb(<16 x i8> %{{.*}})
+ vus = vec_unpackl(vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vupllb(<16 x i8> %{{.*}})
+ vbs = vec_unpackl(vbc);
+ // CHECK: call <8 x i16> @llvm.s390.vuplb(<16 x i8> %{{.*}})
+ vsi = vec_unpackl(vss);
+ // CHECK: call <4 x i32> @llvm.s390.vuplhw(<8 x i16> %{{.*}})
+ vui = vec_unpackl(vus);
+ // CHECK: call <4 x i32> @llvm.s390.vupllh(<8 x i16> %{{.*}})
+ vbi = vec_unpackl(vbs);
+ // CHECK: call <4 x i32> @llvm.s390.vuplhw(<8 x i16> %{{.*}})
+ vsl = vec_unpackl(vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vuplf(<4 x i32> %{{.*}})
+ vul = vec_unpackl(vui);
+ // CHECK: call <2 x i64> @llvm.s390.vupllf(<4 x i32> %{{.*}})
+ vbl = vec_unpackl(vbi);
+ // CHECK: call <2 x i64> @llvm.s390.vuplf(<4 x i32> %{{.*}})
+}
+
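+// test_compare covers the element-wise comparisons, which lower to plain
+// icmp/fcmp instructions, and the all/any predicates, which use the
+// condition-code-setting intrinsic variants instead.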
+void test_compare(void) {
+ vbc = vec_cmpeq(vsc, vsc);
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpeq(vuc, vuc);
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpeq(vbc, vbc);
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmpeq(vss, vss);
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpeq(vus, vus);
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpeq(vbs, vbs);
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmpeq(vsi, vsi);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpeq(vui, vui);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpeq(vbi, vbi);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vsl, vsl);
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vul, vul);
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vbl, vbl);
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vd, vd);
+ // CHECK: fcmp oeq <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmpge(vsc, vsc);
+ // CHECK: icmp sge <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpge(vuc, vuc);
+ // CHECK: icmp uge <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmpge(vss, vss);
+ // CHECK: icmp sge <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpge(vus, vus);
+ // CHECK: icmp uge <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmpge(vsi, vsi);
+ // CHECK: icmp sge <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpge(vui, vui);
+ // CHECK: icmp uge <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmpge(vsl, vsl);
+ // CHECK: icmp sge <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpge(vul, vul);
+ // CHECK: icmp uge <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpge(vd, vd);
+ // CHECK: fcmp oge <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmpgt(vsc, vsc);
+ // CHECK: icmp sgt <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpgt(vuc, vuc);
+ // CHECK: icmp ugt <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmpgt(vss, vss);
+ // CHECK: icmp sgt <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpgt(vus, vus);
+ // CHECK: icmp ugt <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmpgt(vsi, vsi);
+ // CHECK: icmp sgt <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpgt(vui, vui);
+ // CHECK: icmp ugt <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmpgt(vsl, vsl);
+ // CHECK: icmp sgt <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpgt(vul, vul);
+ // CHECK: icmp ugt <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpgt(vd, vd);
+ // CHECK: fcmp ogt <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmple(vsc, vsc);
+ // CHECK: icmp sle <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmple(vuc, vuc);
+ // CHECK: icmp ule <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmple(vss, vss);
+ // CHECK: icmp sle <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmple(vus, vus);
+ // CHECK: icmp ule <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmple(vsi, vsi);
+ // CHECK: icmp sle <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmple(vui, vui);
+ // CHECK: icmp ule <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmple(vsl, vsl);
+ // CHECK: icmp sle <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmple(vul, vul);
+ // CHECK: icmp ule <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmple(vd, vd);
+ // CHECK: fcmp ole <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmplt(vsc, vsc);
+ // CHECK: icmp slt <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmplt(vuc, vuc);
+ // CHECK: icmp ult <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmplt(vss, vss);
+ // CHECK: icmp slt <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmplt(vus, vus);
+ // CHECK: icmp ult <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmplt(vsi, vsi);
+ // CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmplt(vui, vui);
+ // CHECK: icmp ult <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmplt(vsl, vsl);
+ // CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmplt(vul, vul);
+ // CHECK: icmp ult <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmplt(vd, vd);
+ // CHECK: fcmp olt <2 x double> %{{.*}}, %{{.*}}
+
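+  // The vec_all_* and vec_any_* predicates call the "s"-suffixed intrinsic
+  // variants, which return the comparison result together with an i32
+  // condition code that the predicate then tests.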
+ idx = vec_all_eq(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_ne(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_ge(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_gt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_le(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_lt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_nge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_all_ngt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_all_nle(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_all_nlt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
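+  // Both of these go through vftcidb; the i32 15 mask appears to select the
+  // NaN test-data classes, with _nan and _numeric differing only in how the
+  // returned condition code is interpreted.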
+ idx = vec_all_nan(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+ idx = vec_all_numeric(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+
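+  // The vec_any_* predicates reuse the same CC-setting compare intrinsics as
+  // vec_all_*; only the test applied to the returned i32 condition code differs.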
+ idx = vec_any_eq(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_ne(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_ge(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_gt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_le(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_lt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_nge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_any_ngt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_any_nle(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_any_nlt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_nan(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+ idx = vec_any_numeric(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+}
+
+void test_integer(void) {
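+  // No CHECK lines for vec_andc/vec_nor below: both are expected to lower to
+  // plain IR and/or/xor sequences rather than target-specific intrinsics.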
+ vsc = vec_andc(vsc, vsc);
+ vsc = vec_andc(vsc, vbc);
+ vsc = vec_andc(vbc, vsc);
+ vuc = vec_andc(vuc, vuc);
+ vuc = vec_andc(vuc, vbc);
+ vuc = vec_andc(vbc, vuc);
+ vbc = vec_andc(vbc, vbc);
+ vss = vec_andc(vss, vss);
+ vss = vec_andc(vss, vbs);
+ vss = vec_andc(vbs, vss);
+ vus = vec_andc(vus, vus);
+ vus = vec_andc(vus, vbs);
+ vus = vec_andc(vbs, vus);
+ vbs = vec_andc(vbs, vbs);
+ vsi = vec_andc(vsi, vsi);
+ vsi = vec_andc(vsi, vbi);
+ vsi = vec_andc(vbi, vsi);
+ vui = vec_andc(vui, vui);
+ vui = vec_andc(vui, vbi);
+ vui = vec_andc(vbi, vui);
+ vbi = vec_andc(vbi, vbi);
+ vsl = vec_andc(vsl, vsl);
+ vsl = vec_andc(vsl, vbl);
+ vsl = vec_andc(vbl, vsl);
+ vul = vec_andc(vul, vul);
+ vul = vec_andc(vul, vbl);
+ vul = vec_andc(vbl, vul);
+ vbl = vec_andc(vbl, vbl);
+ vd = vec_andc(vd, vd);
+ vd = vec_andc(vd, vbl);
+ vd = vec_andc(vbl, vd);
+
+ vsc = vec_nor(vsc, vsc);
+ vsc = vec_nor(vsc, vbc);
+ vsc = vec_nor(vbc, vsc);
+ vuc = vec_nor(vuc, vuc);
+ vuc = vec_nor(vuc, vbc);
+ vuc = vec_nor(vbc, vuc);
+ vbc = vec_nor(vbc, vbc);
+ vss = vec_nor(vss, vss);
+ vss = vec_nor(vss, vbs);
+ vss = vec_nor(vbs, vss);
+ vus = vec_nor(vus, vus);
+ vus = vec_nor(vus, vbs);
+ vus = vec_nor(vbs, vus);
+ vbs = vec_nor(vbs, vbs);
+ vsi = vec_nor(vsi, vsi);
+ vsi = vec_nor(vsi, vbi);
+ vsi = vec_nor(vbi, vsi);
+ vui = vec_nor(vui, vui);
+ vui = vec_nor(vui, vbi);
+ vui = vec_nor(vbi, vui);
+ vbi = vec_nor(vbi, vbi);
+ vsl = vec_nor(vsl, vsl);
+ vsl = vec_nor(vsl, vbl);
+ vsl = vec_nor(vbl, vsl);
+ vul = vec_nor(vul, vul);
+ vul = vec_nor(vul, vbl);
+ vul = vec_nor(vbl, vul);
+ vbl = vec_nor(vbl, vbl);
+ vd = vec_nor(vd, vd);
+ vd = vec_nor(vd, vbl);
+ vd = vec_nor(vbl, vd);
+
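+  // The "i1 false" flag on llvm.ctlz/llvm.cttz means a zero input is defined
+  // (yielding the element bit width), matching the z/Architecture behavior.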
+ vuc = vec_cntlz(vsc);
+ // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vuc = vec_cntlz(vuc);
+ // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vus = vec_cntlz(vss);
+ // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vus = vec_cntlz(vus);
+ // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vui = vec_cntlz(vsi);
+ // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vui = vec_cntlz(vui);
+ // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vul = vec_cntlz(vsl);
+ // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
+ vul = vec_cntlz(vul);
+ // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
+
+ vuc = vec_cnttz(vsc);
+ // CHECK: call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vuc = vec_cnttz(vuc);
+ // CHECK: call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vus = vec_cnttz(vss);
+ // CHECK: call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vus = vec_cnttz(vus);
+ // CHECK: call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vui = vec_cnttz(vsi);
+ // CHECK: call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vui = vec_cnttz(vui);
+ // CHECK: call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vul = vec_cnttz(vsl);
+ // CHECK: call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %{{.*}}, i1 false)
+ vul = vec_cnttz(vul);
+ // CHECK: call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %{{.*}}, i1 false)
+
+ vuc = vec_popcnt(vsc);
+ // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}})
+ vuc = vec_popcnt(vuc);
+ // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}})
+ vus = vec_popcnt(vss);
+ // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %{{.*}})
+ vus = vec_popcnt(vus);
+ // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %{{.*}})
+ vui = vec_popcnt(vsi);
+ // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}})
+ vui = vec_popcnt(vui);
+ // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}})
+ vul = vec_popcnt(vsl);
+ // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}})
+ vul = vec_popcnt(vul);
+ // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}})
+
+ vsc = vec_rl(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.verllvb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_rl(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.verllvb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_rl(vss, vus);
+ // CHECK: call <8 x i16> @llvm.s390.verllvh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_rl(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.verllvh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_rl(vsi, vui);
+ // CHECK: call <4 x i32> @llvm.s390.verllvf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_rl(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.verllvf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_rl(vsl, vul);
+ // CHECK: call <2 x i64> @llvm.s390.verllvg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_rl(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.verllvg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
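+  // vec_rli takes an unsigned long count, which is narrowed to the i32 rotate
+  // amount expected by verll*.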
+ vsc = vec_rli(vsc, ul);
+ // CHECK: call <16 x i8> @llvm.s390.verllb(<16 x i8> %{{.*}}, i32 %{{.*}})
+ vuc = vec_rli(vuc, ul);
+ // CHECK: call <16 x i8> @llvm.s390.verllb(<16 x i8> %{{.*}}, i32 %{{.*}})
+ vss = vec_rli(vss, ul);
+ // CHECK: call <8 x i16> @llvm.s390.verllh(<8 x i16> %{{.*}}, i32 %{{.*}})
+ vus = vec_rli(vus, ul);
+ // CHECK: call <8 x i16> @llvm.s390.verllh(<8 x i16> %{{.*}}, i32 %{{.*}})
+ vsi = vec_rli(vsi, ul);
+ // CHECK: call <4 x i32> @llvm.s390.verllf(<4 x i32> %{{.*}}, i32 %{{.*}})
+ vui = vec_rli(vui, ul);
+ // CHECK: call <4 x i32> @llvm.s390.verllf(<4 x i32> %{{.*}}, i32 %{{.*}})
+ vsl = vec_rli(vsl, ul);
+ // CHECK: call <2 x i64> @llvm.s390.verllg(<2 x i64> %{{.*}}, i32 %{{.*}})
+ vul = vec_rli(vul, ul);
+ // CHECK: call <2 x i64> @llvm.s390.verllg(<2 x i64> %{{.*}}, i32 %{{.*}})
+
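+  // The verim* mask is an immediate i32; 0 and 255 probe both ends of its range.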
+ vsc = vec_rl_mask(vsc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsc = vec_rl_mask(vsc, vuc, 255);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 255)
+ vuc = vec_rl_mask(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_rl_mask(vuc, vuc, 255);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 255)
+ vss = vec_rl_mask(vss, vus, 0);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vss = vec_rl_mask(vss, vus, 255);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 255)
+ vus = vec_rl_mask(vus, vus, 0);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_rl_mask(vus, vus, 255);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 255)
+ vsi = vec_rl_mask(vsi, vui, 0);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vsi = vec_rl_mask(vsi, vui, 255);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 255)
+ vui = vec_rl_mask(vui, vui, 0);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_rl_mask(vui, vui, 255);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 255)
+ vsl = vec_rl_mask(vsl, vul, 0);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vsl = vec_rl_mask(vsl, vul, 255);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 255)
+ vul = vec_rl_mask(vul, vul, 0);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vul = vec_rl_mask(vul, vul, 255);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 255)
+
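+  // Every vec_sll overload funnels through the single byte-level vsl intrinsic
+  // on <16 x i8>, so all element types produce the same CHECK line.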
+ vsc = vec_sll(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sll(vsc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sll(vsc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sll(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sll(vuc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sll(vuc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sll(vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sll(vbc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sll(vbc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sll(vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sll(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sll(vss, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sll(vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sll(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sll(vus, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sll(vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sll(vbs, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sll(vbs, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sll(vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sll(vsi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sll(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sll(vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sll(vui, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sll(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sll(vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sll(vbi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sll(vbi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sll(vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sll(vsl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sll(vsl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sll(vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sll(vul, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sll(vul, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sll(vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sll(vbl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sll(vbl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_slb(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_slb(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_slb(vuc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_slb(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_slb(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_slb(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_slb(vus, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_slb(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_slb(vsi, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_slb(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_slb(vui, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_slb(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_slb(vsl, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_slb(vsl, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_slb(vul, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_slb(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_slb(vd, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_slb(vd, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_sld(vsc, vsc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsc = vec_sld(vsc, vsc, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vuc = vec_sld(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_sld(vuc, vuc, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vss = vec_sld(vss, vss, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_sld(vss, vss, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vus = vec_sld(vus, vus, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_sld(vus, vus, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vsi = vec_sld(vsi, vsi, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsi = vec_sld(vsi, vsi, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vui = vec_sld(vui, vui, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vui = vec_sld(vui, vui, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vsl = vec_sld(vsl, vsl, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsl = vec_sld(vsl, vsl, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vul = vec_sld(vul, vul, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vul = vec_sld(vul, vul, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vd = vec_sld(vd, vd, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vd = vec_sld(vd, vd, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+
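+  // vec_sldw counts words, so the immediate is scaled by 4 into the byte
+  // count taken by vsldb (3 becomes 12 below).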
+ vsc = vec_sldw(vsc, vsc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsc = vec_sldw(vsc, vsc, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vuc = vec_sldw(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_sldw(vuc, vuc, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vss = vec_sldw(vss, vss, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_sldw(vss, vss, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vus = vec_sldw(vus, vus, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_sldw(vus, vus, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vsi = vec_sldw(vsi, vsi, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsi = vec_sldw(vsi, vsi, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vui = vec_sldw(vui, vui, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vui = vec_sldw(vui, vui, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vsl = vec_sldw(vsl, vsl, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsl = vec_sldw(vsl, vsl, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vul = vec_sldw(vul, vul, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vul = vec_sldw(vul, vul, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vd = vec_sldw(vd, vd, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vd = vec_sldw(vd, vd, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+
+ vsc = vec_sral(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sral(vsc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sral(vsc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sral(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sral(vuc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sral(vuc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sral(vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sral(vbc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sral(vbc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sral(vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sral(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sral(vss, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sral(vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sral(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sral(vus, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sral(vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sral(vbs, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sral(vbs, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sral(vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sral(vsi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sral(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sral(vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sral(vui, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sral(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sral(vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sral(vbi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sral(vbi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sral(vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sral(vsl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sral(vsl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sral(vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sral(vul, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sral(vul, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sral(vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sral(vbl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sral(vbl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_srab(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srab(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srab(vuc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srab(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srab(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srab(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srab(vus, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srab(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srab(vsi, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srab(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srab(vui, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srab(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srab(vsl, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srab(vsl, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srab(vul, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srab(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srab(vd, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srab(vd, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_srl(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srl(vsc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srl(vsc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srl(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srl(vuc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srl(vuc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_srl(vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_srl(vbc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_srl(vbc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srl(vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srl(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srl(vss, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srl(vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srl(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srl(vus, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_srl(vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_srl(vbs, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_srl(vbs, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srl(vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srl(vsi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srl(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srl(vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srl(vui, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srl(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_srl(vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_srl(vbi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_srl(vbi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srl(vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srl(vsl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srl(vsl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srl(vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srl(vul, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srl(vul, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_srl(vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_srl(vbl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_srl(vbl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_srb(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srb(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srb(vuc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srb(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srb(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srb(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srb(vus, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srb(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srb(vsi, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srb(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srb(vui, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srb(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srb(vsl, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srb(vsl, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srb(vul, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srb(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srb(vd, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srb(vd, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
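+  // No CHECK lines for vec_abs/vec_max/vec_min: these are expected to expand
+  // to generic IR compare/select (or sub) sequences, not s390 intrinsics.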
+ vsc = vec_abs(vsc);
+ vss = vec_abs(vss);
+ vsi = vec_abs(vsi);
+ vsl = vec_abs(vsl);
+
+ vsc = vec_max(vsc, vsc);
+ vsc = vec_max(vsc, vbc);
+ vsc = vec_max(vbc, vsc);
+ vuc = vec_max(vuc, vuc);
+ vuc = vec_max(vuc, vbc);
+ vuc = vec_max(vbc, vuc);
+ vss = vec_max(vss, vss);
+ vss = vec_max(vss, vbs);
+ vss = vec_max(vbs, vss);
+ vus = vec_max(vus, vus);
+ vus = vec_max(vus, vbs);
+ vus = vec_max(vbs, vus);
+ vsi = vec_max(vsi, vsi);
+ vsi = vec_max(vsi, vbi);
+ vsi = vec_max(vbi, vsi);
+ vui = vec_max(vui, vui);
+ vui = vec_max(vui, vbi);
+ vui = vec_max(vbi, vui);
+ vsl = vec_max(vsl, vsl);
+ vsl = vec_max(vsl, vbl);
+ vsl = vec_max(vbl, vsl);
+ vul = vec_max(vul, vul);
+ vul = vec_max(vul, vbl);
+ vul = vec_max(vbl, vul);
+ vd = vec_max(vd, vd);
+
+ vsc = vec_min(vsc, vsc);
+ vsc = vec_min(vsc, vbc);
+ vsc = vec_min(vbc, vsc);
+ vuc = vec_min(vuc, vuc);
+ vuc = vec_min(vuc, vbc);
+ vuc = vec_min(vbc, vuc);
+ vss = vec_min(vss, vss);
+ vss = vec_min(vss, vbs);
+ vss = vec_min(vbs, vss);
+ vus = vec_min(vus, vus);
+ vus = vec_min(vus, vbs);
+ vus = vec_min(vbs, vus);
+ vsi = vec_min(vsi, vsi);
+ vsi = vec_min(vsi, vbi);
+ vsi = vec_min(vbi, vsi);
+ vui = vec_min(vui, vui);
+ vui = vec_min(vui, vbi);
+ vui = vec_min(vbi, vui);
+ vsl = vec_min(vsl, vsl);
+ vsl = vec_min(vsl, vbl);
+ vsl = vec_min(vbl, vsl);
+ vul = vec_min(vul, vul);
+ vul = vec_min(vul, vbl);
+ vul = vec_min(vbl, vul);
+ vd = vec_min(vd, vd);
+
+ vuc = vec_addc(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vaccb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_addc(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vacch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_addc(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vaccf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_addc(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vaccg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
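+  // The *_u128 variants treat <16 x i8> as a single 128-bit integer operand.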
+ vuc = vec_add_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vaq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_addc_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vaccq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_adde_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vacq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_addec_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vacccq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_avg(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vavgb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_avg(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vavglb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_avg(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vavgh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_avg(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vavglh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_avg(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vavgf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_avg(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vavglf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_avg(vsl, vsl);
+ // CHECK: call <2 x i64> @llvm.s390.vavgg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_avg(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vavglg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vui = vec_checksum(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vcksm(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vus = vec_gfmsum(vuc, vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vgfmb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_gfmsum(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vgfmh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vul = vec_gfmsum(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vgfmf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vuc = vec_gfmsum_128(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vgfmg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vus = vec_gfmsum_accum(vuc, vuc, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vgfmab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_gfmsum_accum(vus, vus, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vgfmah(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_gfmsum_accum(vui, vui, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vgfmaf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ vuc = vec_gfmsum_accum_128(vul, vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vgfmag(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_mladd(vsc, vsc, vsc);
+ vsc = vec_mladd(vuc, vsc, vsc);
+ vsc = vec_mladd(vsc, vuc, vuc);
+ vuc = vec_mladd(vuc, vuc, vuc);
+ vss = vec_mladd(vss, vss, vss);
+ vss = vec_mladd(vus, vss, vss);
+ vss = vec_mladd(vss, vus, vus);
+ vus = vec_mladd(vus, vus, vus);
+ vsi = vec_mladd(vsi, vsi, vsi);
+ vsi = vec_mladd(vui, vsi, vsi);
+ vsi = vec_mladd(vsi, vui, vui);
+ vui = vec_mladd(vui, vui, vui);
+
+ vsc = vec_mhadd(vsc, vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vmahb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_mhadd(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vmalhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_mhadd(vss, vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmahh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_mhadd(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmalhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_mhadd(vsi, vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmahf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_mhadd(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmalhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vss = vec_meadd(vsc, vsc, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_meadd(vuc, vuc, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmaleb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_meadd(vss, vss, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_meadd(vus, vus, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmaleh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_meadd(vsi, vsi, vsl);
+ // CHECK: call <2 x i64> @llvm.s390.vmaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_meadd(vui, vui, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vmalef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+
+ vss = vec_moadd(vsc, vsc, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmaob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_moadd(vuc, vuc, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmalob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_moadd(vss, vss, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmaoh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_moadd(vus, vus, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmaloh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_moadd(vsi, vsi, vsl);
+ // CHECK: call <2 x i64> @llvm.s390.vmaof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_moadd(vui, vui, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vmalof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+
+ vsc = vec_mulh(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vmhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_mulh(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vmlhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_mulh(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_mulh(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmlhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_mulh(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_mulh(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmlhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vss = vec_mule(vsc, vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vmeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_mule(vuc, vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vmleb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_mule(vss, vss);
+ // CHECK: call <4 x i32> @llvm.s390.vmeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_mule(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vmleh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsl = vec_mule(vsi, vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vmef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_mule(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vmlef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vss = vec_mulo(vsc, vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vmob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_mulo(vuc, vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vmlob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_mulo(vss, vss);
+ // CHECK: call <4 x i32> @llvm.s390.vmoh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_mulo(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vmloh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsl = vec_mulo(vsi, vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vmof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_mulo(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vmlof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
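+ // Naming sketch for the multiplies above (paraphrased semantics):
+ // "h" keeps the high half of each product, "e"/"o" widen and multiply
+ // the even-/odd-indexed elements, and vec_meadd/vec_moadd accumulate
+ // those widened products into the third operand.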
+
+ vuc = vec_subc(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vscbib(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_subc(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vscbih(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_subc(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vscbif(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_subc(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vscbig(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vuc = vec_sub_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_subc_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vscbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sube_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_subec_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsbcbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vui = vec_sum4(vuc, vuc);
+ // CHECK: call <4 x i32> @llvm.s390.vsumb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sum4(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vsumh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vul = vec_sum2(vus, vus);
+ // CHECK: call <2 x i64> @llvm.s390.vsumgh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vul = vec_sum2(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vsumgf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vuc = vec_sum_u128(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsumqf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vuc = vec_sum_u128(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsumqg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
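+ // Rough reading of the sum intrinsics: vec_sum4 produces per-word
+ // sums of the smaller elements, vec_sum2 per-doubleword sums, and
+ // vec_sum_u128 a single 128-bit sum returned in a byte vector (a
+ // paraphrase, not the formal definition).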
+
+ idx = vec_test_mask(vsc, vuc);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vuc, vuc);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vss, vus);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vus, vus);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vsi, vui);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vui, vui);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vsl, vul);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vul, vul);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vd, vul);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+}
+
+void test_string(void) {
+ vsc = vec_cp_until_zero(vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}})
+ vuc = vec_cp_until_zero(vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}})
+ vbc = vec_cp_until_zero(vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}})
+ vss = vec_cp_until_zero(vss);
+ // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}})
+ vus = vec_cp_until_zero(vus);
+ // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}})
+ vbs = vec_cp_until_zero(vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}})
+ vsi = vec_cp_until_zero(vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}})
+ vui = vec_cp_until_zero(vui);
+ // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}})
+ vbi = vec_cp_until_zero(vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}})
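+ // vec_cp_until_zero maps to VECTOR ISOLATE STRING; as I read it, the
+ // elements up to and including the first zero are copied and the rest
+ // of the result is zeroed (paraphrased semantics).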
+
+ vsc = vec_cp_until_zero_cc(vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}})
+ vuc = vec_cp_until_zero_cc(vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}})
+ vbc = vec_cp_until_zero_cc(vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}})
+ vss = vec_cp_until_zero_cc(vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}})
+ vus = vec_cp_until_zero_cc(vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}})
+ vbs = vec_cp_until_zero_cc(vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}})
+ vsi = vec_cp_until_zero_cc(vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}})
+ vui = vec_cp_until_zero_cc(vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}})
+ vbi = vec_cp_until_zero_cc(vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vbc = vec_cmprg(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_cmprg(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_cmprg(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vbc = vec_cmprg_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_cmprg_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_cmprg_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vuc = vec_cmprg_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vuc = vec_cmprg_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vuc = vec_cmprg_or_0_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrczb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_or_0_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrczh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_or_0_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrczf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vuc = vec_cmprg_or_0_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrczbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_or_0_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrczhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_or_0_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrczfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vbc = vec_cmpnrg(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_cmpnrg(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_cmpnrg(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vbc = vec_cmpnrg_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_cmpnrg_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_cmpnrg_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vuc = vec_cmpnrg_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vuc = vec_cmpnrg_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vuc = vec_cmpnrg_or_0_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrczb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_or_0_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrczh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_or_0_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrczf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vuc = vec_cmpnrg_or_0_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrczbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_or_0_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrczhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_or_0_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrczfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
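+ // The trailing immediate on vstrc/vstrcz appears to encode the mode:
+ // bit 4 selects a boolean-mask result (cmprg/cmpnrg) over an index
+ // result (the _idx forms, 0/8), and bit 8 inverts the range test
+ // (cmpnrg* vs. cmprg*); the "z" intrinsics also stop at a zero
+ // element, and the "s" forms additionally return the condition code.
+ // This decoding is inferred from the constants above, not taken from
+ // the ISA reference.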
+
+ vbc = vec_find_any_eq(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
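+ // The vfae immediates appear to follow the same encoding as inferred
+ // for vstrc: 4 for a boolean result, 0 for an index, plus 8 to negate
+ // the comparison (the find_any_ne* forms), with "z" variants stopping
+ // at a zero element and "s" variants returning the condition code.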
+
+ vbc = vec_find_any_eq_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vsc = vec_find_any_eq_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vsc = vec_find_any_eq_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vsc = vec_find_any_eq_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vsc = vec_find_any_eq_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vbc = vec_find_any_ne(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vbc = vec_find_any_ne_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vsc = vec_find_any_ne_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vsc = vec_find_any_ne_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vsc = vec_find_any_ne_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vsc = vec_find_any_ne_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+}
+
+void test_float(void) {
+ vd = vec_abs(vd);
+ // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+
+ vd = vec_nabs(vd);
+ // CHECK: [[ABS:%[^ ]+]] = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+ // CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[ABS]]
+
+ vd = vec_madd(vd, vd, vd);
+ // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+ vd = vec_msub(vd, vd, vd);
+ // CHECK: [[NEG:%[^ ]+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.*}}
+ // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
+ vd = vec_sqrt(vd);
+ // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{.*}})
+
+ vd = vec_ld2f(cptrf);
+ // CHECK: [[VAL:%[^ ]+]] = load <2 x float>, <2 x float>* %{{.*}}
+ // CHECK: fpext <2 x float> [[VAL]] to <2 x double>
+ vec_st2f(vd, ptrf);
+ // CHECK: [[VAL:%[^ ]+]] = fptrunc <2 x double> %{{.*}} to <2 x float>
+ // CHECK: store <2 x float> [[VAL]], <2 x float>* %{{.*}}
+
+ vd = vec_ctd(vsl, 0);
+ // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
+ vd = vec_ctd(vul, 0);
+ // CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
+ vd = vec_ctd(vsl, 1);
+ // CHECK: [[VAL:%[^ ]+]] = sitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 5.000000e-01, double 5.000000e-01>
+ vd = vec_ctd(vul, 1);
+ // CHECK: [[VAL:%[^ ]+]] = uitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 5.000000e-01, double 5.000000e-01>
+ vd = vec_ctd(vsl, 31);
+ // CHECK: [[VAL:%[^ ]+]] = sitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 0x3E00000000000000, double 0x3E00000000000000>
+ vd = vec_ctd(vul, 31);
+ // CHECK: [[VAL:%[^ ]+]] = uitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 0x3E00000000000000, double 0x3E00000000000000>
+
+ vsl = vec_ctsl(vd, 0);
+ // CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
+ vul = vec_ctul(vd, 0);
+ // CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
+ vsl = vec_ctsl(vd, 1);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 2.000000e+00, double 2.000000e+00>
+ // CHECK: fptosi <2 x double> [[VAL]] to <2 x i64>
+ vul = vec_ctul(vd, 1);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 2.000000e+00, double 2.000000e+00>
+ // CHECK: fptoui <2 x double> [[VAL]] to <2 x i64>
+ vsl = vec_ctsl(vd, 31);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 0x41E0000000000000, double 0x41E0000000000000>
+ // CHECK: fptosi <2 x double> [[VAL]] to <2 x i64>
+ vul = vec_ctul(vd, 31);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 0x41E0000000000000, double 0x41E0000000000000>
+ // CHECK: fptoui <2 x double> [[VAL]] to <2 x i64>
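+ // The scale operand n acts as a power-of-two factor: vec_ctd scales
+ // the converted value by 2^-n (0.5 for n=1, 0x3E00... = 2^-31), while
+ // vec_ctsl/vec_ctul pre-scale by 2^n (2.0 for n=1, 0x41E0... = 2^31),
+ // matching the fmul constants checked above.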
+
+ vd = vec_roundp(vd);
+ // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})
+ vd = vec_ceil(vd);
+ // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})
+ vd = vec_roundm(vd);
+ // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})
+ vd = vec_floor(vd);
+ // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})
+ vd = vec_roundz(vd);
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})
+ vd = vec_trunc(vd);
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})
+ vd = vec_roundc(vd);
+ // CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{.*}})
+ vd = vec_round(vd);
+ // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
+
+ vbl = vec_fp_test_data_class(vd, 0, &cc);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 0)
+ vbl = vec_fp_test_data_class(vd, 4095, &cc);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
+}
diff --git a/test/CodeGen/integer-overflow.c b/test/CodeGen/integer-overflow.c
index de3b53f4b5b3..6a7c3e51ee1b 100644
--- a/test/CodeGen/integer-overflow.c
+++ b/test/CodeGen/integer-overflow.c
@@ -72,4 +72,11 @@ void test1() {
// TRAPV: add i8 {{.*}}, 1
// CATCH_UB: add i8 {{.*}}, 1
++PR9350;
+
+ // PR24256: don't instrument __builtin_frame_address.
+ __builtin_frame_address(0 + 0);
+ // DEFAULT: call i8* @llvm.frameaddress(i32 0)
+ // WRAPV: call i8* @llvm.frameaddress(i32 0)
+ // TRAPV: call i8* @llvm.frameaddress(i32 0)
+ // CATCH_UB: call i8* @llvm.frameaddress(i32 0)
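+ // The argument to __builtin_frame_address must remain an integer
+ // constant expression, so (presumably) the 0 + 0 above has to be
+ // constant-folded rather than lowered to an instrumented add.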
}
diff --git a/test/CodeGen/le32-regparm.c b/test/CodeGen/le32-regparm.c
index c8f70694c43d..ecb1030aa1ff 100644
--- a/test/CodeGen/le32-regparm.c
+++ b/test/CodeGen/le32-regparm.c
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple le32-unknown-nacl %s -fsyntax-only -verify
+// RUN: %clang_cc1 -triple aarch64 %s -fsyntax-only -verify
void __attribute__((regparm(2))) fc_f1(int i, int j, int k) {} // expected-error{{'regparm' is not valid on this platform}}
diff --git a/test/CodeGen/long_double_fp128.cpp b/test/CodeGen/long_double_fp128.cpp
new file mode 100644
index 000000000000..1780255cea97
--- /dev/null
+++ b/test/CodeGen/long_double_fp128.cpp
@@ -0,0 +1,22 @@
+// RUN: %clang_cc1 -triple x86_64-linux-android -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=A64
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=G64
+// RUN: %clang_cc1 -triple powerpc64-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=P64
+// RUN: %clang_cc1 -triple i686-linux-android -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=A32
+// RUN: %clang_cc1 -triple i686-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=G32
+// RUN: %clang_cc1 -triple powerpc-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=P32
+
+// Check the mangled name of long double.
+// Android's gcc and llvm use fp128 for long double.
+void test(long, float, double, long double, long double _Complex) { }
+// A64: define void @_Z4testlfdgCg(i64, float, double, fp128, { fp128, fp128 }*
+// G64: define void @_Z4testlfdeCe(i64, float, double, x86_fp80, { x86_fp80, x86_fp80 }*
+// P64: define void @_Z4testlfdgCg(i64, float, double, ppc_fp128, ppc_fp128 {{.*}}, ppc_fp128
+// A32: define void @_Z4testlfdeCe(i32, float, double, double, { double, double }*
+// G32: define void @_Z4testlfdeCe(i32, float, double, x86_fp80, { x86_fp80, x86_fp80 }*
+// P32: define void @_Z4testlfdgCg(i32, float, double, ppc_fp128, { ppc_fp128, ppc_fp128 }*
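+// In the Itanium mangling checked above, the parameter letters are
+// l = long, f = float, d = double, e = long double, g = __float128,
+// and C marks a _Complex type; targets whose long double is a 128-bit
+// type therefore mangle it as 'g'.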
diff --git a/test/CodeGen/palignr.c b/test/CodeGen/palignr.c
index 1712df5256ce..5a77597c3403 100644
--- a/test/CodeGen/palignr.c
+++ b/test/CodeGen/palignr.c
@@ -4,13 +4,13 @@
#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
typedef __attribute__((vector_size(16))) int int4;
-// CHECK: palignr
+// CHECK: palignr $15, %xmm1, %xmm0
int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
// CHECK: ret
// CHECK: ret
// CHECK-NOT: palignr
int4 align2(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 16); }
-// CHECK: psrldq
+// CHECK: psrldq $1, %xmm0
int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }
// CHECK: xor
int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }
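+// _mm_alignr_epi8 concatenates a:b and shifts the pair right by n
+// bytes, so (as the checks above reflect) n=15 stays a palignr, n=16
+// simply returns a, 16 < n < 32 degenerates to psrldq $(n-16) on a,
+// and n >= 32 yields zero.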
diff --git a/test/CodeGen/x86_64-fp128.c b/test/CodeGen/x86_64-fp128.c
new file mode 100644
index 000000000000..0147721f9b6a
--- /dev/null
+++ b/test/CodeGen/x86_64-fp128.c
@@ -0,0 +1,115 @@
+// RUN: %clang_cc1 -triple x86_64-linux-android -emit-llvm -O -o - %s \
+// RUN: | FileCheck %s --check-prefix=ANDROID --check-prefix=CHECK
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -O -o - %s \
+// RUN: | FileCheck %s --check-prefix=GNU --check-prefix=CHECK
+// RUN: %clang_cc1 -triple x86_64 -emit-llvm -O -o - %s \
+// RUN: | FileCheck %s --check-prefix=GNU --check-prefix=CHECK
+
+// Android uses fp128 for long double, but other x86_64 targets use x86_fp80.
+
+long double dataLD = 1.0L;
+// ANDROID: @dataLD = global fp128 0xL00000000000000003FFF000000000000, align 16
+// GNU: @dataLD = global x86_fp80 0xK3FFF8000000000000000, align 16
+
+long double _Complex dataLDC = {1.0L, 1.0L};
+// ANDROID: @dataLDC = global { fp128, fp128 } { fp128 0xL00000000000000003FFF000000000000, fp128 0xL00000000000000003FFF000000000000 }, align 16
+// GNU: @dataLDC = global { x86_fp80, x86_fp80 } { x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000 }, align 16
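+// (LLVM IR spells x86_fp80 constants with an 0xK prefix and fp128
+// constants with 0xL; both globals above encode 1.0L, the x87 form
+// carrying its explicit integer bit in 0x8000000000000000.)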
+
+long double TestLD(long double x) {
+ return x * x;
+// ANDROID: define fp128 @TestLD(fp128 %x)
+// GNU: define x86_fp80 @TestLD(x86_fp80 %x)
+}
+
+long double _Complex TestLDC(long double _Complex x) {
+ return x * x;
+// ANDROID: define void @TestLDC({ fp128, fp128 }* {{.*}}, { fp128, fp128 }* {{.*}} %x)
+// GNU: define { x86_fp80, x86_fp80 } @TestLDC({ x86_fp80, x86_fp80 }* {{.*}} %x)
+}
+
+typedef __builtin_va_list va_list;
+
+int TestGetVarInt(va_list ap) {
+ return __builtin_va_arg(ap, int);
+// Since an int can be passed in memory or in a register, there are two branches.
+// CHECK: define i32 @TestGetVarInt(
+// CHECK: br label
+// CHECK: br label
+// CHECK: = phi
+// CHECK: ret i32
+}
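+// The two branches come from the SysV x86_64 va_list; sketched from
+// the ABI (field names as in the spec), it looks like:
+//
+//   typedef struct {
+//     unsigned int gp_offset;   // byte offset of next GP register slot
+//     unsigned int fp_offset;   // byte offset of next FP register slot
+//     void *overflow_arg_area;  // stack-passed arguments
+//     void *reg_save_area;      // spilled register arguments
+//   } __va_list_tag;
+//
+// va_arg first tries the register save area and falls back to the
+// overflow area; the phi checked in these functions merges the two
+// loads.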
+
+double TestGetVarDouble(va_list ap) {
+ return __builtin_va_arg(ap, double);
+// Since a double can be passed in memory or in a register, there are two branches.
+// CHECK: define double @TestGetVarDouble(
+// CHECK: br label
+// CHECK: br label
+// CHECK: = phi
+// CHECK: ret double
+}
+
+long double TestGetVarLD(va_list ap) {
+ return __builtin_va_arg(ap, long double);
+// fp128 can be passed in memory or in a register, but x86_fp80 is always passed in memory.
+// ANDROID: define fp128 @TestGetVarLD(
+// GNU: define x86_fp80 @TestGetVarLD(
+// ANDROID: br label
+// ANDROID: br label
+// ANDROID: = phi
+// GNU-NOT: br
+// GNU-NOT: = phi
+// ANDROID: ret fp128
+// GNU: ret x86_fp80
+}
+
+long double _Complex TestGetVarLDC(va_list ap) {
+ return __builtin_va_arg(ap, long double _Complex);
+// A pair of fp128 or x86_fp80 values is passed as a struct in memory.
+// ANDROID: define void @TestGetVarLDC({ fp128, fp128 }* {{.*}}, %struct.__va_list_tag*
+// GNU: define { x86_fp80, x86_fp80 } @TestGetVarLDC(
+// CHECK-NOT: br
+// CHECK-NOT: phi
+// ANDROID: ret void
+// GNU: ret { x86_fp80, x86_fp80 }
+}
+
+void TestVarArg(const char *s, ...);
+
+void TestPassVarInt(int x) {
+ TestVarArg("A", x);
+// CHECK: define void @TestPassVarInt(i32 %x)
+// CHECK: call {{.*}} @TestVarArg(i8* {{.*}}, i32 %x)
+}
+
+void TestPassVarFloat(float x) {
+ TestVarArg("A", x);
+// CHECK: define void @TestPassVarFloat(float %x)
+// CHECK: call {{.*}} @TestVarArg(i8* {{.*}}, double %
+}
+
+void TestPassVarDouble(double x) {
+ TestVarArg("A", x);
+// CHECK: define void @TestPassVarDouble(double %x)
+// CHECK: call {{.*}} @TestVarArg(i8* {{.*}}, double %x
+}
+
+void TestPassVarLD(long double x) {
+ TestVarArg("A", x);
+// ANDROID: define void @TestPassVarLD(fp128 %x)
+// ANDROID: call {{.*}} @TestVarArg(i8* {{.*}}, fp128 %x
+// GNU: define void @TestPassVarLD(x86_fp80 %x)
+// GNU: call {{.*}} @TestVarArg(i8* {{.*}}, x86_fp80 %x
+}
+
+void TestPassVarLDC(long double _Complex x) {
+ TestVarArg("A", x);
+// ANDROID: define void @TestPassVarLDC({ fp128, fp128 }* {{.*}} %x)
+// ANDROID: store fp128 %{{.*}}, fp128* %
+// ANDROID-NEXT: store fp128 %{{.*}}, fp128* %
+// ANDROID-NEXT: call {{.*}} @TestVarArg(i8* {{.*}}, { fp128, fp128 }* {{.*}} %
+// GNU: define void @TestPassVarLDC({ x86_fp80, x86_fp80 }* {{.*}} %x)
+// GNU: store x86_fp80 %{{.*}}, x86_fp80* %
+// GNU-NEXT: store x86_fp80 %{{.*}}, x86_fp80* %
+// GNU-NEXT: call {{.*}} @TestVarArg(i8* {{.*}}, { x86_fp80, x86_fp80 }* {{.*}} %
+}
diff --git a/test/CodeGen/zvector.c b/test/CodeGen/zvector.c
new file mode 100644
index 000000000000..ebe7e415e1db
--- /dev/null
+++ b/test/CodeGen/zvector.c
@@ -0,0 +1,2798 @@
+// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector \
+// RUN: -O -emit-llvm -o - -W -Wall -Werror %s | FileCheck %s
+
+volatile vector signed char sc, sc2;
+volatile vector unsigned char uc, uc2;
+volatile vector bool char bc, bc2;
+
+volatile vector signed short ss, ss2;
+volatile vector unsigned short us, us2;
+volatile vector bool short bs, bs2;
+
+volatile vector signed int si, si2;
+volatile vector unsigned int ui, ui2;
+volatile vector bool int bi, bi2;
+
+volatile vector signed long long sl, sl2;
+volatile vector unsigned long long ul, ul2;
+volatile vector bool long long bl, bl2;
+
+volatile vector double fd, fd2;
+
+volatile int cnt;
+
+void test_assign (void)
+{
+// CHECK-LABEL: test_assign
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
+ sc = sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
+ uc = uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
+ ss = ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
+ us = us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
+ si = si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
+ ui = ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
+ sl = sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
+ ul = ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
+ fd = fd2;
+}
+
+void test_pos (void)
+{
+// CHECK-LABEL: test_pos
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
+ sc = +sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
+ uc = +uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
+ ss = +ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
+ us = +us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
+ si = +si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
+ ui = +ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
+ sl = +sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
+ ul = +ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
+ fd = +fd2;
+}
+
+void test_neg (void)
+{
+// CHECK-LABEL: test_neg
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sub <16 x i8> zeroinitializer, [[VAL]]
+ sc = -sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sub <8 x i16> zeroinitializer, [[VAL]]
+ ss = -ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sub <4 x i32> zeroinitializer, [[VAL]]
+ si = -si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sub <2 x i64> zeroinitializer, [[VAL]]
+ sl = -sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[VAL]]
+ fd = -fd2;
+}
+
+void test_preinc (void)
+{
+// CHECK-LABEL: test_preinc
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ++sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ++uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ++ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ++us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ ++si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ ++ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ ++sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ ++ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
+ ++fd2;
+}
+
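+// Post-increment is expected to produce the same splat-of-1 add as the pre-increment form.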
+void test_postinc (void)
+{
+// CHECK-LABEL: test_postinc
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ sc2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ uc2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ss2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ us2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ si2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ ui2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ sl2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ ul2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
+ fd2++;
+}
+
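+// Pre-decrement is expected as an add of a splat of -1 rather than a sub.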
+void test_predec (void)
+{
+// CHECK-LABEL: test_predec
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ --sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ --uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ --ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ --us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ --si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ --ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ --sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ --ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
+ --fd2;
+}
+
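+// Post-decrement likewise expects an add of a splat of -1.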
+void test_postdec (void)
+{
+// CHECK-LABEL: test_postdec
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ sc2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ uc2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ss2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ us2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ si2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ui2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ sl2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ ul2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
+ fd2--;
+}
+
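+// Binary + covers signed and unsigned operands, including mixes with bool vectors on either side; double uses fadd.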
+void test_add (void)
+{
+// CHECK-LABEL: test_add
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc + sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc + bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ sc = bc + sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc + uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc + bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ uc = bc + uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss + ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss + bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ ss = bs + ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ us = us + us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ us = us + bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ us = bs + us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ si = si + si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ si = si + bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ si = bi + si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui + ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui + bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ ui = bi + ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl + sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl + bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ sl = bl + sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul + ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul + bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ ul = bl + ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd + fd2;
+}
+
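+// For compound assignments the right-hand side is loaded before the destination, so [[VAL2]] is captured first.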
+void test_add_assign (void)
+{
+// CHECK-LABEL: test_add_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ sc += sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ sc += bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ uc += uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ uc += bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ ss += ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ ss += bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ us += us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ us += bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ si += si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ si += bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ ui += ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ ui += bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ sl += sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ sl += bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ ul += ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ ul += bl2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL2]], [[VAL1]]
+ fd += fd2;
+}
+
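+// Binary - over the same operand combinations; since sub is not commutative, the checks fix the order as [[VAL1]], [[VAL2]].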
+void test_sub (void)
+{
+// CHECK-LABEL: test_sub
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc - sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc - bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc = bc - sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc - uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc - bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc = bc - uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss - ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss - bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss = bs - ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us = us - us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us = us - bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us = bs - us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si = si - si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si = si - bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si = bi - si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui - ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui - bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui = bi - ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl - sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl - bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl = bl - sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul - ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul - bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul = bl - ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd - fd2;
+}
+
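+// The -= variants of the subtractions above.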
+void test_sub_assign (void)
+{
+// CHECK-LABEL: test_sub_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc -= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc -= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc -= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc -= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss -= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss -= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us -= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us -= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si -= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si -= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui -= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui -= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl -= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl -= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul -= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul -= bl2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
+ fd -= fd2;
+}
+
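+// Binary * is exercised only for signed/unsigned operands and double; no bool-vector mixes appear here.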
+void test_mul (void)
+{
+// CHECK-LABEL: test_mul
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc * sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc * uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss * ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
+ us = us * us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
+ si = si * si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui * ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl * sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul * ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fmul <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd * fd2;
+}
+
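+// The *= variants of the multiplications above.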
+void test_mul_assign (void)
+{
+// CHECK-LABEL: test_mul_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
+ sc *= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
+ uc *= uc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
+ ss *= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
+ us *= us2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
+ si *= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
+ ui *= ui2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
+ sl *= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
+ ul *= ul2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fmul <2 x double> [[VAL2]], [[VAL1]]
+ fd *= fd2;
+}
+
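+// Division is expected as sdiv or udiv depending on signedness, and as fdiv for double.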
+void test_div (void)
+{
+// CHECK-LABEL: test_div
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc / sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc / uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss / ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
+ us = us / us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
+ si = si / si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui / ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl / sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul / ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd / fd2;
+}
+
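+// The /= variants of the divisions above.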
+void test_div_assign (void)
+{
+// CHECK-LABEL: test_div_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
+ sc /= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
+ uc /= uc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
+ ss /= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
+ us /= us2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
+ si /= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
+ ui /= ui2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
+ sl /= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
+ ul /= ul2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
+ fd /= fd2;
+}
+
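+// The % operator is expected as srem or urem depending on signedness; there is no floating-point case.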
+void test_rem (void)
+{
+// CHECK-LABEL: test_rem
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc % sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc % uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss % ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
+ us = us % us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
+ si = si % si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui % ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl % sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul % ul2;
+}
+
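+// The %= variants of the remainder operations above.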
+void test_rem_assign (void)
+{
+// CHECK-LABEL: test_rem_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
+ sc %= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
+ uc %= uc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
+ ss %= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
+ us %= us2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
+ si %= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
+ ui %= ui2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
+ sl %= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
+ ul %= ul2;
+}
+
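+// Unary ~ is expected as an xor with an all-ones splat, and is also applied to the bool vectors.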
+void test_not (void)
+{
+// CHECK-LABEL: test_not
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ sc = ~sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ uc = ~uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ bc = ~bc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ss = ~ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ us = ~us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ bs = ~bs2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ si = ~si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ui = ~ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ bi = ~bi2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ sl = ~sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ ul = ~ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ bl = ~bl2;
+}
+
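+// Binary & covers every signed/unsigned/bool combination, including bool & bool.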
+void test_and (void)
+{
+// CHECK-LABEL: test_and
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc & sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc & bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ sc = bc & sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc & uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc & bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ uc = bc & uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ bc = bc & bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss & ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss & bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ ss = bs & ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ us = us & us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ us = us & bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ us = bs & us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ bs = bs & bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ si = si & si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ si = si & bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ si = bi & si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui & ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui & bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ ui = bi & ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ bi = bi & bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl & sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl & bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ sl = bl & sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul & ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul & bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ ul = bl & ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ bl = bl & bl2;
+}
+
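+// The &= variants; as with the other compound assignments, the right-hand side is loaded first.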
+void test_and_assign (void)
+{
+// CHECK-LABEL: test_and_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ sc &= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ sc &= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ uc &= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ uc &= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ bc &= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ ss &= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ ss &= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ us &= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ us &= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ bs &= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ si &= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ si &= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ ui &= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ ui &= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ bi &= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ sl &= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ sl &= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ ul &= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ ul &= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ bl &= bl2;
+}
+
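+// Binary | over the same operand combinations as &.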
+void test_or (void)
+{
+// CHECK-LABEL: test_or
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc | sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc | bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ sc = bc | sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc | uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc | bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ uc = bc | uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ bc = bc | bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss | ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss | bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ ss = bs | ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ us = us | us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ us = us | bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ us = bs | us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ bs = bs | bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ si = si | si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ si = si | bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ si = bi | si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui | ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui | bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ ui = bi | ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ bi = bi | bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl | sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl | bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ sl = bl | sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul | ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul | bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ ul = bl | ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ bl = bl | bl2;
+}
+
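+// The |= variants of the above.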
+void test_or_assign (void)
+{
+// CHECK-LABEL: test_or_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ sc |= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ sc |= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ uc |= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ uc |= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ bc |= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ ss |= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ ss |= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ us |= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ us |= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ bs |= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ si |= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ si |= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ ui |= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ ui |= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ bi |= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ sl |= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ sl |= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ ul |= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ ul |= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ bl |= bl2;
+}
+
+void test_xor (void)
+{
+// CHECK-LABEL: test_xor
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc ^ sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc ^ bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc = bc ^ sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc ^ uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc ^ bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc = bc ^ uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ bc = bc ^ bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss ^ ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss ^ bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss = bs ^ ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us = us ^ us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us = us ^ bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us = bs ^ us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ bs = bs ^ bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si = si ^ si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si = si ^ bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si = bi ^ si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui ^ ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui ^ bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui = bi ^ ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ bi = bi ^ bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl ^ sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl ^ bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl = bl ^ sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul ^ ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul ^ bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul = bl ^ ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ bl = bl ^ bl2;
+}
+
+void test_xor_assign (void)
+{
+// CHECK-LABEL: test_xor_assign
+
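+// Note: for the compound forms the right-hand side is loaded first, but
+// the resulting xor still takes the left-hand value as its first operand.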
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc ^= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc ^= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc ^= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc ^= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ bc ^= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss ^= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss ^= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us ^= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us ^= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ bs ^= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si ^= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si ^= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui ^= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui ^= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ bi ^= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl ^= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl ^= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul ^= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul ^= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ bl ^= bl2;
+}
+
+void test_sl (void)
+{
+// CHECK-LABEL: test_sl
+
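+// Vector << vector lowers to a plain IR shl.  A scalar count is splatted
+// as a vector of i32 and then truncated or zero-extended to the element
+// width; note the extension is a zext even though cnt is a signed int.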
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc = sc << sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc = sc << uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc = sc << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc = sc << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc = uc << sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc = uc << uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc = uc << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc = uc << 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss = ss << ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss = ss << us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss = ss << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss = ss << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us = us << ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us = us << us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us = us << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us = us << 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si = si << si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si = si << ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si = si << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si = si << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui = ui << si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui = ui << ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui = ui << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui = ui << 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl = sl << sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl = sl << ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl = sl << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl = sl << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul = ul << sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul = ul << ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul = ul << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul = ul << 5;
+}
+
+void test_sl_assign (void)
+{
+// CHECK-LABEL: test_sl_assign
+
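+// For <<= the shift count is evaluated (loaded and, if scalar, splatted)
+// before the destination vector is loaded.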
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc <<= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc <<= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc <<= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc <<= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc <<= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss <<= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss <<= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us <<= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us <<= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us <<= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si <<= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si <<= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui <<= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui <<= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui <<= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl <<= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl <<= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul <<= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul <<= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul <<= 5;
+}
+
+void test_sr (void)
+{
+// CHECK-LABEL: test_sr
+
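+// >> selects ashr for signed element types and lshr for unsigned ones;
+// the shift-count handling matches test_sl above.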
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc = sc >> sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc = sc >> uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc = sc >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc = sc >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc = uc >> sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc = uc >> uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc = uc >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc = uc >> 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss = ss >> ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss = ss >> us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss = ss >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss = ss >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us = us >> ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us = us >> us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us = us >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us = us >> 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si = si >> si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si = si >> ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si = si >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si = si >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui = ui >> si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui = ui >> ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui = ui >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui = ui >> 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl = sl >> sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl = sl >> ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl = sl >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl = sl >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul = ul >> sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul = ul >> ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul = ul >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul = ul >> 5;
+}
+
+void test_sr_assign (void)
+{
+// CHECK-LABEL: test_sr_assign
+
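+// >>= combines the two behaviors above: the count is evaluated first, and
+// ashr vs. lshr is chosen by the signedness of the shifted operand.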
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc >>= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc >>= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc >>= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc >>= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc >>= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss >>= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss >>= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us >>= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us >>= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us >>= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si >>= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si >>= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui >>= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui >>= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui >>= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl >>= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl >>= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul >>= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul >>= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul >>= 5;
+}
+
+void test_cmpeq (void)
+{
+// CHECK-LABEL: test_cmpeq
+
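+// Comparisons yield an <N x i1> icmp/fcmp result that is sign-extended
+// into the corresponding bool vector type, so true lanes become all-ones.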
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc == sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc == bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc == sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc == uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc == bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc == uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc == bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss == ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss == bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs == ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us == us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us == bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs == us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs == bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si == si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si == bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi == si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui == ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui == bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi == ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi == bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl == sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl == bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl == sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul == ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul == bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl == ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl == bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp oeq <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd == fd2;
+}
+
+void test_cmpne (void)
+{
+// CHECK-LABEL: test_cmpne
+
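+// The floating-point case uses fcmp une, the unordered complement of the
+// fcmp oeq emitted for == above.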
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc != sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc != bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc != sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc != uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc != bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc != uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc != bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss != ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss != bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs != ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us != us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us != bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs != us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs != bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si != si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si != bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi != si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui != ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui != bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi != ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi != bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl != sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl != bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl != sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul != ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul != bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl != ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl != bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp une <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd != fd2;
+}
+
+void test_cmpge (void)
+{
+// CHECK-LABEL: test_cmpge
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc >= sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc >= uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc >= bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss >= ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us >= us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs >= bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si >= si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui >= ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi >= bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl >= sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul >= ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl >= bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp oge <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd >= fd2;
+}
+
+void test_cmpgt (void)
+{
+// CHECK-LABEL: test_cmpgt
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc > sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc > uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc > bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss > ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us > us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs > bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si > si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui > ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi > bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl > sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul > ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl > bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp ogt <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd > fd2;
+}
+
+void test_cmple (void)
+{
+// CHECK-LABEL: test_cmple
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc <= sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc <= uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc <= bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss <= ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us <= us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs <= bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si <= si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui <= ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi <= bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl <= sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul <= ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl <= bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp ole <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd <= fd2;
+}
+
+void test_cmplt (void)
+{
+// CHECK-LABEL: test_cmplt
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc < sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc < uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc < bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss < ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us < us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs < bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si < si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui < ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi < bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl < sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul < ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl < bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp olt <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd < fd2;
+}
+
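Editor's note: the comparison blocks above all follow one lowering pattern — a vector compare in the source becomes an icmp/fcmp on the loaded operands, followed by a sext of the resulting <N x i1> mask back to the element width, so each lane ends up all-ones for true and all-zeros for false. A minimal standalone sketch of the same behavior using GNU vector extensions (illustrative only, not part of the test):

typedef signed char v16i8 __attribute__((vector_size(16)));

// Each lane of the result is -1 where a[i] != b[i] and 0 elsewhere;
// clang lowers this to icmp ne + sext, the pattern the CHECK lines match.
v16i8 lanewise_ne(v16i8 a, v16i8 b) {
  return a != b;
}
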
diff --git a/test/CodeGenCXX/dllimport-rtti.cpp b/test/CodeGenCXX/dllimport-rtti.cpp
index 8c0f86306cde..6fe67daa38d7 100644
--- a/test/CodeGenCXX/dllimport-rtti.cpp
+++ b/test/CodeGenCXX/dllimport-rtti.cpp
@@ -15,3 +15,16 @@ struct __declspec(dllimport) S {
struct U : S {
} u;
+
+struct __declspec(dllimport) V {
+ virtual void f();
+} v;
+// GNU-DAG: @_ZTV1V = external dllimport
+
+struct W {
+ __declspec(dllimport) virtual void f();
+ virtual void g();
+} w;
+// GNU-DAG: @_ZTV1W = linkonce_odr
+// GNU-DAG: @_ZTS1W = linkonce_odr
+// GNU-DAG: @_ZTI1W = linkonce_odr
diff --git a/test/CodeGenCXX/thunks.cpp b/test/CodeGenCXX/thunks.cpp
index 0e97255cc6fa..38afb9d0dbf7 100644
--- a/test/CodeGenCXX/thunks.cpp
+++ b/test/CodeGenCXX/thunks.cpp
@@ -361,6 +361,23 @@ namespace Test15 {
// CHECK: declare void @_ZThn8_N6Test151C1fEiz
}
+namespace Test16 {
+struct A {
+ virtual ~A();
+};
+struct B {
+ virtual void foo();
+};
+struct C : public A, public B {
+ void foo() {}
+};
+struct D : public C {
+ ~D();
+};
+D::~D() {}
+// CHECK: define linkonce_odr void @_ZThn8_N6Test161C3fooEv({{.*}}) {{.*}} comdat
+}
+
/**** The following has to go at the end of the file ****/
// This is from Test5:
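Editor's note: roughly, Test16 works like this — B is the second base of C, so a call to foo() through a B* must first adjust `this` back to the start of the C object before reaching C::foo; that adjustment lives in the _ZThn8_... thunk. Defining D's out-of-line destructor (its key function) emits D's vtable, which pulls in the thunk for the inline C::foo, giving it linkonce_odr linkage in a comdat, as the CHECK line verifies. A standalone sketch (illustrative only; Itanium C++ ABI with 8-byte pointers assumed):

#include <cstdio>

struct A { virtual ~A() {} };
struct B { virtual void foo(); };
void B::foo() { std::puts("B::foo"); }
struct C : public A, public B {
  void foo() { std::puts("C::foo"); }  // overrides B::foo from the second base
};

int main() {
  C c;
  B *b = &c;   // b points at the B subobject, 8 bytes into c
  b->foo();    // dispatches through the this-adjusting thunk to C::foo
}
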
diff --git a/test/CodeGenObjC/Inputs/nsvalue-boxed-expressions-support.h b/test/CodeGenObjC/Inputs/nsvalue-boxed-expressions-support.h
new file mode 100644
index 000000000000..cbcf882b27e3
--- /dev/null
+++ b/test/CodeGenObjC/Inputs/nsvalue-boxed-expressions-support.h
@@ -0,0 +1,63 @@
+#ifndef NSVALUE_BOXED_EXPRESSIONS_SUPPORT_H
+#define NSVALUE_BOXED_EXPRESSIONS_SUPPORT_H
+
+#define BOXABLE __attribute__((objc_boxable))
+
+typedef unsigned long NSUInteger;
+typedef double CGFloat;
+
+typedef struct BOXABLE _NSRange {
+ NSUInteger location;
+ NSUInteger length;
+} NSRange;
+
+typedef struct BOXABLE _NSPoint {
+ CGFloat x;
+ CGFloat y;
+} NSPoint;
+
+typedef struct BOXABLE _NSSize {
+ CGFloat width;
+ CGFloat height;
+} NSSize;
+
+typedef struct BOXABLE _NSRect {
+ NSPoint origin;
+ NSSize size;
+} NSRect;
+
+struct CGPoint {
+ CGFloat x;
+ CGFloat y;
+};
+typedef struct BOXABLE CGPoint CGPoint;
+
+struct CGSize {
+ CGFloat width;
+ CGFloat height;
+};
+typedef struct BOXABLE CGSize CGSize;
+
+struct CGRect {
+ CGPoint origin;
+ CGSize size;
+};
+typedef struct BOXABLE CGRect CGRect;
+
+struct NSEdgeInsets {
+ CGFloat top;
+ CGFloat left;
+ CGFloat bottom;
+ CGFloat right;
+};
+typedef struct BOXABLE NSEdgeInsets NSEdgeInsets;
+
+@interface NSValue
+
++ (NSValue *)valueWithBytes:(const void *)value objCType:(const char *)type;
+
+@end
+
+NSRange getRange();
+
+#endif // NSVALUE_BOXED_EXPRESSIONS_SUPPORT_H
diff --git a/test/CodeGenObjC/nsvalue-objc-boxable-ios-arc.m b/test/CodeGenObjC/nsvalue-objc-boxable-ios-arc.m
new file mode 100644
index 000000000000..75e04dbd0272
--- /dev/null
+++ b/test/CodeGenObjC/nsvalue-objc-boxable-ios-arc.m
@@ -0,0 +1,126 @@
+// RUN: %clang_cc1 -I %S/Inputs -triple armv7-apple-ios8.0.0 -emit-llvm -fobjc-arc -O2 -disable-llvm-optzns -o - %s | FileCheck %s
+
+#import "nsvalue-boxed-expressions-support.h"
+
+// CHECK: [[CLASS:@.*]] = external global %struct._class_t
+// CHECK: [[NSVALUE:@.*]] = {{.*}}[[CLASS]]{{.*}}
+// CHECK: [[RANGE_STR:.*]] = {{.*}}_NSRange=II{{.*}}
+// CHECK: [[METH:@.*]] = private global{{.*}}valueWithBytes:objCType:{{.*}}
+// CHECK: [[VALUE_SEL:@.*]] = {{.*}}[[METH]]{{.*}}
+// CHECK: [[POINT_STR:.*]] = {{.*}}CGPoint=dd{{.*}}
+// CHECK: [[SIZE_STR:.*]] = {{.*}}CGSize=dd{{.*}}
+// CHECK: [[RECT_STR:.*]] = {{.*}}CGRect={CGPoint=dd}{CGSize=dd}}{{.*}}
+// CHECK: [[EDGE_STR:.*]] = {{.*}}NSEdgeInsets=dddd{{.*}}
+
+// CHECK-LABEL: define void @doRange()
+void doRange() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRange* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSRange ns_range = { .location = 0, .length = 42 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *range = @(ns_range);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doPoint()
+void doPoint() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGPoint{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGPoint{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGPoint* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGPoint cg_point = { .x = 42, .y = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[POINT_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *point = @(cg_point);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doSize()
+void doSize() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGSize{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGSize{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGSize* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGSize cg_size = { .width = 42, .height = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[SIZE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *size = @(cg_size);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRect()
+void doRect() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGRect{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGRect{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGRect* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGPoint cg_point = { .x = 42, .y = 24 };
+ CGSize cg_size = { .width = 42, .height = 24 };
+ CGRect cg_rect = { .origin = cg_point, .size = cg_size };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[RECT_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *rect = @(cg_rect);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doNSEdgeInsets()
+void doNSEdgeInsets() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSEdgeInsets ns_edge_insets;
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[EDGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *edge_insets = @(ns_edge_insets);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRangeRValue()
+void doRangeRValue() {
+ // CHECK: [[COERCE:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: call {{.*}} @getRange {{.*}} [[COERCE]]{{.*}}
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[COERCE_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *range_rvalue = @(getRange());
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
diff --git a/test/CodeGenObjC/nsvalue-objc-boxable-ios.m b/test/CodeGenObjC/nsvalue-objc-boxable-ios.m
new file mode 100644
index 000000000000..3443babdc9d0
--- /dev/null
+++ b/test/CodeGenObjC/nsvalue-objc-boxable-ios.m
@@ -0,0 +1,114 @@
+// RUN: %clang_cc1 -I %S/Inputs -triple armv7-apple-ios8.0.0 -emit-llvm -O2 -disable-llvm-optzns -o - %s | FileCheck %s
+
+#import "nsvalue-boxed-expressions-support.h"
+
+// CHECK: [[CLASS:@.*]] = external global %struct._class_t
+// CHECK: [[NSVALUE:@.*]] = {{.*}}[[CLASS]]{{.*}}
+// CHECK: [[RANGE_STR:.*]] = {{.*}}_NSRange=II{{.*}}
+// CHECK: [[METH:@.*]] = private global{{.*}}valueWithBytes:objCType:{{.*}}
+// CHECK: [[VALUE_SEL:@.*]] = {{.*}}[[METH]]{{.*}}
+// CHECK: [[POINT_STR:.*]] = {{.*}}CGPoint=dd{{.*}}
+// CHECK: [[SIZE_STR:.*]] = {{.*}}CGSize=dd{{.*}}
+// CHECK: [[RECT_STR:.*]] = {{.*}}CGRect={CGPoint=dd}{CGSize=dd}}{{.*}}
+// CHECK: [[EDGE_STR:.*]] = {{.*}}NSEdgeInsets=dddd{{.*}}
+
+// CHECK-LABEL: define void @doRange()
+void doRange() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRange* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSRange ns_range = { .location = 0, .length = 42 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ NSValue *range = @(ns_range);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doPoint()
+void doPoint() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGPoint{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGPoint{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGPoint* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGPoint cg_point = { .x = 42, .y = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[POINT_STR]]{{.*}})
+ NSValue *point = @(cg_point);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doSize()
+void doSize() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGSize{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGSize{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGSize* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGSize cg_size = { .width = 42, .height = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[SIZE_STR]]{{.*}})
+ NSValue *size = @(cg_size);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRect()
+void doRect() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.CGRect{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.CGRect{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.CGRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.CGRect* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.CGRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ CGPoint cg_point = { .x = 42, .y = 24 };
+ CGSize cg_size = { .width = 42, .height = 24 };
+ CGRect cg_rect = { .origin = cg_point, .size = cg_size };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[RECT_STR]]{{.*}})
+ NSValue *rect = @(cg_rect);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doNSEdgeInsets()
+void doNSEdgeInsets() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSEdgeInsets ns_edge_insets;
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[EDGE_STR]]{{.*}})
+ NSValue *edge_insets = @(ns_edge_insets);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRangeRValue()
+void doRangeRValue() {
+ // CHECK: [[COERCE:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: call {{.*}} @getRange {{.*}} [[COERCE]]{{.*}}
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[COERCE_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ NSValue *range_rvalue = @(getRange());
+ // CHECK: ret void
+}
+
diff --git a/test/CodeGenObjC/nsvalue-objc-boxable-mac-arc.m b/test/CodeGenObjC/nsvalue-objc-boxable-mac-arc.m
new file mode 100644
index 000000000000..92c91d7d3810
--- /dev/null
+++ b/test/CodeGenObjC/nsvalue-objc-boxable-mac-arc.m
@@ -0,0 +1,130 @@
+// RUN: %clang_cc1 -I %S/Inputs -triple x86_64-apple-macosx -emit-llvm -fobjc-arc -O2 -disable-llvm-optzns -o - %s | FileCheck %s
+
+#import "nsvalue-boxed-expressions-support.h"
+
+// CHECK: [[CLASS:@.*]] = external global %struct._class_t
+// CHECK: [[NSVALUE:@.*]] = {{.*}}[[CLASS]]{{.*}}
+// CHECK: [[RANGE_STR:.*]] = {{.*}}_NSRange=QQ{{.*}}
+// CHECK: [[METH:@.*]] = private global{{.*}}valueWithBytes:objCType:{{.*}}
+// CHECK: [[VALUE_SEL:@.*]] = {{.*}}[[METH]]{{.*}}
+// CHECK: [[POINT_STR:.*]] = {{.*}}_NSPoint=dd{{.*}}
+// CHECK: [[SIZE_STR:.*]] = {{.*}}_NSSize=dd{{.*}}
+// CHECK: [[RECT_STR:.*]] = {{.*}}_NSRect={_NSPoint=dd}{_NSSize=dd}}{{.*}}
+// CHECK: [[EDGE_STR:.*]] = {{.*}}NSEdgeInsets=dddd{{.*}}
+
+// CHECK-LABEL: define void @doRange()
+void doRange() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRange* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSRange ns_range = { .location = 0, .length = 42 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *range = @(ns_range);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doPoint()
+void doPoint() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSPoint{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSPoint{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSPoint* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSPoint ns_point = { .x = 42, .y = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[POINT_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *point = @(ns_point);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doSize()
+void doSize() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSSize{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSSize{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSSize* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSSize ns_size = { .width = 42, .height = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[SIZE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *size = @(ns_size);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRect()
+void doRect() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRect{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRect{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRect* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSPoint ns_point = { .x = 42, .y = 24 };
+ NSSize ns_size = { .width = 42, .height = 24 };
+ NSRect ns_rect = { .origin = ns_point, .size = ns_size };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[RECT_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *rect = @(ns_rect);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doNSEdgeInsets()
+void doNSEdgeInsets() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSEdgeInsets ns_edge_insets;
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[EDGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *edge_insets = @(ns_edge_insets);
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRangeRValue()
+void doRangeRValue() {
+ // CHECK: [[COERCE:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[RVAL:%.*]] = call {{.*}} @getRange()
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[COERCE_CAST_PTR:%.*]] = getelementptr {{.*}} [[COERCE_CAST]], {{.*}}
+ // CHECK: [[EXTR_RVAL:%.*]] = extractvalue {{.*}} [[RVAL]]{{.*}}
+ // CHECK: store {{.*}}[[EXTR_RVAL]]{{.*}}[[COERCE_CAST_PTR]]{{.*}}
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[COERCE_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ // CHECK: call i8* @objc_retainAutoreleasedReturnValue
+ NSValue *range_rvalue = @(getRange());
+ // CHECK: call void @objc_release
+ // CHECK: ret void
+}
+
diff --git a/test/CodeGenObjC/nsvalue-objc-boxable-mac.m b/test/CodeGenObjC/nsvalue-objc-boxable-mac.m
new file mode 100644
index 000000000000..da0ae7a57108
--- /dev/null
+++ b/test/CodeGenObjC/nsvalue-objc-boxable-mac.m
@@ -0,0 +1,118 @@
+// RUN: %clang_cc1 -I %S/Inputs -triple x86_64-apple-macosx -emit-llvm -O2 -disable-llvm-optzns -o - %s | FileCheck %s
+
+#import "nsvalue-boxed-expressions-support.h"
+
+// CHECK: [[CLASS:@.*]] = external global %struct._class_t
+// CHECK: [[NSVALUE:@.*]] = {{.*}}[[CLASS]]{{.*}}
+// CHECK: [[RANGE_STR:.*]] = {{.*}}_NSRange=QQ{{.*}}
+// CHECK: [[METH:@.*]] = private global{{.*}}valueWithBytes:objCType:{{.*}}
+// CHECK: [[VALUE_SEL:@.*]] = {{.*}}[[METH]]{{.*}}
+// CHECK: [[POINT_STR:.*]] = {{.*}}_NSPoint=dd{{.*}}
+// CHECK: [[SIZE_STR:.*]] = {{.*}}_NSSize=dd{{.*}}
+// CHECK: [[RECT_STR:.*]] = {{.*}}_NSRect={_NSPoint=dd}{_NSSize=dd}}{{.*}}
+// CHECK: [[EDGE_STR:.*]] = {{.*}}NSEdgeInsets=dddd{{.*}}
+
+// CHECK-LABEL: define void @doRange()
+void doRange() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRange* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRange* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSRange ns_range = { .location = 0, .length = 42 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ NSValue *range = @(ns_range);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doPoint()
+void doPoint() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSPoint{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSPoint{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSPoint* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSPoint* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSPoint ns_point = { .x = 42, .y = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[POINT_STR]]{{.*}})
+ NSValue *point = @(ns_point);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doSize()
+void doSize() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSSize{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSSize{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSSize* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSSize* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSSize ns_size = { .width = 42, .height = 24 };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8* {{.*}}[[SIZE_STR]]{{.*}})
+ NSValue *size = @(ns_size);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRect()
+void doRect() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct._NSRect{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct._NSRect{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct._NSRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct._NSRect* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct._NSRect* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSPoint ns_point = { .x = 42, .y = 24 };
+ NSSize ns_size = { .width = 42, .height = 24 };
+ NSRect ns_rect = { .origin = ns_point, .size = ns_size };
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[RECT_STR]]{{.*}})
+ NSValue *rect = @(ns_rect);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doNSEdgeInsets()
+void doNSEdgeInsets() {
+ // CHECK: [[LOCAL_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[TEMP_VAR:%.*]] = alloca %struct.NSEdgeInsets{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[TEMP_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[LOCAL_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[LOCAL_VAR]]{{.*}}
+ // CHECK: call void @llvm.memcpy{{.*}} [[TEMP_CAST]]{{.*}} [[LOCAL_CAST]]{{.*}}
+ // CHECK: [[PARAM_CAST:%.*]] = bitcast %struct.NSEdgeInsets* [[TEMP_VAR]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ NSEdgeInsets ns_edge_insets;
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[PARAM_CAST]], i8*{{.*}}[[EDGE_STR]]{{.*}})
+ NSValue *edge_insets = @(ns_edge_insets);
+ // CHECK: ret void
+}
+
+// CHECK-LABEL: define void @doRangeRValue()
+void doRangeRValue() {
+ // CHECK: [[COERCE:%.*]] = alloca %struct._NSRange{{.*}}
+ // CHECK: [[RECV_PTR:%.*]] = load {{.*}} [[NSVALUE]]
+ // CHECK: [[RVAL:%.*]] = call {{.*}} @getRange()
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[COERCE_CAST_PTR:%.*]] = getelementptr {{.*}} [[COERCE_CAST]], {{.*}}
+ // CHECK: [[EXTR_RVAL:%.*]] = extractvalue {{.*}} [[RVAL]]{{.*}}
+ // CHECK: store {{.*}}[[EXTR_RVAL]]{{.*}}[[COERCE_CAST_PTR]]{{.*}}
+ // CHECK: [[COERCE_CAST:%.*]] = bitcast %struct._NSRange* [[COERCE]]{{.*}}
+ // CHECK: [[SEL:%.*]] = load i8*, i8** [[VALUE_SEL]]
+ // CHECK: [[RECV:%.*]] = bitcast %struct._class_t* [[RECV_PTR]] to i8*
+ // CHECK: call {{.*objc_msgSend.*}}(i8* [[RECV]], i8* [[SEL]], i8* [[COERCE_CAST]], i8* {{.*}}[[RANGE_STR]]{{.*}})
+ NSValue *range_rvalue = @(getRange());
+ // CHECK: ret void
+}
+
diff --git a/test/CodeGenObjCXX/designated-initializers.mm b/test/CodeGenObjCXX/designated-initializers.mm
new file mode 100644
index 000000000000..41a4271bc8c5
--- /dev/null
+++ b/test/CodeGenObjCXX/designated-initializers.mm
@@ -0,0 +1,36 @@
+// RUN: %clang_cc1 -triple arm64 %s -verify -emit-llvm -o - | FileCheck %s
+// expected-no-diagnostics
+
+// Make sure we don't enter an infinite loop (rdar://21942503)
+
+int vals1[] = {
+ [__objc_yes] = 1,
+ [__objc_no] = 2
+};
+// CHECK: @vals1 = global [2 x i32] [i32 2, i32 1]
+
+int vals2[] = {
+ [true] = 3,
+ [false] = 4
+};
+// CHECK: @vals2 = global [2 x i32] [i32 4, i32 3]
+
+int vals3[] = {
+ [false] = 1,
+ [true] = 2,
+ 5
+};
+// CHECK: @vals3 = global [3 x i32] [i32 1, i32 2, i32 5]
+
+int vals4[2] = {
+ [true] = 5,
+ [false] = 6
+};
+// CHECK: @vals4 = global [2 x i32] [i32 6, i32 5]
+
+int vals5[3] = {
+ [false] = 1,
+ [true] = 2,
+ 6
+};
+// CHECK: @vals5 = global [3 x i32] [i32 1, i32 2, i32 6]
diff --git a/test/CodeGenOpenCL/vector_shufflevector_valid.cl b/test/CodeGenOpenCL/vector_shufflevector_valid.cl
new file mode 100644
index 000000000000..0953c66f58e2
--- /dev/null
+++ b/test/CodeGenOpenCL/vector_shufflevector_valid.cl
@@ -0,0 +1,13 @@
+// RUN: %clang_cc1 -emit-llvm -O0 %s -o - | FileCheck %s
+
+// The shufflevector mask must always be of i32 vector type.
+// See http://reviews.llvm.org/D10838 and https://llvm.org/bugs/show_bug.cgi?id=23800#c2
+// for more information about a bug where a 64-bit index operand caused the
+// generation of an invalid mask.
+
+typedef unsigned int uint2 __attribute__((ext_vector_type(2)));
+
+void vector_shufflevector_valid(void) {
+ // CHECK: {{%.*}} = shufflevector <2 x i32> {{%.*}}, <2 x i32> undef, <2 x i32> <i32 0, i32 undef>
+ (uint2)(((uint2)(0)).s0, 0);
+}
diff --git a/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/backward/.keep b/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/backward/.keep
diff --git a/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32/.keep b/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_arch_tree/usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed/.keep b/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include/.keep b/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_arch_tree/usr/lib/gcc/i686-w64-mingw32/5.1.0/include/.keep
diff --git a/test/Driver/Inputs/mingw_clang_tree/mingw32/i686-w64-mingw32/include/.keep b/test/Driver/Inputs/mingw_clang_tree/mingw32/i686-w64-mingw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_clang_tree/mingw32/i686-w64-mingw32/include/.keep
diff --git a/test/Driver/Inputs/mingw_clang_tree/mingw32/include/.keep b/test/Driver/Inputs/mingw_clang_tree/mingw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_clang_tree/mingw32/include/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/backward/.keep b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/backward/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/i686-w64-mingw32/.keep b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/i686-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/i686-w64-mingw32/include/c++/i686-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include-fixed/.keep b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include/.keep b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_builds_tree/mingw32/lib/gcc/i686-w64-mingw32/4.9.1/include/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/include/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/include/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include-fixed/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/backward/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/backward/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/mingw32/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/lib/gcc/mingw32/4.8.1/include/c++/mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_mingw_org_tree/mingw/minw32/include/.keep b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/minw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_mingw_org_tree/mingw/minw32/include/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/i686-w64-mingw32/include/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/i686-w64-mingw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/i686-w64-mingw32/include/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/backward/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/backward/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/i686-w64-mingw32/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/i686-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/include/c++/4.9.2/i686-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include-fixed/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include/.keep b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_msys2_tree/msys64/mingw32/lib/gcc/i686-w64-mingw32/4.9.2/include/.keep
diff --git a/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed/.keep b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward/.keep b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward/.keep
diff --git a/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32/.keep b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_opensuse_tree/usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_opensuse_tree/usr/x86_64-w64-mingw32/sys-root/mingw/include/.keep b/test/Driver/Inputs/mingw_opensuse_tree/usr/x86_64-w64-mingw32/sys-root/mingw/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_opensuse_tree/usr/x86_64-w64-mingw32/sys-root/mingw/include/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/86_64-w64-mingw32/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/86_64-w64-mingw32/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/86_64-w64-mingw32/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/backward/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/backward/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/include/c++/4.8/backward/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/lib/gcc/x86_64-w64-mingw32/4.8/include/.keep
diff --git a/test/Driver/Inputs/mingw_ubuntu_tree/usr/x86_64-w64-mingw32/include/.keep b/test/Driver/Inputs/mingw_ubuntu_tree/usr/x86_64-w64-mingw32/include/.keep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Driver/Inputs/mingw_ubuntu_tree/usr/x86_64-w64-mingw32/include/.keep
diff --git a/test/Driver/arm-ias-Wa.s b/test/Driver/arm-ias-Wa.s
new file mode 100644
index 000000000000..6eadd3b841d5
--- /dev/null
+++ b/test/Driver/arm-ias-Wa.s
@@ -0,0 +1,64 @@
+// Test that different values of -Wa,-mcpu/mfpu/march/mhwdiv pick the correct ARM target-feature(s).
+// Complete tests for -mcpu/-mfpu/-march/-mhwdiv live in other files.
+
+// CHECK-DUP-CPU: warning: argument unused during compilation: '-mcpu=cortex-a8'
+// CHECK-DUP-FPU: warning: argument unused during compilation: '-mfpu=vfpv3'
+// CHECK-DUP-ARCH: warning: argument unused during compilation: '-march=armv7'
+// CHECK-DUP-HDIV: warning: argument unused during compilation: '-mhwdiv=arm'
+
+// CHECK: "cc1as"
+// ================================================================= CPU
+// RUN: %clang -target arm-linux-gnueabi -Wa,-mcpu=cortex-a15 -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-CPU %s
+// CHECK-CPU: "-target-cpu" "cortex-a15"
+
+// RUN: %clang -target arm -Wa,-mcpu=bogus -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-BOGUS-CPU %s
+// CHECK-BOGUS-CPU: error: {{.*}} does not support '-Wa,-mcpu=bogus'
+
+// RUN: %clang -target arm -mcpu=cortex-a8 -Wa,-mcpu=cortex-a15 -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-DUP-CPU %s
+// CHECK-DUP-CPU: "-target-cpu" "cortex-a15"
+
+// ================================================================= FPU
+// RUN: %clang -target arm-linux-eabi -Wa,-mfpu=neon -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-NEON %s
+// CHECK-NEON: "-target-feature" "+neon"
+
+// RUN: %clang -target arm-linux-eabi -Wa,-mfpu=bogus -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-BOGUS-FPU %s
+// CHECK-BOGUS-FPU: error: {{.*}} does not support '-Wa,-mfpu=bogus'
+
+// RUN: %clang -target arm-linux-eabi -mfpu=vfpv3 -Wa,-mfpu=neon -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-DUP-FPU %s
+// CHECK-DUP-FPU: "-target-feature" "+neon"
+
+// ================================================================= Arch
+// Arch validation only for now, in case we're passing to an external assembler
+
+// RUN: %clang -target arm -Wa,-march=armbogusv6 -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-BOGUS-ARCH %s
+// CHECK-BOGUS-ARCH: error: {{.*}} does not support '-Wa,-march=armbogusv6'
+
+// RUN: %clang -target arm -march=armv7 -Wa,-march=armv6 -c %s -### 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-DUP-ARCH %s
+
+// ================================================================= HD Div
+// RUN: %clang -target arm -Wa,-mhwdiv=arm -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-ARM %s
+// CHECK-ARM: "-target-feature" "+hwdiv-arm"
+// CHECK-ARM: "-target-feature" "-hwdiv"
+
+// RUN: %clang -target arm -Wa,-mhwdiv=thumb -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-THUMB %s
+// CHECK-THUMB: "-target-feature" "-hwdiv-arm"
+// CHECK-THUMB: "-target-feature" "+hwdiv"
+
+// RUN: %clang -target arm -Wa,-mhwdiv=bogus -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-BOGUS-HDIV %s
+// CHECK-BOGUS-HDIV: error: {{.*}} does not support '-Wa,-mhwdiv=bogus'
+
+// RUN: %clang -target arm -mhwdiv=arm -Wa,-mhwdiv=thumb -c %s -### 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-DUP-HDIV %s
+// CHECK-DUP-HDIV: "-target-feature" "-hwdiv-arm"
+// CHECK-DUP-HDIV: "-target-feature" "+hwdiv"
diff --git a/test/Driver/cuda-options.cu b/test/Driver/cuda-options.cu
index 90bdea4499c8..65d4913a159e 100644
--- a/test/Driver/cuda-options.cu
+++ b/test/Driver/cuda-options.cu
@@ -4,7 +4,7 @@
// REQUIRES: nvptx-registered-target
// Simple compilation case:
-// RUN: %clang -### -target=x86_64-linux-gnu -c %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu -c %s 2>&1 \
// Compile device-side to PTX assembly and make sure we use it on the host side.
// RUN: | FileCheck -check-prefix CUDA-D1 \
// Then compile host side and incorporate device code.
@@ -13,7 +13,7 @@
// RUN: -check-prefix CUDA-NL %s
// Typical compilation + link case:
-// RUN: %clang -### -target=x86_64-linux-gnu %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu %s 2>&1 \
// Compile device-side to PTX assembly and make sure we use it on the host side
// RUN: | FileCheck -check-prefix CUDA-D1 \
// Then compile host side and incorporate device code.
@@ -22,7 +22,7 @@
// RUN: -check-prefix CUDA-L %s
// Verify that --cuda-host-only disables device-side compilation and linking
-// RUN: %clang -### -target=x86_64-linux-gnu --cuda-host-only %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu --cuda-host-only %s 2>&1 \
// Make sure we didn't run device-side compilation.
// RUN: | FileCheck -check-prefix CUDA-ND \
// Then compile host side and make sure we don't attempt to incorporate GPU code.
@@ -31,7 +31,7 @@
// RUN: -check-prefix CUDA-NL %s
// Verify that --cuda-device-only disables host-side compilation and linking
-// RUN: %clang -### -target=x86_64-linux-gnu --cuda-device-only %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu --cuda-device-only %s 2>&1 \
// Compile device-side to PTX assembly
// RUN: | FileCheck -check-prefix CUDA-D1 \
// Make sure there is no host compilation or linking.
@@ -39,7 +39,7 @@
// Verify that with -S we compile host and device sides to assembly
// and incorporate device code on the host side.
-// RUN: %clang -### -target=x86_64-linux-gnu -S -c %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu -S -c %s 2>&1 \
// Compile device-side to PTX assembly
// RUN: | FileCheck -check-prefix CUDA-D1 \
// Then compile host side and incorporate GPU code.
@@ -49,7 +49,7 @@
// Verify that the --cuda-gpu-arch option passes the correct GPU
// architecture info to device compilation.
-// RUN: %clang -### -target=x86_64-linux-gnu --cuda-gpu-arch=sm_35 -c %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=sm_35 -c %s 2>&1 \
// Compile device-side to PTX assembly.
// RUN: | FileCheck -check-prefix CUDA-D1 -check-prefix CUDA-D1-SM35 \
// Then compile host side and incorporate GPU code.
@@ -59,7 +59,7 @@
// Verify that there is one device-side compilation per --cuda-gpu-arch arg
// and that all results are included on the host side.
-// RUN: %clang -### -target=x86_64-linux-gnu --cuda-gpu-arch=sm_35 --cuda-gpu-arch=sm_30 -c %s 2>&1 \
+// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=sm_35 --cuda-gpu-arch=sm_30 -c %s 2>&1 \
// Compile both device-sides to PTX assembly
// RUN: | FileCheck \
// RUN: -check-prefix CUDA-D1 -check-prefix CUDA-D1-SM35 \
diff --git a/test/Driver/linux-ld.c b/test/Driver/linux-ld.c
index 5e865a953c7c..a976b388d3cc 100644
--- a/test/Driver/linux-ld.c
+++ b/test/Driver/linux-ld.c
@@ -618,6 +618,13 @@
// CHECK-ARM: "-dynamic-linker" "{{.*}}/lib/ld-linux.so.3"
//
// RUN: %clang %s -### -o %t.o 2>&1 \
+// RUN: --target=arm-linux-gnueabi -mfloat-abi=hard \
+// RUN: | FileCheck --check-prefix=CHECK-ARM-ABIHF %s
+// CHECK-ARM-ABIHF: "{{.*}}ld{{(.exe)?}}"
+// CHECK-ARM-ABIHF: "-m" "armelf_linux_eabi"
+// CHECK-ARM-ABIHF: "-dynamic-linker" "{{.*}}/lib/ld-linux-armhf.so.3"
+//
+// RUN: %clang %s -### -o %t.o 2>&1 \
// RUN: --target=arm-linux-gnueabihf \
// RUN: | FileCheck --check-prefix=CHECK-ARM-HF %s
// CHECK-ARM-HF: "{{.*}}ld{{(.exe)?}}"
diff --git a/test/Driver/mingw.cpp b/test/Driver/mingw.cpp
new file mode 100644
index 000000000000..8dc5b96034ce
--- /dev/null
+++ b/test/Driver/mingw.cpp
@@ -0,0 +1,59 @@
+// RUN: %clang -target i686-windows-gnu -c -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_CLANG_TREE %s
+// CHECK_MINGW_CLANG_TREE: "{{.*}}/Inputs/mingw_clang_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
+// CHECK_MINGW_CLANG_TREE: "{{.*}}/Inputs/mingw_clang_tree/mingw32{{/|\\\\}}include"
+
+
+// RUN: %clang -target i686-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_mingw_org_tree/mingw %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_ORG_TREE %s
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include{{/|\\\\}}c++"
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}mingw32"
+// CHECK_MINGW_ORG_TREE: "{{.*}}{{/|\\\\}}Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include"
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include-fixed"
+// CHECK_MINGW_ORG_TREE: "{{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}mingw32{{/|\\\\}}include"
+// CHECK_MINGW_ORG_TREE: {{.*}}/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}include
+
+
+// RUN: %clang -target i686-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_mingw_builds_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_BUILDS_TREE %s
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}i686-w64-mingw32"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}4.9.1{{/|\\\\}}include"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}4.9.1{{/|\\\\}}include-fixed"
+// CHECK_MINGW_BUILDS_TREE: "{{.*}}/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
+
+
+// RUN: %clang -target i686-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_msys2_tree/msys64/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_MSYS_TREE %s
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64{{/|\\\\}}mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.9.2"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.9.2{{/|\\\\}}i686-w64-mingw32"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.9.2{{/|\\\\}}backward"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}4.9.2{{/|\\\\}}include"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}4.9.2{{/|\\\\}}include-fixed"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
+// CHECK_MINGW_MSYS_TREE: "{{.*}}/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}include"
+
+
+// RUN: %clang -target x86_64-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_opensuse_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_OPENSUSE_TREE %s
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include{{/|\\\\}}c++"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}x86_64-w64-mingw32"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}x86_64-w64-mingw32/sys-root/mingw/include"
+// CHECK_MINGW_OPENSUSE_TREE: "{{.*}}/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include-fixed"
+
+
+// RUN: %clang -target i686-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_arch_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_ARCH_TREE %s
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}5.1.0"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}5.1.0{{/|\\\\}}i686-w64-mingw32"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}5.1.0{{/|\\\\}}backward"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include-fixed"
+// CHECK_MINGW_ARCH_TREE: "{{.*}}/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
+
+
+// RUN: %clang -target x86_64-pc-windows-gnu -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_ubuntu_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_UBUNTU_TREE %s
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.8"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.8{{/|\\\\}}x86_64-w64-mingw32"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.8{{/|\\\\}}backward"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}4.8{{/|\\\\}}include"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}4.8{{/|\\\\}}include-fixed"
+// CHECK_MINGW_UBUNTU_TREE: "{{.*}}/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
diff --git a/test/Headers/pmmintrin.c b/test/Headers/pmmintrin.c
deleted file mode 100644
index 5b7a3a4ef6b9..000000000000
--- a/test/Headers/pmmintrin.c
+++ /dev/null
@@ -1,12 +0,0 @@
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding %s -verify
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding -x c++ %s -verify
-// expected-no-diagnostics
-
-#if defined(i386) || defined(__x86_64__)
-#include <pmmintrin.h>
-
-int __attribute__((__target__(("sse3")))) foo(int a) {
- _mm_mwait(0, 0);
- return 4;
-}
-#endif
diff --git a/test/Headers/x86intrin-2.c b/test/Headers/x86intrin-2.c
deleted file mode 100644
index f98fdbd13a8a..000000000000
--- a/test/Headers/x86intrin-2.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding %s -verify
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding -fno-lax-vector-conversions %s -verify
-// RUN: %clang_cc1 -fsyntax-only -ffreestanding -x c++ %s -verify
-// expected-no-diagnostics
-
-#if defined(i386) || defined(__x86_64__)
-
-// Include the metaheader that includes all x86 intrinsic headers.
-#include <x86intrin.h>
-
-void __attribute__((__target__("mmx"))) mm_empty_wrap(void) {
- _mm_empty();
-}
-
-__m128 __attribute__((__target__("sse"))) mm_add_ss_wrap(__m128 a, __m128 b) {
- return _mm_add_ss(a, b);
-}
-
-__m128d __attribute__((__target__("sse2"))) mm_sqrt_sd_wrap(__m128d a, __m128d b) {
- return _mm_sqrt_sd(a, b);
-}
-
-void __attribute__((__target__("sse3"))) mm_mwait_wrap(int a) {
- _mm_mwait(0, 0);
-}
-
-__m64 __attribute__((__target__("ssse3"))) mm_abs_pi8_wrap(__m64 a) {
- return _mm_abs_pi8(a);
-}
-
-__m128i __attribute__((__target__("sse4.1"))) mm_minpos_epu16_wrap(__m128i v) {
- return _mm_minpos_epu16(v);
-}
-
-unsigned int __attribute__((__target__("sse4.2"))) mm_crc32_u8_wrap(unsigned int c, unsigned char d) {
- return _mm_crc32_u8(c, d);
-}
-
-__m128i __attribute__((__target__("aes"))) mm_aesenc_si128_wrap(__m128i v, __m128i r) {
- return _mm_aesenc_si128(v, r);
-}
-
-__m256d __attribute__((__target__("avx"))) mm256_add_pd_wrap(__m256d a, __m256d b) {
- return _mm256_add_pd(a, b);
-}
-
-__m256i __attribute__((__target__("avx2"))) mm256_abs_epi8_wrap(__m256i a) {
- return _mm256_abs_epi8(a);
-}
-
-unsigned short __attribute__((__target__("bmi"))) tzcnt_u16_wrap(unsigned short x) {
- return __tzcnt_u16(x);
-}
-
-unsigned int __attribute__((__target__("bmi2"))) bzhi_u32_wrap(unsigned int x, unsigned int y) {
- return _bzhi_u32(x, y);
-}
-
-unsigned short __attribute__((__target__("lzcnt"))) lzcnt16_wrap(unsigned short x) {
- return __lzcnt16(x);
-}
-
-__m256d __attribute__((__target__("fma"))) mm256_fmsubadd_pd_wrap(__m256d a, __m256d b, __m256d c) {
- return _mm256_fmsubadd_pd(a, b, c);
-}
-
-__m512i __attribute__((__target__("avx512f"))) mm512_setzero_si512_wrap(void) {
- return _mm512_setzero_si512();
-}
-
-__mmask8 __attribute__((__target__("avx512vl"))) mm_cmpeq_epi32_mask_wrap(__m128i a, __m128i b) {
- return _mm_cmpeq_epi32_mask(a, b);
-}
-
-__v64qi __attribute__((__target__("avx512bw"))) mm512_setzero_qi_wrap(void) {
- return _mm512_setzero_qi();
-}
-
-__m512i __attribute__((__target__("avx512dq"))) mm512_mullo_epi64_wrap(__m512i a, __m512i b) {
- return _mm512_mullo_epi64(a, b);
-}
-
-__mmask16 __attribute__((__target__("avx512vl,avx512bw"))) mm_cmpeq_epi8_mask_wrap(__m128i a, __m128i b) {
- return _mm_cmpeq_epi8_mask(a, b);
-}
-
-__m256i __attribute__((__target__("avx512vl,avx512dq"))) mm256_mullo_epi64_wrap(__m256i a, __m256i b) {
- return _mm256_mullo_epi64(a, b);
-}
-
-int __attribute__((__target__("rdrnd"))) rdrand16_step_wrap(unsigned short *p) {
- return _rdrand16_step(p);
-}
-
-#if defined(__x86_64__)
-unsigned int __attribute__((__target__("fsgsbase"))) readfsbase_u32_wrap(void) {
- return _readfsbase_u32();
-}
-#endif
-
-unsigned int __attribute__((__target__("rtm"))) xbegin_wrap(void) {
- return _xbegin();
-}
-
-__m128i __attribute__((__target__("sha"))) mm_sha1nexte_epu32_wrap(__m128i x, __m128i y) {
- return _mm_sha1nexte_epu32(x, y);
-}
-
-int __attribute__((__target__("rdseed"))) rdseed16_step_wrap(unsigned short *p) {
- return _rdseed16_step(p);
-}
-
-__m128i __attribute__((__target__("sse4a"))) mm_extract_si64_wrap(__m128i x, __m128i y) {
- return _mm_extract_si64(x, y);
-}
-
-__m128 __attribute__((__target__("fma4"))) mm_macc_ps_wrap(__m128 a, __m128 b, __m128 c) {
- return _mm_macc_ps(a, b, c);
-}
-
-__m256 __attribute__((__target__("xop"))) mm256_frcz_ps_wrap(__m256 a) {
- return _mm256_frcz_ps(a);
-}
-
-unsigned int __attribute__((__target__("tbm"))) blcfill_u32_wrap(unsigned int a) {
- return __blcfill_u32(a);
-}
-
-__m128 __attribute__((__target__("f16c"))) mm_cvtph_ps_wrap(__m128i a) {
- return _mm_cvtph_ps(a);
-}
-
-int __attribute__((__target__("rtm"))) xtest_wrap(void) {
- return _xtest();
-}
-
-#endif
diff --git a/test/Headers/x86intrin.c b/test/Headers/x86intrin.c
index 7c15c4816b33..6a1608b756a0 100644
--- a/test/Headers/x86intrin.c
+++ b/test/Headers/x86intrin.c
@@ -5,7 +5,126 @@
#if defined(i386) || defined(__x86_64__)
-// Include the metaheader that includes all x86 intrinsic headers.
+// Pretend to enable all features.
+#ifndef __3dNOW__
+#define __3dNOW__
+#endif
+#ifndef __BMI__
+#define __BMI__
+#endif
+#ifndef __BMI2__
+#define __BMI2__
+#endif
+#ifndef __LZCNT__
+#define __LZCNT__
+#endif
+#ifndef __POPCNT__
+#define __POPCNT__
+#endif
+#ifndef __RDSEED__
+#define __RDSEED__
+#endif
+#ifndef __PRFCHW__
+#define __PRFCHW__
+#endif
+#ifndef __SSE4A__
+#define __SSE4A__
+#endif
+#ifndef __FMA4__
+#define __FMA4__
+#endif
+#ifndef __XOP__
+#define __XOP__
+#endif
+#ifndef __F16C__
+#define __F16C__
+#endif
+#ifndef __MMX__
+#define __MMX__
+#endif
+#ifndef __SSE__
+#define __SSE__
+#endif
+#ifndef __SSE2__
+#define __SSE2__
+#endif
+#ifndef __SSE3__
+#define __SSE3__
+#endif
+#ifndef __SSSE3__
+#define __SSSE3__
+#endif
+#ifndef __SSE4_1__
+#define __SSE4_1__
+#endif
+#ifndef __SSE4_2__
+#define __SSE4_2__
+#endif
+#ifndef __AES__
+#define __AES__
+#endif
+#ifndef __AVX__
+#define __AVX__
+#endif
+#ifndef __AVX2__
+#define __AVX2__
+#endif
+#ifndef __BMI__
+#define __BMI__
+#endif
+#ifndef __BMI2__
+#define __BMI2__
+#endif
+#ifndef __LZCNT__
+#define __LZCNT__
+#endif
+#ifndef __FMA__
+#define __FMA__
+#endif
+#ifndef __RDRND__
+#define __RDRND__
+#endif
+#ifndef __SHA__
+#define __SHA__
+#endif
+#ifndef __ADX__
+#define __ADX__
+#endif
+#ifndef __TBM__
+#define __TBM__
+#endif
+#ifndef __RTM__
+#define __RTM__
+#endif
+#ifndef __PCLMUL__
+#define __PCLMUL__
+#endif
+#ifndef __FSGSBASE__
+#define __FSGSBASE__
+#endif
+#ifndef __AVX512F__
+#define __AVX512F__
+#endif
+#ifndef __AVX512VL__
+#define __AVX512VL__
+#endif
+#ifndef __AVX512BW__
+#define __AVX512BW__
+#endif
+#ifndef __AVX512ER__
+#define __AVX512ER__
+#endif
+#ifndef __AVX512PF__
+#define __AVX512PF__
+#endif
+#ifndef __AVX512DQ__
+#define __AVX512DQ__
+#endif
+#ifndef __AVX512CD__
+#define __AVX512CD__
+#endif
+
+// Now include the metaheader that includes all x86 intrinsic headers.
#include <x86intrin.h>
#endif
diff --git a/test/Lexer/has_attribute_objc_boxable.m b/test/Lexer/has_attribute_objc_boxable.m
new file mode 100644
index 000000000000..e172ecaba2bb
--- /dev/null
+++ b/test/Lexer/has_attribute_objc_boxable.m
@@ -0,0 +1,8 @@
+// RUN: %clang_cc1 -E %s -o - | FileCheck %s
+
+#if __has_attribute(objc_boxable)
+int has_objc_boxable_attribute();
+#endif
+
+// CHECK: has_objc_boxable_attribute
+
diff --git a/test/Lexer/has_feature_boxed_nsvalue_expressions.m b/test/Lexer/has_feature_boxed_nsvalue_expressions.m
new file mode 100644
index 000000000000..8c66bcb3d760
--- /dev/null
+++ b/test/Lexer/has_feature_boxed_nsvalue_expressions.m
@@ -0,0 +1,8 @@
+// RUN: %clang_cc1 -E %s -o - | FileCheck %s
+
+#if __has_feature(objc_boxed_nsvalue_expressions)
+int has_objc_boxed_nsvalue_expressions();
+#endif
+
+// CHECK: has_objc_boxed_nsvalue_expressions
+
diff --git a/test/Misc/cc1as-asm.s b/test/Misc/cc1as-asm.s
new file mode 100644
index 000000000000..af92644073ac
--- /dev/null
+++ b/test/Misc/cc1as-asm.s
@@ -0,0 +1,3 @@
+// Run the cc1as asm output path just to make sure it works
+// REQUIRES: x86-registered-target
+// RUN: %clang -cc1as -triple x86_64-apple-macosx10.10.0 -filetype asm %s -o /dev/null
diff --git a/test/Modules/pch_container.m b/test/Modules/pch_container.m
index 095245bdc3a4..95ef3edcd668 100644
--- a/test/Modules/pch_container.m
+++ b/test/Modules/pch_container.m
@@ -1,9 +1,11 @@
@import DependsOnModule;
// REQUIRES: x86-registered-target
-// RUN: rm -rf %t-MachO %t-ELF %t-ELF_SPLIT %t-COFF
-// RUN: %clang_cc1 -triple=x86_64-apple-darwin -fmodules -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-MachO -F %S/Inputs %s
-// RUN: %clang_cc1 -triple=x86_64-linux-elf -fmodules -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-ELF -F %S/Inputs %s
-// RUN: %clang_cc1 -triple=x86_64-windows-coff -fmodules -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-COFF -F %S/Inputs %s
+// RUN: rm -rf %t-MachO %t-ELF %t-ELF_SPLIT %t-COFF %t-raw
+// RUN: %clang_cc1 -triple=x86_64-apple-darwin -fmodules -fmodule-format=obj -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-MachO -F %S/Inputs %s
+// RUN: %clang_cc1 -triple=x86_64-linux-elf -fmodules -fmodule-format=obj -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-ELF -F %S/Inputs %s
+// RUN: %clang_cc1 -triple=x86_64-windows-coff -fmodules -fmodule-format=obj -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-COFF -F %S/Inputs %s
+// RUN: %clang_cc1 -triple=x86_64-apple-darwin -fmodules -fmodule-format=raw -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-raw -F %S/Inputs %s
+
// RUN: llvm-objdump -section-headers %t-MachO/DependsOnModule.pcm %t-ELF/DependsOnModule.pcm %t-COFF/DependsOnModule.pcm | FileCheck %s
// CHECK: file format Mach-O 64-bit x86-64
@@ -13,5 +15,6 @@
// CHECK: file format COFF-x86-64
// CHECK: clangast {{[0-9a-f]+}} {{[0-9a-f]+}}
+// RUN: not llvm-objdump -section-headers %t-raw/DependsOnModule.pcm
// RUN: %clang_cc1 -split-dwarf-file t-split.dwo -triple=x86_64-linux-elf -fmodules -fimplicit-module-maps -fdisable-module-hash -fmodules-cache-path=%t-ELF_SPLIT -F %S/Inputs %s -o %t-split.o
diff --git a/test/OpenMP/for_codegen.cpp b/test/OpenMP/for_codegen.cpp
index 082a0d4d8841..9db157011075 100644
--- a/test/OpenMP/for_codegen.cpp
+++ b/test/OpenMP/for_codegen.cpp
@@ -329,14 +329,9 @@ void test_precond() {
// CHECK: [[A_ADDR:%.+]] = alloca i8,
// CHECK: [[I_ADDR:%.+]] = alloca i8,
char a = 0;
- // CHECK: store i32 0, i32* [[IV_ADDR:%.+]],
- // CHECK: [[A:%.+]] = load i8, i8* [[A_ADDR]],
- // CHECK: [[CONV:%.+]] = sext i8 [[A]] to i32
- // CHECK: [[IV:%.+]] = load i32, i32* [[IV_ADDR]],
- // CHECK: [[MUL:%.+]] = mul nsw i32 [[IV]], 1
- // CHECK: [[ADD:%.+]] = add nsw i32 [[CONV]], [[MUL]]
- // CHECK: [[CONV:%.+]] = trunc i32 [[ADD]] to i8
- // CHECK: store i8 [[CONV]], i8* [[I_ADDR]],
+ // CHECK: store i8 0,
+ // CHECK: store i32
+ // CHECK: store i8
// CHECK: [[A:%.+]] = load i8, i8* [[A_ADDR]],
// CHECK: [[CONV:%.+]] = sext i8 [[A]] to i32
// CHECK: [[CMP:%.+]] = icmp slt i32 [[CONV]], 10
@@ -389,6 +384,7 @@ void for_with_global_lcv() {
// CHECK: store i8 [[I_VAL]], i8* [[K]]
// CHECK-NOT: [[I]]
// CHECK: call void @__kmpc_for_static_fini(
+// CHECK: call void @__kmpc_barrier(
#pragma omp for
for (i = 0; i < 2; ++i) {
k = i;
@@ -410,5 +406,70 @@ void for_with_global_lcv() {
}
}
-#endif // HEADER
+struct Bool {
+ Bool(bool b) : b(b) {}
+ operator bool() const { return b; }
+ const bool b;
+};
+
+template <typename T>
+struct It {
+ It() : p(0) {}
+ It(const It &, int = 0) ;
+ template <typename U>
+ It(U &, int = 0) ;
+ It &operator=(const It &);
+ It &operator=(It &);
+ ~It() {}
+
+ It(T *p) : p(p) {}
+
+ operator T *&() { return p; }
+ operator T *() const { return p; }
+ T *operator->() const { return p; }
+
+ It &operator++() { ++p; return *this; }
+ It &operator--() { --p; return *this; }
+ It &operator+=(unsigned n) { p += n; return *this; }
+ It &operator-=(unsigned n) { p -= n; return *this; }
+
+ T *p;
+};
+
+template <typename T>
+It<T> operator+(It<T> a, typename It<T>::difference_type n) { return a.p + n; }
+
+template <typename T>
+It<T> operator+(typename It<T>::difference_type n, It<T> a) { return a.p + n; }
+
+template <typename T>
+It<T> operator-(It<T> a, typename It<T>::difference_type n) { return a.p - n; }
+
+typedef Bool BoolType;
+
+template <typename T>
+BoolType operator<(It<T> a, It<T> b) { return a.p < b.p; }
+void loop_with_It(It<char> begin, It<char> end) {
+#pragma omp for
+ for (It<char> it = begin; it < end; ++it) {
+ *it = 0;
+ }
+}
+
+// CHECK-LABEL: loop_with_It
+// CHECK: call i32 @__kmpc_global_thread_num(
+// CHECK: call void @__kmpc_for_static_init_8(
+// CHECK: call void @__kmpc_for_static_fini(
+
+void loop_with_stmt_expr() {
+#pragma omp for
+ for (int i = __extension__({float b = 0;b; }); i < __extension__({double c = 1;c; }); i += __extension__({char d = 1; d; }))
+ ;
+}
+// CHECK-LABEL: loop_with_stmt_expr
+// CHECK: call i32 @__kmpc_global_thread_num(
+// CHECK: call void @__kmpc_for_static_init_4(
+// CHECK: call void @__kmpc_for_static_fini(
+
+#endif // HEADER
diff --git a/test/OpenMP/for_loop_messages.cpp b/test/OpenMP/for_loop_messages.cpp
index f425defdf3e2..1a1315507893 100644
--- a/test/OpenMP/for_loop_messages.cpp
+++ b/test/OpenMP/for_loop_messages.cpp
@@ -423,12 +423,12 @@ public:
typedef int difference_type;
typedef std::random_access_iterator_tag iterator_category;
};
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'GoodIter' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
int operator-(GoodIter a, GoodIter b) { return 0; }
// expected-note@+1 3 {{candidate function not viable: requires single argument 'a', but 2 arguments were provided}}
GoodIter operator-(GoodIter a) { return a; }
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'int' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'int' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
GoodIter operator-(GoodIter a, int v) { return GoodIter(); }
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 1st argument}}
@@ -479,7 +479,7 @@ int test_with_random_access_iterator() {
#pragma omp for
for (begin = GoodIter(0); begin < end; ++begin)
++begin;
-// expected-error@+4 {{invalid operands to binary expression ('GoodIter' and 'const Iter0')}}
+// expected-error@+4 {{invalid operands to binary expression ('GoodIter' and 'Iter0')}}
// expected-error@+3 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
#pragma omp parallel
#pragma omp for
@@ -562,7 +562,7 @@ int test_with_random_access_iterator() {
for (Iter1 I = begin1; I >= end1; ++I)
++I;
#pragma omp parallel
-// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+4 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// Initializer is constructor with all default params.
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
diff --git a/test/OpenMP/for_simd_loop_messages.cpp b/test/OpenMP/for_simd_loop_messages.cpp
index 6e68eb8a082d..da7e6843a0c4 100644
--- a/test/OpenMP/for_simd_loop_messages.cpp
+++ b/test/OpenMP/for_simd_loop_messages.cpp
@@ -406,12 +406,12 @@ public:
typedef int difference_type;
typedef std::random_access_iterator_tag iterator_category;
};
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'GoodIter' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
int operator-(GoodIter a, GoodIter b) { return 0; }
// expected-note@+1 3 {{candidate function not viable: requires single argument 'a', but 2 arguments were provided}}
GoodIter operator-(GoodIter a) { return a; }
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'int' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'int' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
GoodIter operator-(GoodIter a, int v) { return GoodIter(); }
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 1st argument}}
@@ -463,7 +463,7 @@ int test_with_random_access_iterator() {
for (begin = GoodIter(0); begin < end; ++begin)
++begin;
#pragma omp parallel
-// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'const Iter0')}}
+// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'Iter0')}}
// expected-error@+2 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
#pragma omp for simd
for (begin = begin0; begin < end; ++begin)
@@ -544,7 +544,7 @@ int test_with_random_access_iterator() {
for (Iter1 I = begin1; I >= end1; ++I)
++I;
#pragma omp parallel
-// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+4 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// Initializer is constructor with all default params.
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
diff --git a/test/OpenMP/openmp_common.c b/test/OpenMP/openmp_common.c
index 3765f4c5dc14..ed55796b3374 100644
--- a/test/OpenMP/openmp_common.c
+++ b/test/OpenMP/openmp_common.c
@@ -7,3 +7,8 @@ void foo() {
#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}
}
+
+typedef struct S {
+#pragma omp parallel for private(j) schedule(static) if (tree1->totleaf > 1024) // expected-error {{unexpected OpenMP directive '#pragma omp parallel for'}}
+} St;
+
diff --git a/test/OpenMP/parallel_copyin_codegen.cpp b/test/OpenMP/parallel_copyin_codegen.cpp
index 281d82ce9705..4aacb809573f 100644
--- a/test/OpenMP/parallel_copyin_codegen.cpp
+++ b/test/OpenMP/parallel_copyin_codegen.cpp
@@ -4,6 +4,14 @@
// RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -std=c++11 -DLAMBDA -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -fblocks -DBLOCKS -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=ARRAY %s
+
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck %s -check-prefix=TLS-CHECK
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple %itanium_abi_triple -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -x c++ -triple %itanium_abi_triple -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TLS-CHECK
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=TLS-LAMBDA %s
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=TLS-BLOCKS %s
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DARRAY -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck -check-prefix=TLS-ARRAY %s
+// REQUIRES: tls
// expected-no-diagnostics
#ifndef ARRAY
#ifndef HEADER
@@ -25,7 +33,9 @@ struct S {
// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
-
+// TLS-CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
+// TLS-CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
+// TLS-CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[T_VAR:@.+]] = internal global i{{[0-9]+}} 1122,
// CHECK-DAG: [[VEC:@.+]] = internal global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 1, i{{[0-9]+}} 2],
@@ -35,6 +45,15 @@ struct S {
// CHECK-DAG: [[TMAIN_VEC:@.+]] = linkonce_odr global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 3, i{{[0-9]+}} 3],
// CHECK-DAG: [[TMAIN_S_ARR:@.+]] = linkonce_odr global [2 x [[S_INT_TY]]] zeroinitializer,
// CHECK-DAG: [[TMAIN_VAR:@.+]] = linkonce_odr global [[S_INT_TY]] zeroinitializer,
+// TLS-CHECK-DAG: [[T_VAR:@.+]] = internal thread_local global i{{[0-9]+}} 1122,
+// TLS-CHECK-DAG: [[VEC:@.+]] = internal thread_local global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 1, i{{[0-9]+}} 2],
+// TLS-CHECK-DAG: [[S_ARR:@.+]] = internal thread_local global [2 x [[S_FLOAT_TY]]] zeroinitializer,
+// TLS-CHECK-DAG: [[VAR:@.+]] = internal thread_local global [[S_FLOAT_TY]] zeroinitializer,
+// TLS-CHECK-DAG: [[TMAIN_T_VAR:@.+]] = linkonce_odr thread_local global i{{[0-9]+}} 333,
+// TLS-CHECK-DAG: [[TMAIN_VEC:@.+]] = linkonce_odr thread_local global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 3, i{{[0-9]+}} 3],
+// TLS-CHECK-DAG: [[TMAIN_S_ARR:@.+]] = linkonce_odr thread_local global [2 x [[S_INT_TY]]] zeroinitializer,
+// TLS-CHECK-DAG: [[TMAIN_VAR:@.+]] = linkonce_odr thread_local global [[S_INT_TY]] zeroinitializer,
+
template <typename T>
T tmain() {
S<T> test;
@@ -59,12 +78,28 @@ int main() {
// LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
// LAMBDA-LABEL: @main
// LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]](
+ // TLS-LAMBDA: [[G_CAPTURE_TY:%.+]] = type { i{{[0-9]+}}* }
+ // TLS-LAMBDA: [[G:@.+]] = {{.*}}thread_local {{.*}}global i{{[0-9]+}} 1212,
+ // TLS-LAMBDA-LABEL: @main
+ // TLS-LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]](
[&]() {
// LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
// LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8*
+
+ // TLS-LAMBDA-DAG: [[G_CPY_ADDR:%.+]] = getelementptr inbounds [[G_CAPTURE_TY]], [[G_CAPTURE_TY]]* [[G_CAPTURE:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
+ // TLS-LAMBDA-DAG: [[G_CPY_VAL:%.+]] = call i{{[0-9]+}}* [[G_CTOR:@.+]]()
+ // TLS-LAMBDA: store i{{[0-9]+}}* [[G_CPY_VAL]], i{{[0-9]+}}** [[G_CPY_ADDR]]
+ // TLS-LAMBDA: [[G_CAPTURE_CAST:%.+]] = bitcast [[G_CAPTURE_TY]]* [[G_CAPTURE]] to i8*
+ // TLS-LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* [[G_CAPTURE_CAST]]
+
+ // TLS-LAMBDA: define {{.*}}i{{[0-9]+}}* [[G_CTOR]]() {
+ // TLS-LAMBDA: ret i{{[0-9]+}}* [[G]]
+ // TLS-LAMBDA: }
+
#pragma omp parallel copyin(g)
{
// LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
+ // TLS-LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, [[G_CAPTURE_TY]]* [[ARG:%.+]])
// threadprivate_g = g;
// LAMBDA: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
@@ -76,14 +111,31 @@ int main() {
// LAMBDA: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
// LAMBDA: [[DONE]]
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_FIELD:%.+]] = getelementptr inbounds [[G_CAPTURE_TY]], [[G_CAPTURE_TY]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_SRC:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_CAPTURE_FIELD]]
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_SRCC:%.+]] = ptrtoint i{{[0-9]+}}* [[G_CAPTURE_SRC]] to i{{[0-9]+}}
+ // TLS-LAMBDA-DAG: [[G_CAPTURE_DSTC:%.+]] = ptrtoint i{{[0-9]+}}* [[G_CAPTURE_DST]] to i{{[0-9]+}}
+ // TLS-LAMBDA: icmp ne i{{[0-9]+}} {{%.+}}, {{%.+}}
+ // TLS-LAMBDA: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+ // TLS-LAMBDA: [[NOT_MASTER]]
+ // TLS-LAMBDA: load i{{[0-9]+}}, i{{[0-9]+}}* [[G_CAPTURE_SRC]],
+ // TLS-LAMBDA: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* [[G_CAPTURE_DST]],
+ // TLS-LAMBDA: [[DONE]]
+
// LAMBDA: call {{.*}}i32 @__kmpc_cancel_barrier(
+ // TLS-LAMBDA: call {{.*}}i32 @__kmpc_cancel_barrier(
g = 1;
// LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}*
+ // TLS-LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}*
[&]() {
// LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
// LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
g = 2;
// LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
+
+ // TLS-LAMBDA: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-LAMBDA: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_CAPTURE_DST]]
}();
}
}();
@@ -92,12 +144,28 @@ int main() {
// BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
// BLOCKS-LABEL: @main
// BLOCKS: call {{.*}}void {{%.+}}(i8
+
+ // TLS-BLOCKS: [[G_CAPTURE_TY:%.+]] = type { i{{[0-9]+}}* }
+ // TLS-BLOCKS: [[G:@.+]] = {{.*}}thread_local {{.*}}global i{{[0-9]+}} 1212,
+ // TLS-BLOCKS-LABEL: @main
+ // TLS-BLOCKS: call {{.*}}void {{%.+}}(i8
^{
// BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
// BLOCKS: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8*
+
+ // TLS-BLOCKS-DAG: [[G_CPY_ADDR:%.+]] = getelementptr inbounds [[G_CAPTURE_TY]], [[G_CAPTURE_TY]]* [[G_CAPTURE:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
+ // TLS-BLOCKS-DAG: [[G_CPY_VAL:%.+]] = call i{{[0-9]+}}* [[G_CTOR:@.+]]()
+ // TLS-BLOCKS: store i{{[0-9]+}}* [[G_CPY_VAL]], i{{[0-9]+}}** [[G_CPY_ADDR]]
+ // TLS-BLOCKS: [[G_CAPTURE_CAST:%.+]] = bitcast [[G_CAPTURE_TY]]* [[G_CAPTURE]] to i8*
+ // TLS-BLOCKS: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* [[G_CAPTURE_CAST]]
+
+ // TLS-BLOCKS: define {{.*}}i{{[0-9]+}}* [[G_CTOR]]() {
+ // TLS-BLOCKS: ret i{{[0-9]+}}* [[G]]
+ // TLS-BLOCKS: }
#pragma omp parallel copyin(g)
{
// BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
+ // TLS-BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, [[G_CAPTURE_TY]]* [[ARG:%.+]])
// threadprivate_g = g;
// BLOCKS: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
@@ -109,19 +177,44 @@ int main() {
// BLOCKS: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
// BLOCKS: [[DONE]]
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_FIELD:%.+]] = getelementptr inbounds [[G_CAPTURE_TY]], [[G_CAPTURE_TY]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_SRC:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_CAPTURE_FIELD]]
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_SRCC:%.+]] = ptrtoint i{{[0-9]+}}* [[G_CAPTURE_SRC]] to i{{[0-9]+}}
+ // TLS-BLOCKS-DAG: [[G_CAPTURE_DSTC:%.+]] = ptrtoint i{{[0-9]+}}* [[G_CAPTURE_DST]] to i{{[0-9]+}}
+ // TLS-BLOCKS: icmp ne i{{[0-9]+}} {{%.+}}, {{%.+}}
+ // TLS-BLOCKS: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+ // TLS-BLOCKS: [[NOT_MASTER]]
+ // TLS-BLOCKS: load i{{[0-9]+}}, i{{[0-9]+}}* [[G_CAPTURE_SRC]],
+ // TLS-BLOCKS: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* [[G_CAPTURE_DST]],
+ // TLS-BLOCKS: [[DONE]]
+
// BLOCKS: call {{.*}}i32 @__kmpc_cancel_barrier(
+ // TLS-BLOCKS: call {{.*}}i32 @__kmpc_cancel_barrier(
g = 1;
// BLOCKS: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}*
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS: call {{.*}}void {{%.+}}(i8
+
+ // TLS-BLOCKS: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-BLOCKS: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_CAPTURE_DST]]
+ // TLS-BLOCKS-NOT: [[G]]{{[[^:word:]]}}
+ // TLS-BLOCKS: call {{.*}}void {{%.+}}(i8
^{
// BLOCKS: define {{.+}} void {{@.+}}(i8*
+ // TLS-BLOCKS: define {{.+}} void {{@.+}}(i8*
g = 2;
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
// BLOCKS: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}*
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS: ret
+
+ // TLS-BLOCKS-NOT: [[G]]{{[[^:word:]]}}
+ // TLS-BLOCKS: [[G_CAPTURE_DST:%.+]] = call i{{[0-9]+}}* [[G_CTOR]]()
+ // TLS-BLOCKS: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_CAPTURE_DST]]
+ // TLS-BLOCKS-NOT: [[G]]{{[[^:word:]]}}
+ // TLS-BLOCKS: ret
}();
}
}();
@@ -153,12 +246,34 @@ int main() {
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
-//
+
+// TLS-CHECK-LABEL: @main
+// TLS-CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
+// TLS-CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* [[TEST]], [[S_FLOAT_TY]]*
+// TLS-CHECK-DAG: store i{{[0-9]+}}* [[T_VAR]], i{{[0-9]+}}** [[T_VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: store [2 x i{{[0-9]+}}]* [[VEC]], [2 x i{{[0-9]+}}]** [[VEC_FIELD:%.+]],
+// TLS-CHECK-DAG: store [2 x [[S_FLOAT_TY]]]* [[S_ARR]], [2 x [[S_FLOAT_TY]]]** [[S_ARR_FIELD:%.+]],
+// TLS-CHECK-DAG: store [[S_FLOAT_TY]]* [[VAR]], [[S_FLOAT_TY]]** [[VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: [[T_VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[VEC_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[S_ARR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[MAIN_MICROTASK:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// TLS-CHECK-DAG: store i{{[0-9]+}}* [[T_VAR]], i{{[0-9]+}}** [[T_VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: [[T_VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[MAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// TLS-CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
+// TLS-CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
+// TLS-CHECK: ret
+
// CHECK: define internal {{.*}}void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: define internal {{.*}}void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// TLS-CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+
// threadprivate_t_var = t_var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[T_VAR]]
// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
@@ -168,10 +283,24 @@ int main() {
// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR]],
// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load i32*, i32** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_LONG:%.+]] = ptrtoint i32* [[MASTER_REF]] to i{{[0-9]+}}
+// TLS-CHECK: icmp ne i{{[0-9]+}} [[MASTER_LONG]], ptrtoint (i{{[0-9]+}}* [[T_VAR]] to i{{[0-9]+}})
+// TLS-CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// TLS-CHECK: [[NOT_MASTER]]
+// TLS-CHECK: [[MASTER_VAL:%.+]] = load i32, i32* [[MASTER_REF]]
+// TLS-CHECK: store i32 [[MASTER_VAL]], i32* [[T_VAR]]
+
// threadprivate_vec = vec;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[VEC]]
// CHECK: call void @llvm.memcpy{{.*}}(i8* %{{.+}}, i8* bitcast ([2 x i{{[0-9]+}}]* [[VEC]] to i8*),
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [2 x i32]*, [2 x i32]** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_CAST:%.+]] = bitcast [2 x i32]* [[MASTER_REF]] to i8*
+// TLS-CHECK: call void @llvm.memcpy{{.*}}(i8* bitcast ([2 x i{{[0-9]+}}]* [[VEC]] to i8*), i8* [[MASTER_CAST]]
+
// threadprivate_s_arr = s_arr;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[S_ARR]]
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* {{%.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
@@ -182,19 +311,43 @@ int main() {
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_CAST:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[MASTER_REF]] to [[S_FLOAT_TY]]*
+// TLS-CHECK-DAG: [[S_ARR_SRC_BEGIN:%.+]] = phi [[S_FLOAT_TY]]* {{.*}}[[MASTER_CAST]]
+// TLS-CHECK-DAG: [[S_ARR_DST_BEGIN:%.+]] = phi [[S_FLOAT_TY]]* {{.*}}getelementptr inbounds ([2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0)
+// TLS-CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
+// TLS-CHECK-DAG: [[S_ARR_SRC_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_SRC_BEGIN]], i{{[0-9]+}} 1
+// TLS-CHECK-DAG: [[S_ARR_DST_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_DST_BEGIN]], i{{[0-9]+}} 1
+// TLS-CHECK: icmp eq [[S_FLOAT_TY]]* [[S_ARR_DST_END]], getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* getelementptr inbounds ([2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0), i{{[0-9]+}} 2)
+// TLS-CHECK: br i1 %{{.*}}, label %[[ARR_DONE:.+]], label {{.*}}
+// TLS-CHECK: [[ARR_DONE]]
+
// threadprivate_var = var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[VAR]]
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{%.+}}, [[S_FLOAT_TY]]* {{.*}}[[VAR]])
// CHECK: [[DONE]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[MASTER_FIELD]]
+// TLS-CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{.*}}[[VAR]], [[S_FLOAT_TY]]* {{.*}}[[MASTER_REF]])
+
// CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
+// TLS-CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// TLS-CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// TLS-CHECK: ret void
+
// CHECK: define internal {{.*}}void [[MAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: define internal {{.*}}void [[MAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// TLS-CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+
// threadprivate_t_var = t_var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[T_VAR]]
// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
@@ -205,9 +358,24 @@ int main() {
// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
// CHECK: [[DONE]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load i32*, i32** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_LONG:%.+]] = ptrtoint i32* [[MASTER_REF]] to i{{[0-9]+}}
+// TLS-CHECK: icmp ne i{{[0-9]+}} [[MASTER_LONG]], ptrtoint (i{{[0-9]+}}* [[T_VAR]] to i{{[0-9]+}})
+// TLS-CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// TLS-CHECK: [[NOT_MASTER]]
+// TLS-CHECK: [[MASTER_VAL:%.+]] = load i32, i32* [[MASTER_REF]]
+// TLS-CHECK: store i32 [[MASTER_VAL]], i32* [[T_VAR]]
+// TLS-CHECK: [[DONE]]
+
// CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
+// TLS-CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// TLS-CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// TLS-CHECK: ret void
+
// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[TEST]], [[S_INT_TY]]*
@@ -215,11 +383,30 @@ int main() {
// CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
+
+// TLS-CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
+// TLS-CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
+// TLS-CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[TEST]], [[S_INT_TY]]*
+// TLS-CHECK-DAG: store i{{[0-9]+}}* [[TMAIN_T_VAR]], i{{[0-9]+}}** [[T_VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: store [2 x i{{[0-9]+}}]* [[TMAIN_VEC]], [2 x i{{[0-9]+}}]** [[VEC_FIELD:%.+]],
+// TLS-CHECK-DAG: store [2 x [[S_INT_TY]]]* [[TMAIN_S_ARR]], [2 x [[S_INT_TY]]]** [[S_ARR_FIELD:%.+]],
+// TLS-CHECK-DAG: store [[S_INT_TY]]* [[TMAIN_VAR]], [[S_INT_TY]]** [[VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: [[T_VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[VEC_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[S_ARR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK-DAG: [[VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// TLS-CHECK-DAG: store i{{[0-9]+}}* [[TMAIN_T_VAR]], i{{[0-9]+}}** [[T_VAR_FIELD:%.+]],
+// TLS-CHECK-DAG: [[T_VAR_FIELD]] = getelementptr inbounds {{.*}}
+// TLS-CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
//
// CHECK: define internal {{.*}}void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+//
+// TLS-CHECK: define internal {{.*}}void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// TLS-CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// threadprivate_t_var = t_var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_T_VAR]]
@@ -230,10 +417,24 @@ int main() {
// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[TMAIN_T_VAR]],
// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load i32*, i32** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_LONG:%.+]] = ptrtoint i32* [[MASTER_REF]] to i{{[0-9]+}}
+// TLS-CHECK: icmp ne i{{[0-9]+}} [[MASTER_LONG]], ptrtoint (i{{[0-9]+}}* [[TMAIN_T_VAR]] to i{{[0-9]+}})
+// TLS-CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// TLS-CHECK: [[NOT_MASTER]]
+// TLS-CHECK: [[MASTER_VAL:%.+]] = load i32, i32* [[MASTER_REF]]
+// TLS-CHECK: store i32 [[MASTER_VAL]], i32* [[TMAIN_T_VAR]]
+
// threadprivate_vec = vec;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_VEC]]
// CHECK: call {{.*}}void @llvm.memcpy{{.*}}(i8* %{{.+}}, i8* bitcast ([2 x i{{[0-9]+}}]* [[TMAIN_VEC]] to i8*),
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [2 x i32]*, [2 x i32]** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_CAST:%.+]] = bitcast [2 x i32]* [[MASTER_REF]] to i8*
+// TLS-CHECK: call void @llvm.memcpy{{.*}}(i8* bitcast ([2 x i{{[0-9]+}}]* [[TMAIN_VEC]] to i8*), i8* [[MASTER_CAST]]
+
// threadprivate_s_arr = s_arr;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_S_ARR]]
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* {{%.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
@@ -244,19 +445,43 @@ int main() {
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_CAST:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[MASTER_REF]] to [[S_INT_TY]]*
+// TLS-CHECK-DAG: [[S_ARR_SRC_BEGIN:%.+]] = phi [[S_INT_TY]]* {{.*}}[[MASTER_CAST]]
+// TLS-CHECK-DAG: [[S_ARR_DST_BEGIN:%.+]] = phi [[S_INT_TY]]* {{.*}}getelementptr inbounds ([2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[TMAIN_S_ARR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0)
+// TLS-CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
+// TLS-CHECK-DAG: [[S_ARR_SRC_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_SRC_BEGIN]], i{{[0-9]+}} 1
+// TLS-CHECK-DAG: [[S_ARR_DST_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_DST_BEGIN]], i{{[0-9]+}} 1
+// TLS-CHECK: icmp eq [[S_INT_TY]]* [[S_ARR_DST_END]], getelementptr ([[S_INT_TY]], [[S_INT_TY]]* getelementptr inbounds ([2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[TMAIN_S_ARR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0), i{{[0-9]+}} 2)
+// TLS-CHECK: br i1 %{{.*}}, label %[[ARR_DONE:.+]], label {{.*}}
+// TLS-CHECK: [[ARR_DONE]]
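+// (For the array member, copyin is lowered to an element-wise loop: copy-assign
+// one element, advance both source and destination pointers, and stop once the
+// destination reaches s_arr + 2.)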
+
// threadprivate_var = var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_VAR]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{%.+}}, [[S_INT_TY]]* {{.*}}[[TMAIN_VAR]])
// CHECK: [[DONE]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[MASTER_FIELD]]
+// TLS-CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{.*}}[[TMAIN_VAR]], [[S_INT_TY]]* {{.*}}[[MASTER_REF]])
+
// CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
+// TLS-CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// TLS-CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// TLS-CHECK: ret void
+
// CHECK: define internal {{.*}}void [[TMAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: define internal {{.*}}void [[TMAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// TLS-CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+
// threadprivate_t_var = t_var;
// CHECK: call {{.*}}i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_T_VAR]]
// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
@@ -267,12 +492,30 @@ int main() {
// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
// CHECK: [[DONE]]
+// TLS-CHECK: [[MASTER_FIELD:%.+]] = getelementptr inbounds {{.+}}
+// TLS-CHECK: [[MASTER_REF:%.+]] = load i32*, i32** [[MASTER_FIELD]]
+// TLS-CHECK: [[MASTER_LONG:%.+]] = ptrtoint i32* [[MASTER_REF]] to i{{[0-9]+}}
+// TLS-CHECK: icmp ne i{{[0-9]+}} [[MASTER_LONG]], ptrtoint (i{{[0-9]+}}* [[TMAIN_T_VAR]] to i{{[0-9]+}})
+// TLS-CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// TLS-CHECK: [[NOT_MASTER]]
+// TLS-CHECK: [[MASTER_VAL:%.+]] = load i32, i32* [[MASTER_REF]]
+// TLS-CHECK: store i32 [[MASTER_VAL]], i32* [[TMAIN_T_VAR]]
+// TLS-CHECK: [[DONE]]
+
// CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
+// TLS-CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// TLS-CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+// TLS-CHECK: call {{.*}}i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// TLS-CHECK: ret void
+
#endif
#else
// ARRAY-LABEL: array_func
+// TLS-ARRAY: [[ARR_CAPTURETY:%.+]] = type { [2 x {{.*}}]*, [2 x {{.*}}]* }
+// TLS-ARRAY-LABEL: array_func
+
struct St {
int a, b;
St() : a(0), b(0) {}
@@ -287,6 +530,18 @@ void array_func() {
// ARRAY: call i8* @__kmpc_threadprivate_cached(
// ARRAY: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* bitcast ([2 x i32]* @{{.+}} to i8*), i64 8, i32 4, i1 false)
// ARRAY: call dereferenceable(8) %struct.St* @{{.+}}(%struct.St* %{{.+}}, %struct.St* dereferenceable(8) %{{.+}})
+
+// TLS-ARRAY-DAG: [[ARR_CAP1:%.+]] = getelementptr inbounds [[ARR_CAPTURETY]], [[ARR_CAPTURETY]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
+// TLS-ARRAY-DAG: [[ARR_CAP2:%.+]] = getelementptr inbounds [[ARR_CAPTURETY]], [[ARR_CAPTURETY]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 1
+// TLS-ARRAY-DAG: store [2 x {{.*}}]* @{{.*}}, [2 x {{.*}}]** [[ARR_CAP1]]
+// TLS-ARRAY-DAG: store [2 x {{.*}}]* @{{.*}}, [2 x {{.*}}]** [[ARR_CAP2]]
+// TLS-ARRAY: @__kmpc_fork_call(
+// TLS-ARRAY-DAG: call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast ([2 x i32]* @{{.+}} to i8*), i8* [[REF:%.+]], i64 8, i32 4, i1 false)
+// TLS-ARRAY-DAG: [[REF]] = bitcast [2 x i32]* [[REFT:%.+]] to i8*
+// TLS-ARRAY-DAG: [[REFT]] = load [2 x i32]*, [2 x i32]** [[FIELDADDR:%.+]],
+// TLS-ARRAY-DAG: [[FIELDADDR]] = getelementptr inbounds [[ARR_CAPTURETY]], [[ARR_CAPTURETY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}}
+// TLS-ARRAY: call dereferenceable(8) %struct.St* @{{.+}}(%struct.St* %{{.+}}, %struct.St* dereferenceable(8) %{{.+}})
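+// (Under TLS, the parallel region captures the two threadprivate arrays by
+// address in [[ARR_CAPTURETY]]; the outlined body then memcpy's from the
+// master's array into each thread's own copy.)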
+
#pragma omp threadprivate(a, s)
#pragma omp parallel copyin(a, s)
;
diff --git a/test/OpenMP/parallel_for_loop_messages.cpp b/test/OpenMP/parallel_for_loop_messages.cpp
index 09a15e2dc337..7d14a3c9b74c 100644
--- a/test/OpenMP/parallel_for_loop_messages.cpp
+++ b/test/OpenMP/parallel_for_loop_messages.cpp
@@ -353,12 +353,12 @@ public:
typedef int difference_type;
typedef std::random_access_iterator_tag iterator_category;
};
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'GoodIter' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
int operator-(GoodIter a, GoodIter b) { return 0; }
// expected-note@+1 3 {{candidate function not viable: requires single argument 'a', but 2 arguments were provided}}
GoodIter operator-(GoodIter a) { return a; }
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'int' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'int' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
GoodIter operator-(GoodIter a, int v) { return GoodIter(); }
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 1st argument}}
@@ -401,7 +401,7 @@ int test_with_random_access_iterator() {
#pragma omp parallel for
for (begin = GoodIter(0); begin < end; ++begin)
++begin;
-// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'const Iter0')}}
+// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'Iter0')}}
// expected-error@+2 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
#pragma omp parallel for
for (begin = begin0; begin < end; ++begin)
@@ -467,7 +467,7 @@ int test_with_random_access_iterator() {
#pragma omp parallel for
for (Iter1 I = begin1; I >= end1; ++I)
++I;
-// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+4 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// Initializer is constructor with all default params.
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
diff --git a/test/OpenMP/parallel_for_simd_loop_messages.cpp b/test/OpenMP/parallel_for_simd_loop_messages.cpp
index 0473b248a69d..85d8574ba9f4 100644
--- a/test/OpenMP/parallel_for_simd_loop_messages.cpp
+++ b/test/OpenMP/parallel_for_simd_loop_messages.cpp
@@ -353,12 +353,12 @@ public:
typedef int difference_type;
typedef std::random_access_iterator_tag iterator_category;
};
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'GoodIter' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
int operator-(GoodIter a, GoodIter b) { return 0; }
// expected-note@+1 3 {{candidate function not viable: requires single argument 'a', but 2 arguments were provided}}
GoodIter operator-(GoodIter a) { return a; }
-// expected-note@+2 {{candidate function not viable: no known conversion from 'const Iter0' to 'int' for 2nd argument}}
+// expected-note@+2 {{candidate function not viable: no known conversion from 'Iter0' to 'int' for 2nd argument}}
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter1' to 'GoodIter' for 1st argument}}
GoodIter operator-(GoodIter a, int v) { return GoodIter(); }
// expected-note@+1 2 {{candidate function not viable: no known conversion from 'Iter0' to 'GoodIter' for 1st argument}}
@@ -401,7 +401,7 @@ int test_with_random_access_iterator() {
#pragma omp parallel for simd
for (begin = GoodIter(0); begin < end; ++begin)
++begin;
-// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'const Iter0')}}
+// expected-error@+3 {{invalid operands to binary expression ('GoodIter' and 'Iter0')}}
// expected-error@+2 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
#pragma omp parallel for simd
for (begin = begin0; begin < end; ++begin)
@@ -467,7 +467,7 @@ int test_with_random_access_iterator() {
#pragma omp parallel for simd
for (Iter1 I = begin1; I >= end1; ++I)
++I;
-// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+// expected-error@+5 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+4 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// Initializer is constructor with all default params.
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
diff --git a/test/OpenMP/simd_linear_messages.cpp b/test/OpenMP/simd_linear_messages.cpp
index 78f905fd9460..6fac26290b4e 100644
--- a/test/OpenMP/simd_linear_messages.cpp
+++ b/test/OpenMP/simd_linear_messages.cpp
@@ -156,6 +156,7 @@ namespace C {
using A::x;
}
+int f;
int main(int argc, char **argv) {
double darr[100];
// expected-note@+1 {{in instantiation of function template specialization 'test_template<-4, double, int>' requested here}}
@@ -167,6 +168,8 @@ int main(int argc, char **argv) {
S5 g(5); // expected-note {{'g' defined here}}
int i;
int &j = i; // expected-note {{'j' defined here}}
+ #pragma omp simd linear(f) linear(f) // expected-error {{linear variable cannot be linear}} expected-note {{defined as linear}}
+ for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear // expected-error {{expected '(' after 'linear'}}
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear ( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
diff --git a/test/OpenMP/simd_loop_messages.cpp b/test/OpenMP/simd_loop_messages.cpp
index fe8e70a9b8a0..bd7dd5bf52d1 100644
--- a/test/OpenMP/simd_loop_messages.cpp
+++ b/test/OpenMP/simd_loop_messages.cpp
@@ -456,7 +456,7 @@ int test_with_random_access_iterator() {
++I;
// Initializer is constructor with all default params.
- // expected-error@+4 {{invalid operands to binary expression ('Iter1' and 'Iter1')}}
+ // expected-error@+4 {{invalid operands to binary expression ('Iter1' and 'float')}}
// expected-error@+3 {{could not calculate number of iterations calling 'operator-' with upper and lower loop bounds}}
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
#pragma omp simd
diff --git a/test/PCH/objc_boxable.m b/test/PCH/objc_boxable.m
new file mode 100644
index 000000000000..2cd9c23d5417
--- /dev/null
+++ b/test/PCH/objc_boxable.m
@@ -0,0 +1,17 @@
+// Test objc_boxable update record
+
+// RUN: %clang_cc1 -x objective-c %S/objc_boxable_record.h -emit-pch -o %t1
+// RUN: %clang_cc1 -x objective-c %S/objc_boxable_record_attr.h -include-pch %t1 -emit-pch -o %t2
+// RUN: %clang_cc1 %s -include-pch %t2 -fsyntax-only -verify
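+// (The boxable attribute is attached to 'struct boxable' only in the second
+// header, so the second PCH has to emit an update record for the record type
+// already serialized in the first PCH; that is what this test exercises.)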
+
+// expected-no-diagnostics
+
+__attribute__((objc_root_class))
+@interface NSValue
++ (NSValue *)valueWithBytes:(const void *)bytes objCType:(const char *)type;
+@end
+
+void doStuff(struct boxable b) {
+ id v = @(b);
+}
+
diff --git a/test/PCH/objc_boxable_record.h b/test/PCH/objc_boxable_record.h
new file mode 100644
index 000000000000..db2a3a8fc561
--- /dev/null
+++ b/test/PCH/objc_boxable_record.h
@@ -0,0 +1,5 @@
+// used with objc_boxable.m test
+struct boxable {
+ int dummy;
+};
+
diff --git a/test/PCH/objc_boxable_record_attr.h b/test/PCH/objc_boxable_record_attr.h
new file mode 100644
index 000000000000..96a1559846ad
--- /dev/null
+++ b/test/PCH/objc_boxable_record_attr.h
@@ -0,0 +1,3 @@
+// used with objc_boxable.m test
+typedef struct __attribute((objc_boxable)) boxable boxable;
+
diff --git a/test/Preprocessor/predefined-arch-macros.c b/test/Preprocessor/predefined-arch-macros.c
index 5126ef3d94cf..14fc49478af5 100644
--- a/test/Preprocessor/predefined-arch-macros.c
+++ b/test/Preprocessor/predefined-arch-macros.c
@@ -1748,3 +1748,12 @@
// RUN: | FileCheck %s -check-prefix=CHECK_SYSTEMZ_HTM
//
// CHECK_SYSTEMZ_HTM: #define __HTM__ 1
+//
+// RUN: %clang -fzvector -E -dM %s -o - 2>&1 \
+// RUN: -target s390x-unknown-linux \
+// RUN: | FileCheck %s -check-prefix=CHECK_SYSTEMZ_ZVECTOR
+// RUN: %clang -mzvector -E -dM %s -o - 2>&1 \
+// RUN: -target s390x-unknown-linux \
+// RUN: | FileCheck %s -check-prefix=CHECK_SYSTEMZ_ZVECTOR
+//
+// CHECK_SYSTEMZ_ZVECTOR: #define __VEC__ 10301
diff --git a/test/Sema/dllimport.c b/test/Sema/dllimport.c
index 3ca1baa299c1..d81aecd604c7 100644
--- a/test/Sema/dllimport.c
+++ b/test/Sema/dllimport.c
@@ -143,7 +143,7 @@ __declspec(dllimport) void redecl3(); // expected-note{{previous declaration is
void redecl4(); // expected-note{{previous declaration is here}}
void useRedecl4() { redecl4(); }
-__declspec(dllimport) void redecl4(); // expected-error{{redeclaration of 'redecl4' cannot add 'dllimport' attribute}}
+__declspec(dllimport) void redecl4(); // expected-warning{{redeclaration of 'redecl4' should not add 'dllimport' attribute}}
// Allow with a warning if the decl hasn't been used yet.
void redecl5(); // expected-note{{previous declaration is here}}
diff --git a/test/Sema/typo-correction.c b/test/Sema/typo-correction.c
index ff43064d5b75..4ef50570899c 100644
--- a/test/Sema/typo-correction.c
+++ b/test/Sema/typo-correction.c
@@ -49,3 +49,9 @@ extern double cabs(_Complex double z);
void fn1() {
cabs(errij); // expected-error {{use of undeclared identifier 'errij'}}
}
+
+extern long afunction(int); // expected-note {{'afunction' declared here}}
+void fn2() {
+ f(THIS_IS_AN_ERROR, // expected-error {{use of undeclared identifier 'THIS_IS_AN_ERROR'}}
+ afunction(afunction_)); // expected-error {{use of undeclared identifier 'afunction_'; did you mean 'afunction'?}}
+}
diff --git a/test/Sema/zvector.c b/test/Sema/zvector.c
new file mode 100644
index 000000000000..5220a629cfe1
--- /dev/null
+++ b/test/Sema/zvector.c
@@ -0,0 +1,1009 @@
+// RUN: %clang_cc1 -triple s390x-linux-gnu -fzvector \
+// RUN: -fno-lax-vector-conversions -W -Wall -Wconversion \
+// RUN: -Werror -fsyntax-only -verify %s
+
+vector signed char sc, sc2;
+vector unsigned char uc, uc2;
+vector bool char bc, bc2;
+
+vector signed short ss, ss2;
+vector unsigned short us, us2;
+vector bool short bs, bs2;
+
+vector signed int si, si2;
+vector unsigned int ui, ui2;
+vector bool int bi, bi2;
+
+vector signed long long sl, sl2;
+vector unsigned long long ul, ul2;
+vector bool long long bl, bl2;
+
+vector double fd, fd2;
+
+vector long ll; // expected-error {{cannot use 'long' with '__vector'}}
+vector float ff; // expected-error {{cannot use 'float' with '__vector'}}
+
+signed char sc_scalar;
+unsigned char uc_scalar;
+
+signed short ss_scalar;
+unsigned short us_scalar;
+
+signed int si_scalar;
+unsigned int ui_scalar;
+
+signed long sl_scalar;
+unsigned long ul_scalar;
+
+double fd_scalar;
+
+void foo(void)
+{
+ // -------------------------------------------------------------------------
+ // Test assignment.
+ // -------------------------------------------------------------------------
+
+ sc = sc2;
+ uc = uc2;
+ bc = bc2;
+
+ ss = ss2;
+ us = us2;
+ bs = bs2;
+
+ si = si2;
+ ui = ui2;
+ bi = bi2;
+
+ sl = sl2;
+ ul = ul2;
+ bl = bl2;
+ fd = fd2;
+
+ sc = uc2; // expected-error {{incompatible type}}
+ sc = bc2; // expected-error {{incompatible type}}
+ uc = sc2; // expected-error {{incompatible type}}
+ uc = bc2; // expected-error {{incompatible type}}
+ bc = sc2; // expected-error {{incompatible type}}
+ bc = uc2; // expected-error {{incompatible type}}
+
+ sc = sc_scalar; // expected-error {{incompatible type}}
+ sc = uc_scalar; // expected-error {{incompatible type}}
+ uc = sc_scalar; // expected-error {{incompatible type}}
+ uc = uc_scalar; // expected-error {{incompatible type}}
+ bc = sc_scalar; // expected-error {{incompatible type}}
+ bc = uc_scalar; // expected-error {{incompatible type}}
+
+ sc = ss2; // expected-error {{incompatible type}}
+ sc = si2; // expected-error {{incompatible type}}
+ sc = sl2; // expected-error {{incompatible type}}
+ sc = fd2; // expected-error {{incompatible type}}
+
+ ss = sc2; // expected-error {{incompatible type}}
+ si = sc2; // expected-error {{incompatible type}}
+ sl = sc2; // expected-error {{incompatible type}}
+ fd = sc2; // expected-error {{incompatible type}}
+
+ uc = us2; // expected-error {{incompatible type}}
+ uc = ui2; // expected-error {{incompatible type}}
+ uc = ul2; // expected-error {{incompatible type}}
+ uc = fd2; // expected-error {{incompatible type}}
+
+ us = uc2; // expected-error {{incompatible type}}
+ ui = uc2; // expected-error {{incompatible type}}
+ ul = uc2; // expected-error {{incompatible type}}
+ fd = uc2; // expected-error {{incompatible type}}
+
+ bc = us2; // expected-error {{incompatible type}}
+ bc = ui2; // expected-error {{incompatible type}}
+ bc = ul2; // expected-error {{incompatible type}}
+ bc = fd2; // expected-error {{incompatible type}}
+
+ bs = bc2; // expected-error {{incompatible type}}
+ bi = bc2; // expected-error {{incompatible type}}
+ bl = bc2; // expected-error {{incompatible type}}
+ fd = bc2; // expected-error {{incompatible type}}
+
+ // -------------------------------------------------------------------------
+ // Test casts to same element width.
+ // -------------------------------------------------------------------------
+
+ sc = (vector signed char)bc2;
+ bc = (vector bool char)uc2;
+ uc = (vector unsigned char)sc2;
+
+ ss = (vector signed short)bs2;
+ bs = (vector bool short)us2;
+ us = (vector unsigned short)ss2;
+
+ si = (vector signed int)bi2;
+ bi = (vector bool int)ui2;
+ ui = (vector unsigned int)si2;
+
+ sl = (vector signed long long)bl2;
+ bl = (vector bool long long)ul2;
+ ul = (vector unsigned long long)fd2;
+ fd = (vector double)sl2;
+
+ // -------------------------------------------------------------------------
+ // Test casts to different element width.
+ // -------------------------------------------------------------------------
+
+ sc = (vector signed char)bs2;
+ bc = (vector bool char)us2;
+ uc = (vector unsigned char)fd2;
+
+ ss = (vector signed short)bi2;
+ bs = (vector bool short)ui2;
+ us = (vector unsigned short)fd2;
+
+ si = (vector signed int)bl2;
+ bi = (vector bool int)ul2;
+ ui = (vector unsigned int)fd2;
+
+ sl = (vector signed long long)bc2;
+ bl = (vector bool long long)uc2;
+ ul = (vector unsigned long long)sc2;
+ fd = (vector double)sc2;
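+  // (Note: as with GNU vector extensions generally, these C-style casts
+  // reinterpret the 128-bit value; they do not convert element-wise.)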
+
+ // -------------------------------------------------------------------------
+ // Test ++.
+ // -------------------------------------------------------------------------
+
+ ++sc2;
+ ++uc2;
+ ++bc2; // expected-error {{cannot increment}}
+
+ ++ss2;
+ ++us2;
+ ++bs2; // expected-error {{cannot increment}}
+
+ ++si2;
+ ++ui2;
+ ++bi2; // expected-error {{cannot increment}}
+
+ ++sl2;
+ ++ul2;
+ ++bl2; // expected-error {{cannot increment}}
+
+ ++fd2;
+
+ sc++;
+ uc++;
+ bc++; // expected-error {{cannot increment}}
+
+ ss++;
+ us++;
+ bs++; // expected-error {{cannot increment}}
+
+ si++;
+ ui++;
+ bi++; // expected-error {{cannot increment}}
+
+ sl++;
+ ul++;
+ bl++; // expected-error {{cannot increment}}
+
+ fd++;
+
+ // -------------------------------------------------------------------------
+ // Test --.
+ // -------------------------------------------------------------------------
+
+ --sc2;
+ --uc2;
+ --bc2; // expected-error {{cannot decrement}}
+
+ --ss2;
+ --us2;
+ --bs2; // expected-error {{cannot decrement}}
+
+ --si2;
+ --ui2;
+ --bi2; // expected-error {{cannot decrement}}
+
+ --sl2;
+ --ul2;
+ --bl2; // expected-error {{cannot decrement}}
+
+ --fd2;
+
+ sc--;
+ uc--;
+ bc--; // expected-error {{cannot decrement}}
+
+ ss--;
+ us--;
+ bs--; // expected-error {{cannot decrement}}
+
+ si--;
+ ui--;
+ bi--; // expected-error {{cannot decrement}}
+
+ sl--;
+ ul--;
+ bl--; // expected-error {{cannot decrement}}
+
+ fd--;
+
+ // -------------------------------------------------------------------------
+ // Test unary +.
+ // -------------------------------------------------------------------------
+
+ sc = +sc2;
+ uc = +uc2;
+ bc = +bc2; // expected-error {{invalid argument type}}
+
+ ss = +ss2;
+ us = +us2;
+ bs = +bs2; // expected-error {{invalid argument type}}
+
+ si = +si2;
+ ui = +ui2;
+ bi = +bi2; // expected-error {{invalid argument type}}
+
+ sl = +sl2;
+ ul = +ul2;
+ bl = +bl2; // expected-error {{invalid argument type}}
+
+ fd = +fd2;
+
+ sc = +si2; // expected-error {{assigning to}}
+ ui = +si2; // expected-error {{assigning to}}
+ ui = +bi2; // expected-error {{invalid argument type}}
+
+ // -------------------------------------------------------------------------
+ // Test unary -.
+ // -------------------------------------------------------------------------
+
+ sc = -sc2;
+ uc = -uc2;
+ bc = -bc2; // expected-error {{invalid argument type}}
+
+ ss = -ss2;
+ us = -us2;
+ bs = -bs2; // expected-error {{invalid argument type}}
+
+ si = -si2;
+ ui = -ui2;
+ bi = -bi2; // expected-error {{invalid argument type}}
+
+ sl = -sl2;
+ ul = -ul2;
+ bl = -bl2; // expected-error {{invalid argument type}}
+
+ fd = -fd2;
+
+ sc = -si2; // expected-error {{assigning to}}
+ ui = -si2; // expected-error {{assigning to}}
+ ui = -bi2; // expected-error {{invalid argument type}}
+
+ // -------------------------------------------------------------------------
+ // Test ~.
+ // -------------------------------------------------------------------------
+
+ sc = ~sc2;
+ uc = ~uc2;
+ bc = ~bc2;
+
+ ss = ~ss2;
+ us = ~us2;
+ bs = ~bs2;
+
+ si = ~si2;
+ ui = ~ui2;
+ bi = ~bi2;
+
+ sl = ~sl2;
+ ul = ~ul2;
+ bl = ~bl2;
+
+ fd = ~fd2; // expected-error {{invalid argument}}
+
+ sc = ~si2; // expected-error {{assigning to}}
+ ui = ~si2; // expected-error {{assigning to}}
+ ui = ~bi2; // expected-error {{assigning to}}
+
+ // -------------------------------------------------------------------------
+ // Test binary +.
+ // -------------------------------------------------------------------------
+
+ sc = sc + sc2;
+ sc = sc + uc2; // expected-error {{can't convert}}
+ sc = uc + sc2; // expected-error {{can't convert}}
+ sc = sc + bc2;
+ sc = bc + sc2;
+
+ uc = uc + uc2;
+ uc = sc + uc2; // expected-error {{can't convert}}
+ uc = uc + sc2; // expected-error {{can't convert}}
+ uc = bc + uc2;
+ uc = uc + bc2;
+
+ bc = bc + bc2; // expected-error {{invalid operands}}
+ bc = bc + uc2; // expected-error {{incompatible type}}
+ bc = uc + bc2; // expected-error {{incompatible type}}
+ bc = bc + sc2; // expected-error {{incompatible type}}
+ bc = sc + bc2; // expected-error {{incompatible type}}
+
+ sc = sc + sc_scalar; // expected-error {{can't convert}}
+ sc = sc + uc_scalar; // expected-error {{can't convert}}
+ sc = sc_scalar + sc; // expected-error {{can't convert}}
+ sc = uc_scalar + sc; // expected-error {{can't convert}}
+ uc = uc + sc_scalar; // expected-error {{can't convert}}
+ uc = uc + uc_scalar; // expected-error {{can't convert}}
+ uc = sc_scalar + uc; // expected-error {{can't convert}}
+ uc = uc_scalar + uc; // expected-error {{can't convert}}
+
+ ss = ss + ss2;
+ us = us + us2;
+ bs = bs + bs2; // expected-error {{invalid operands}}
+
+ si = si + si2;
+ ui = ui + ui2;
+ bi = bi + bi2; // expected-error {{invalid operands}}
+
+ sl = sl + sl2;
+ ul = ul + ul2;
+ bl = bl + bl2; // expected-error {{invalid operands}}
+
+ fd = fd + fd2;
+ fd = fd + ul2; // expected-error {{can't convert}}
+ fd = sl + fd2; // expected-error {{can't convert}}
+
+ sc += sc2;
+ sc += uc2; // expected-error {{can't convert}}
+ sc += bc2;
+
+ uc += uc2;
+ uc += sc2; // expected-error {{can't convert}}
+ uc += bc2;
+
+ bc += bc2; // expected-error {{invalid operands}}
+ bc += sc2; // expected-error {{can't convert}}
+ bc += uc2; // expected-error {{can't convert}}
+
+ sc += ss2; // expected-error {{can't convert}}
+ sc += si2; // expected-error {{can't convert}}
+ sc += sl2; // expected-error {{can't convert}}
+ sc += fd2; // expected-error {{can't convert}}
+
+ sc += sc_scalar; // expected-error {{can't convert}}
+ sc += uc_scalar; // expected-error {{can't convert}}
+ uc += sc_scalar; // expected-error {{can't convert}}
+ uc += uc_scalar; // expected-error {{can't convert}}
+
+ ss += ss2;
+ us += us2;
+ bs += bs2; // expected-error {{invalid operands}}
+
+ si += si2;
+ ui += ui2;
+ bi += bi2; // expected-error {{invalid operands}}
+
+ sl += sl2;
+ ul += ul2;
+ bl += bl2; // expected-error {{invalid operands}}
+
+ fd += fd2;
+
+ // -------------------------------------------------------------------------
+ // Test that binary + rules apply to binary - too.
+ // -------------------------------------------------------------------------
+
+ sc = sc - sc2;
+ uc = uc - uc2;
+ bc = bc - bc2; // expected-error {{invalid operands}}
+
+ sc = uc - sc2; // expected-error {{can't convert}}
+ sc = sc - bc2;
+ uc = bc - uc2;
+
+ sc -= sc2;
+ uc -= uc2;
+ bc -= bc2; // expected-error {{invalid operands}}
+
+ sc -= uc2; // expected-error {{can't convert}}
+ uc -= bc2;
+ bc -= sc2; // expected-error {{can't convert}}
+
+ ss -= ss2;
+ us -= us2;
+ bs -= bs2; // expected-error {{invalid operands}}
+
+ si -= si2;
+ ui -= ui2;
+ bi -= bi2; // expected-error {{invalid operands}}
+
+ sl -= sl2;
+ ul -= ul2;
+ bl -= bl2; // expected-error {{invalid operands}}
+
+ fd -= fd2;
+
+ // -------------------------------------------------------------------------
+ // Test that binary + rules apply to * too. 64-bit integer multiplication
+ // is not required by the spec and so isn't tested here.
+ // -------------------------------------------------------------------------
+
+ sc = sc * sc2;
+ uc = uc * uc2;
+ bc = bc * bc2; // expected-error {{invalid operands}}
+
+ sc = uc * sc2; // expected-error {{can't convert}}
+ sc = sc * bc2; // expected-error {{can't convert}}
+ uc = bc * uc2; // expected-error {{can't convert}}
+
+ sc *= sc2;
+ uc *= uc2;
+ bc *= bc2; // expected-error {{invalid operands}}
+
+ sc *= uc2; // expected-error {{can't convert}}
+ uc *= bc2; // expected-error {{can't convert}}
+ bc *= sc2; // expected-error {{can't convert}}
+
+ ss *= ss2;
+ us *= us2;
+ bs *= bs2; // expected-error {{invalid operands}}
+
+ si *= si2;
+ ui *= ui2;
+ bi *= bi2; // expected-error {{invalid operands}}
+
+ sl *= sl2;
+ ul *= ul2;
+ bl *= bl2; // expected-error {{invalid operands}}
+
+ fd *= fd2;
+
+ // -------------------------------------------------------------------------
+ // Test that * rules apply to / too.
+ // -------------------------------------------------------------------------
+
+ sc = sc / sc2;
+ uc = uc / uc2;
+ bc = bc / bc2; // expected-error {{invalid operands}}
+
+ sc = uc / sc2; // expected-error {{can't convert}}
+ sc = sc / bc2; // expected-error {{can't convert}}
+ uc = bc / uc2; // expected-error {{can't convert}}
+
+ sc /= sc2;
+ uc /= uc2;
+ bc /= bc2; // expected-error {{invalid operands}}
+
+ sc /= uc2; // expected-error {{can't convert}}
+ uc /= bc2; // expected-error {{can't convert}}
+ bc /= sc2; // expected-error {{can't convert}}
+
+ ss /= ss2;
+ us /= us2;
+ bs /= bs2; // expected-error {{invalid operands}}
+
+ si /= si2;
+ ui /= ui2;
+ bi /= bi2; // expected-error {{invalid operands}}
+
+ sl /= sl2;
+ ul /= ul2;
+ bl /= bl2; // expected-error {{invalid operands}}
+
+ fd /= fd2;
+
+ // -------------------------------------------------------------------------
+ // Test that / rules apply to % too, except that doubles are not allowed.
+ // -------------------------------------------------------------------------
+
+ sc = sc % sc2;
+ uc = uc % uc2;
+ bc = bc % bc2; // expected-error {{invalid operands}}
+
+ sc = uc % sc2; // expected-error {{can't convert}}
+ sc = sc % bc2; // expected-error {{can't convert}}
+ uc = bc % uc2; // expected-error {{can't convert}}
+
+ sc %= sc2;
+ uc %= uc2;
+ bc %= bc2; // expected-error {{invalid operands}}
+
+ sc %= uc2; // expected-error {{can't convert}}
+ uc %= bc2; // expected-error {{can't convert}}
+ bc %= sc2; // expected-error {{can't convert}}
+
+ ss %= ss2;
+ us %= us2;
+ bs %= bs2; // expected-error {{invalid operands}}
+
+ si %= si2;
+ ui %= ui2;
+ bi %= bi2; // expected-error {{invalid operands}}
+
+ sl %= sl2;
+ ul %= ul2;
+ bl %= bl2; // expected-error {{invalid operands}}
+
+ fd %= fd2; // expected-error {{invalid operands}}
+
+ // -------------------------------------------------------------------------
+ // Test &.
+ // -------------------------------------------------------------------------
+
+ sc = sc & sc2;
+ sc = sc & uc2; // expected-error {{can't convert}}
+ sc = uc & sc2; // expected-error {{can't convert}}
+ sc = sc & bc2;
+ sc = bc & sc2;
+
+ uc = uc & uc2;
+ uc = sc & uc2; // expected-error {{can't convert}}
+ uc = uc & sc2; // expected-error {{can't convert}}
+ uc = bc & uc2;
+ uc = uc & bc2;
+
+ bc = bc & bc2;
+ bc = bc & uc2; // expected-error {{incompatible type}}
+ bc = uc & bc2; // expected-error {{incompatible type}}
+ bc = bc & sc2; // expected-error {{incompatible type}}
+ bc = sc & bc2; // expected-error {{incompatible type}}
+
+ fd = fd & fd2; // expected-error {{invalid operands}}
+ fd = bl & fd2; // expected-error {{invalid operands}}
+ fd = fd & bl2; // expected-error {{invalid operands}}
+ fd = fd & sl2; // expected-error {{invalid operands}}
+ fd = fd & ul2; // expected-error {{invalid operands}}
+
+ sc &= sc2;
+ sc &= uc2; // expected-error {{can't convert}}
+ sc &= bc2;
+
+ uc &= uc2;
+ uc &= sc2; // expected-error {{can't convert}}
+ uc &= bc2;
+
+ bc &= bc2;
+ bc &= sc2; // expected-error {{can't convert}}
+ bc &= uc2; // expected-error {{can't convert}}
+
+ sc &= ss2; // expected-error {{can't convert}}
+ sc &= si2; // expected-error {{can't convert}}
+ sc &= sl2; // expected-error {{can't convert}}
+ sc &= fd2; // expected-error {{invalid operands}}
+
+ us &= bc2; // expected-error {{can't convert}}
+ ui &= bc2; // expected-error {{can't convert}}
+ ul &= bc2; // expected-error {{can't convert}}
+ fd &= bc2; // expected-error {{invalid operands}}
+
+ ss &= ss2;
+ us &= us2;
+ bs &= bs2;
+
+ si &= si2;
+ ui &= ui2;
+ bi &= bi2;
+
+ sl &= sl2;
+ ul &= ul2;
+ bl &= bl2;
+
+ // -------------------------------------------------------------------------
+ // Test that & rules apply to | too.
+ // -------------------------------------------------------------------------
+
+ sc = sc | sc2;
+ sc = sc | uc2; // expected-error {{can't convert}}
+ sc = sc | bc2;
+
+ uc = uc | uc2;
+ uc = sc | uc2; // expected-error {{can't convert}}
+ uc = bc | uc2;
+
+ bc = bc | bc2;
+ bc = uc | bc2; // expected-error {{incompatible type}}
+ bc = bc | sc2; // expected-error {{incompatible type}}
+
+ fd = fd | fd2; // expected-error {{invalid operands}}
+ fd = bl | fd2; // expected-error {{invalid operands}}
+
+ ss |= ss2;
+ us |= us2;
+ bs |= bs2;
+
+ si |= si2;
+ ui |= ui2;
+ bi |= bi2;
+
+ sl |= sl2;
+ ul |= ul2;
+ bl |= bl2;
+
+ fd |= bl2; // expected-error {{invalid operands}}
+ fd |= fd2; // expected-error {{invalid operands}}
+
+ // -------------------------------------------------------------------------
+ // Test that & rules apply to ^ too.
+ // -------------------------------------------------------------------------
+
+ sc = sc ^ sc2;
+ sc = sc ^ uc2; // expected-error {{can't convert}}
+ sc = sc ^ bc2;
+
+ uc = uc ^ uc2;
+ uc = sc ^ uc2; // expected-error {{can't convert}}
+ uc = bc ^ uc2;
+
+ bc = bc ^ bc2;
+ bc = uc ^ bc2; // expected-error {{incompatible type}}
+ bc = bc ^ sc2; // expected-error {{incompatible type}}
+
+ fd = fd ^ fd2; // expected-error {{invalid operands}}
+ fd = bl ^ fd2; // expected-error {{invalid operands}}
+
+ ss ^= ss2;
+ us ^= us2;
+ bs ^= bs2;
+
+ si ^= si2;
+ ui ^= ui2;
+ bi ^= bi2;
+
+ sl ^= sl2;
+ ul ^= ul2;
+ bl ^= bl2;
+
+ fd ^= bl2; // expected-error {{invalid operands}}
+ fd ^= fd2; // expected-error {{invalid operands}}
+
+ // -------------------------------------------------------------------------
+ // Test <<.
+ // -------------------------------------------------------------------------
+
+ sc = sc << sc2;
+ sc = sc << uc2;
+ sc = uc << sc2; // expected-error {{incompatible type}}
+ sc = sc << bc2; // expected-error {{invalid operands}}
+ sc = bc << sc2; // expected-error {{invalid operands}}
+
+ uc = uc << uc2;
+ uc = sc << uc2; // expected-error {{assigning to}}
+ uc = uc << sc2;
+ uc = bc << uc2; // expected-error {{invalid operands}}
+ uc = uc << bc2; // expected-error {{invalid operands}}
+
+ bc = bc << bc2; // expected-error {{invalid operands}}
+ bc = bc << uc2; // expected-error {{invalid operands}}
+ bc = uc << bc2; // expected-error {{invalid operands}}
+ bc = bc << sc2; // expected-error {{invalid operands}}
+ bc = sc << bc2; // expected-error {{invalid operands}}
+
+ sc = sc << 1;
+ sc = sc << 1.0f; // expected-error {{integer is required}}
+ sc = sc << sc_scalar;
+ sc = sc << uc_scalar;
+ sc = sc << ss_scalar;
+ sc = sc << us_scalar;
+ sc = sc << si_scalar;
+ sc = sc << ui_scalar;
+ sc = sc << sl_scalar;
+ sc = sc << ul_scalar;
+ sc = sc_scalar << sc; // expected-error {{first operand is not a vector}}
+ sc = uc_scalar << sc; // expected-error {{first operand is not a vector}}
+ uc = uc << sc_scalar;
+ uc = uc << uc_scalar;
+ uc = sc_scalar << uc; // expected-error {{first operand is not a vector}}
+ uc = uc_scalar << uc; // expected-error {{first operand is not a vector}}
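+  // (A scalar shift count is accepted here; it is presumably splatted to the
+  // vector's element type, which is why the vector << scalar forms above
+  // compile while the scalar-first forms are rejected.)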
+
+ ss = ss << ss2;
+ ss = ss << ss_scalar;
+ us = us << us2;
+ us = us << us_scalar;
+ bs = bs << bs2; // expected-error {{invalid operands}}
+
+ si = si << si2;
+ si = si << si_scalar;
+ ui = ui << ui2;
+ ui = ui << ui_scalar;
+ bi = bi << bi2; // expected-error {{invalid operands}}
+
+ sl = sl << sl2;
+ sl = sl << sl_scalar;
+ ul = ul << ul2;
+ ul = ul << ul_scalar;
+ bl = bl << bl2; // expected-error {{invalid operands}}
+
+ fd = fd << fd2; // expected-error {{integer is required}}
+ fd = fd << ul2; // expected-error {{integer is required}}
+ fd = sl << fd2; // expected-error {{integer is required}}
+
+ sc <<= sc2;
+ sc <<= uc2;
+ sc <<= bc2; // expected-error {{invalid operands}}
+ sc <<= sc_scalar;
+
+ uc <<= uc2;
+ uc <<= sc2;
+ uc <<= bc2; // expected-error {{invalid operands}}
+ uc <<= uc_scalar;
+
+ bc <<= bc2; // expected-error {{invalid operands}}
+ bc <<= sc2; // expected-error {{invalid operands}}
+ bc <<= uc2; // expected-error {{invalid operands}}
+
+ sc <<= ss2; // expected-error {{vector operands do not have the same number of elements}}
+ sc <<= si2; // expected-error {{vector operands do not have the same number of elements}}
+ sc <<= sl2; // expected-error {{vector operands do not have the same number of elements}}
+ sc <<= fd2; // expected-error {{integer is required}}
+
+ ss <<= ss2;
+ ss <<= ss_scalar;
+ us <<= us2;
+ us <<= us_scalar;
+ bs <<= bs2; // expected-error {{invalid operands}}
+
+ si <<= si2;
+ si <<= si_scalar;
+ ui <<= ui2;
+ ui <<= ui_scalar;
+ bi <<= bi2; // expected-error {{invalid operands}}
+
+ sl <<= sl2;
+ sl <<= sl_scalar;
+ ul <<= ul2;
+ ul <<= ul_scalar;
+ bl <<= bl2; // expected-error {{invalid operands}}
+
+ fd <<= fd2; // expected-error {{integer is required}}
+
+ // -------------------------------------------------------------------------
+ // Test >>.
+ // -------------------------------------------------------------------------
+
+ sc = sc >> sc2;
+ sc = sc >> uc2;
+ sc = uc >> sc2; // expected-error {{incompatible type}}
+ sc = sc >> bc2; // expected-error {{invalid operands}}
+ sc = bc >> sc2; // expected-error {{invalid operands}}
+
+ uc = uc >> uc2;
+ uc = sc >> uc2; // expected-error {{assigning to}}
+ uc = uc >> sc2;
+ uc = bc >> uc2; // expected-error {{invalid operands}}
+ uc = uc >> bc2; // expected-error {{invalid operands}}
+
+ bc = bc >> bc2; // expected-error {{invalid operands}}
+ bc = bc >> uc2; // expected-error {{invalid operands}}
+ bc = uc >> bc2; // expected-error {{invalid operands}}
+ bc = bc >> sc2; // expected-error {{invalid operands}}
+ bc = sc >> bc2; // expected-error {{invalid operands}}
+
+ sc = sc >> 1;
+ sc = sc >> 1.0f; // expected-error {{integer is required}}
+ sc = sc >> sc_scalar;
+ sc = sc >> uc_scalar;
+ sc = sc >> ss_scalar;
+ sc = sc >> us_scalar;
+ sc = sc >> si_scalar;
+ sc = sc >> ui_scalar;
+ sc = sc >> sl_scalar;
+ sc = sc >> ul_scalar;
+ sc = sc_scalar >> sc; // expected-error {{first operand is not a vector}}
+ sc = uc_scalar >> sc; // expected-error {{first operand is not a vector}}
+ uc = uc >> sc_scalar;
+ uc = uc >> uc_scalar;
+ uc = sc_scalar >> uc; // expected-error {{first operand is not a vector}}
+ uc = uc_scalar >> uc; // expected-error {{first operand is not a vector}}
+
+ ss = ss >> ss2;
+ ss = ss >> ss_scalar;
+ us = us >> us2;
+ us = us >> us_scalar;
+ bs = bs >> bs2; // expected-error {{invalid operands}}
+
+ si = si >> si2;
+ si = si >> si_scalar;
+ ui = ui >> ui2;
+ ui = ui >> ui_scalar;
+ bi = bi >> bi2; // expected-error {{invalid operands}}
+
+ sl = sl >> sl2;
+ sl = sl >> sl_scalar;
+ ul = ul >> ul2;
+ ul = ul >> ul_scalar;
+ bl = bl >> bl2; // expected-error {{invalid operands}}
+
+ fd = fd >> fd2; // expected-error {{integer is required}}
+ fd = fd >> ul2; // expected-error {{integer is required}}
+ fd = sl >> fd2; // expected-error {{integer is required}}
+
+ sc >>= sc2;
+ sc >>= uc2;
+ sc >>= bc2; // expected-error {{invalid operands}}
+ sc >>= sc_scalar;
+
+ uc >>= uc2;
+ uc >>= sc2;
+ uc >>= bc2; // expected-error {{invalid operands}}
+ uc >>= uc_scalar;
+
+ bc >>= bc2; // expected-error {{invalid operands}}
+ bc >>= sc2; // expected-error {{invalid operands}}
+ bc >>= uc2; // expected-error {{invalid operands}}
+
+ sc >>= ss2; // expected-error {{vector operands do not have the same number of elements}}
+ sc >>= si2; // expected-error {{vector operands do not have the same number of elements}}
+ sc >>= sl2; // expected-error {{vector operands do not have the same number of elements}}
+ sc >>= fd2; // expected-error {{integer is required}}
+
+ ss >>= ss2;
+ ss >>= ss_scalar;
+ us >>= us2;
+ us >>= us_scalar;
+ bs >>= bs2; // expected-error {{invalid operands}}
+
+ si >>= si2;
+ si >>= si_scalar;
+ ui >>= ui2;
+ ui >>= ui_scalar;
+ bi >>= bi2; // expected-error {{invalid operands}}
+
+ sl >>= sl2;
+ sl >>= sl_scalar;
+ ul >>= ul2;
+ ul >>= ul_scalar;
+ bl >>= bl2; // expected-error {{invalid operands}}
+
+ fd >>= fd2; // expected-error {{integer is required}}
+
+ // -------------------------------------------------------------------------
+ // Test ==.
+ // -------------------------------------------------------------------------
+
+ (void)(sc == sc2);
+ (void)(uc == uc2);
+ (void)(bc == bc2);
+
+ (void)(sc == uc); // expected-error {{can't convert}}
+ (void)(sc == bc);
+
+ (void)(uc == sc); // expected-error {{can't convert}}
+ (void)(uc == bc);
+
+ (void)(bc == sc);
+ (void)(bc == uc);
+
+ (void)(ss == ss2);
+ (void)(us == us2);
+ (void)(bs == bs2);
+
+ (void)(si == si2);
+ (void)(ui == ui2);
+ (void)(bi == bi2);
+
+ (void)(sl == sl2);
+ (void)(ul == ul2);
+ (void)(bl == bl2);
+ (void)(fd == fd2);
+
+ (void)(fd == ul); // expected-error {{can't convert}}
+ (void)(ul == fd); // expected-error {{can't convert}}
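+  // (Each comparison yields a vector of the corresponding 'vector bool' type,
+  // not a scalar int, so the otherwise-unused results are cast to void.)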
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to != too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc != sc2);
+ (void)(uc != uc2);
+ (void)(bc != bc2);
+
+ (void)(sc != uc); // expected-error {{can't convert}}
+ (void)(sc != bc);
+
+ (void)(ss != ss2);
+ (void)(us != us2);
+ (void)(bs != bs2);
+
+ (void)(si != si2);
+ (void)(ui != ui2);
+ (void)(bi != bi2);
+
+ (void)(sl != sl2);
+ (void)(ul != ul2);
+ (void)(bl != bl2);
+ (void)(fd != fd2);
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to <= too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc <= sc2);
+ (void)(uc <= uc2);
+ (void)(bc <= bc2);
+
+ (void)(sc <= uc); // expected-error {{can't convert}}
+ (void)(sc <= bc);
+
+ (void)(ss <= ss2);
+ (void)(us <= us2);
+ (void)(bs <= bs2);
+
+ (void)(si <= si2);
+ (void)(ui <= ui2);
+ (void)(bi <= bi2);
+
+ (void)(sl <= sl2);
+ (void)(ul <= ul2);
+ (void)(bl <= bl2);
+ (void)(fd <= fd2);
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to >= too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc >= sc2);
+ (void)(uc >= uc2);
+ (void)(bc >= bc2);
+
+ (void)(sc >= uc); // expected-error {{can't convert}}
+ (void)(sc >= bc);
+
+ (void)(ss >= ss2);
+ (void)(us >= us2);
+ (void)(bs >= bs2);
+
+ (void)(si >= si2);
+ (void)(ui >= ui2);
+ (void)(bi >= bi2);
+
+ (void)(sl >= sl2);
+ (void)(ul >= ul2);
+ (void)(bl >= bl2);
+ (void)(fd >= fd2);
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to < too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc < sc2);
+ (void)(uc < uc2);
+ (void)(bc < bc2);
+
+ (void)(sc < uc); // expected-error {{can't convert}}
+ (void)(sc < bc);
+
+ (void)(ss < ss2);
+ (void)(us < us2);
+ (void)(bs < bs2);
+
+ (void)(si < si2);
+ (void)(ui < ui2);
+ (void)(bi < bi2);
+
+ (void)(sl < sl2);
+ (void)(ul < ul2);
+ (void)(bl < bl2);
+ (void)(fd < fd2);
+
+ // -------------------------------------------------------------------------
+ // Test that == rules apply to > too.
+ // -------------------------------------------------------------------------
+
+ (void)(sc > sc2);
+ (void)(uc > uc2);
+ (void)(bc > bc2);
+
+ (void)(sc > uc); // expected-error {{can't convert}}
+ (void)(sc > bc);
+
+ (void)(ss > ss2);
+ (void)(us > us2);
+ (void)(bs > bs2);
+
+ (void)(si > si2);
+ (void)(ui > ui2);
+ (void)(bi > bi2);
+
+ (void)(sl > sl2);
+ (void)(ul > ul2);
+ (void)(bl > bl2);
+ (void)(fd > fd2);
+}
diff --git a/test/SemaCXX/warn-pessmizing-move.cpp b/test/SemaCXX/warn-pessmizing-move.cpp
index 6b49944f2878..71e99dca007b 100644
--- a/test/SemaCXX/warn-pessmizing-move.cpp
+++ b/test/SemaCXX/warn-pessmizing-move.cpp
@@ -23,10 +23,6 @@ A test1(A a1) {
return a1;
return a2;
return std::move(a1);
- // expected-warning@-1{{prevents copy elision}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(a2);
// expected-warning@-1{{prevents copy elision}}
// expected-note@-2{{remove std::move call}}
@@ -46,10 +42,6 @@ B test2(A a1, B b1) {
return b1;
return b2;
return std::move(b1);
- // expected-warning@-1{{prevents copy elision}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(b2);
// expected-warning@-1{{prevents copy elision}}
// expected-note@-2{{remove std::move call}}
@@ -163,9 +155,10 @@ A test7() {
#define wrap1(x) x
#define wrap2(x) x
-// Macro test. Since the std::move call is outside the macro, it is
+// Macro test. Since the std::move call is outside the macro, it is
// safe to suggest a fix-it.
-A test8(A a) {
+A test8() {
+ A a;
return std::move(a);
// expected-warning@-1{{prevents copy elision}}
// expected-note@-2{{remove std::move call}}
@@ -184,7 +177,8 @@ A test8(A a) {
}
#define test9 \
- A test9(A a) { \
+ A test9() { \
+ A a; \
return std::move(a); \
}
@@ -196,8 +190,40 @@ test9
#define return_a return std::move(a)
// Macro test. The std::move call is inside the macro, so no fix-it is suggested.
-A test10(A a) {
+A test10() {
+ A a;
return_a;
// expected-warning@-1{{prevents copy elision}}
// CHECK-NOT: fix-it
}
+
+namespace templates {
+ struct A {};
+ struct B { B(A); };
+
+ // Warn once here since the type is not dependent.
+ template <typename T>
+ A test1() {
+ A a;
+ return std::move(a);
+ // expected-warning@-1{{prevents copy elision}}
+ // expected-note@-2{{remove std::move call}}
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:12-[[@LINE-3]]:22}:""
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:23-[[@LINE-4]]:24}:""
+ }
+ void run_test1() {
+ test1<A>();
+ test1<B>();
+ }
+
+  // T1 and T2 may not be the same, so the warning may not always apply.
+ template <typename T1, typename T2>
+ T1 test2() {
+ T2 t;
+ return std::move(t);
+ }
+ void run_test2() {
+ test2<A, A>();
+ test2<B, A>();
+ }
+}
diff --git a/test/SemaCXX/warn-redundant-move.cpp b/test/SemaCXX/warn-redundant-move.cpp
index feb9e6f408d9..abfb001fa8e6 100644
--- a/test/SemaCXX/warn-redundant-move.cpp
+++ b/test/SemaCXX/warn-redundant-move.cpp
@@ -1,5 +1,6 @@
// RUN: %clang_cc1 -fsyntax-only -Wredundant-move -std=c++11 -verify %s
// RUN: %clang_cc1 -fsyntax-only -Wredundant-move -std=c++11 -fdiagnostics-parseable-fixits %s 2>&1 | FileCheck %s
+// RUN: %clang_cc1 -fsyntax-only -std=c++11 %s -ast-dump | FileCheck %s --check-prefix=CHECK-AST
// definitions for std::move
namespace std {
@@ -12,6 +13,7 @@ template <class T> typename remove_reference<T>::type &&move(T &&t);
}
}
+// test1 and test2 should not warn until after implementation of DR1579.
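+// (CWG DR1579 extends implicit move to returns that invoke a converting
+// constructor, which is why these cases are expected to warn once it lands.)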
struct A {};
struct B : public A {};
@@ -20,15 +22,7 @@ A test1(B b1) {
return b1;
return b2;
return std::move(b1);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(b2);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
}
struct C {
@@ -46,25 +40,9 @@ C test2(A a1, B b1) {
return b2;
return std::move(a1);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(a2);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(b1);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
return std::move(b2);
- // expected-warning@-1{{redundant move}}
- // expected-note@-2{{remove std::move call}}
- // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
- // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:22-[[@LINE-4]]:23}:""
}
// Copy of tests above with types changed to reference types.
@@ -90,3 +68,49 @@ C test4(A& a1, B& b1) {
return std::move(b1);
return std::move(b2);
}
+
+// PR23819, case 2
+struct D {};
+D test5(D d) {
+ return d;
+ // Verify the implicit move from the AST dump
+ // CHECK-AST: ReturnStmt{{.*}}line:[[@LINE-2]]
+ // CHECK-AST-NEXT: CXXConstructExpr{{.*}}struct D{{.*}}void (struct D &&)
+ // CHECK-AST-NEXT: ImplicitCastExpr
+ // CHECK-AST-NEXT: DeclRefExpr{{.*}}ParmVar{{.*}}'d'
+
+ return std::move(d);
+ // expected-warning@-1{{redundant move in return statement}}
+ // expected-note@-2{{remove std::move call here}}
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:10-[[@LINE-3]]:20}:""
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:21-[[@LINE-4]]:22}:""
+}
+
+namespace templates {
+ struct A {};
+ struct B { B(A); };
+
+ // Warn once here since the type is not dependent.
+ template <typename T>
+ A test1(A a) {
+ return std::move(a);
+ // expected-warning@-1{{redundant move in return statement}}
+ // expected-note@-2{{remove std::move call here}}
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-3]]:12-[[@LINE-3]]:22}:""
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-4]]:23-[[@LINE-4]]:24}:""
+ }
+ void run_test1() {
+ test1<A>(A());
+ test1<B>(A());
+ }
+
+  // T1 and T2 may not be the same, so the warning may not always apply.
+ template <typename T1, typename T2>
+ T1 test2(T2 t) {
+ return std::move(t);
+ }
+ void run_test2() {
+ test2<A, A>(A());
+ test2<B, A>(A());
+ }
+}
diff --git a/test/SemaObjC/circular-container.m b/test/SemaObjC/circular-container.m
index 1a2a24e8985a..1f06595f77be 100644
--- a/test/SemaObjC/circular-container.m
+++ b/test/SemaObjC/circular-container.m
@@ -144,3 +144,64 @@ void checkNSMutableOrderedSet() {
[s replaceObjectAtIndex:0 withObject:s]; // expected-warning {{adding 's' to 's' might cause circular dependency in container}}
}
+// Test subclassing
+
+@interface FootableSet : NSMutableSet
+@end
+
+@implementation FootableSet
+- (void)meth {
+ [super addObject:self]; // expected-warning {{adding 'self' to 'super' might cause circular dependency in container}}
+ [super addObject:nil]; // no-warning
+ [self addObject:self]; // expected-warning {{adding 'self' to 'self' might cause circular dependency in container}}
+}
+@end
+
+@interface FootableArray : NSMutableArray
+@end
+
+@implementation FootableArray
+- (void)meth {
+ [super addObject:self]; // expected-warning {{adding 'self' to 'super' might cause circular dependency in container}}
+ [super addObject:nil]; // no-warning
+ [self addObject:self]; // expected-warning {{adding 'self' to 'self' might cause circular dependency in container}}
+}
+@end
+
+@interface FootableDictionary : NSMutableDictionary
+@end
+
+@implementation FootableDictionary
+- (void)meth {
+ [super setObject:self forKey:@"key"]; // expected-warning {{adding 'self' to 'super' might cause circular dependency in container}}
+ [super setObject:nil forKey:@"key"]; // no-warning
+ [self setObject:self forKey:@"key"]; // expected-warning {{adding 'self' to 'self' might cause circular dependency in container}}
+}
+@end
+
+
+void subclassingNSMutableArray() {
+  FootableArray *a = nil; // expected-note 5 {{'a' declared here}}
+
+ [a addObject:a]; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+ [a insertObject:a atIndex:0]; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+ [a replaceObjectAtIndex:0 withObject:a]; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+ [a setObject:a atIndexedSubscript:0]; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+ a[0] = a; // expected-warning {{adding 'a' to 'a' might cause circular dependency in container}}
+}
+
+void subclassingNSMutableDictionary() {
+ FootableDictionary *d = nil; // expected-note 4 {{'d' declared here}}
+
+ [d setObject:d forKey:@"key"]; // expected-warning {{adding 'd' to 'd' might cause circular dependency in container}}
+ [d setObject:d forKeyedSubscript:@"key"]; // expected-warning {{adding 'd' to 'd' might cause circular dependency in container}}
+ [d setValue:d forKey:@"key"]; // expected-warning {{adding 'd' to 'd' might cause circular dependency in container}}
+ d[@"key"] = d; // expected-warning {{adding 'd' to 'd' might cause circular dependency in container}}
+}
+
+void subclassingNSMutableSet() {
+ FootableSet *s = nil; // expected-note {{'s' declared here}}
+
+ [s addObject:s]; // expected-warning {{adding 's' to 's' might cause circular dependency in container}}
+}
+
diff --git a/test/SemaObjC/comptypes-9.m b/test/SemaObjC/comptypes-9.m
index cc6932dcdbf3..7064f167aaf3 100644
--- a/test/SemaObjC/comptypes-9.m
+++ b/test/SemaObjC/comptypes-9.m
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 -fsyntax-only %s
-// FIXME: This test case tests the patch applied in: http://lists.cs.uiuc.edu/pipermail/cfe-commits/Week-of-Mon-20080602/006017.html
+// FIXME: This test case tests the patch applied in: http://lists.llvm.org/pipermail/cfe-commits/Week-of-Mon-20080602/006017.html
// Eventually that logic should be treated as an extension.
typedef signed char BOOL;
diff --git a/test/SemaObjC/objc-boxed-expressions-nsvalue.m b/test/SemaObjC/objc-boxed-expressions-nsvalue.m
new file mode 100644
index 000000000000..1599f28c7944
--- /dev/null
+++ b/test/SemaObjC/objc-boxed-expressions-nsvalue.m
@@ -0,0 +1,101 @@
+// RUN: %clang_cc1 -fsyntax-only -triple x86_64-apple-macosx10.9 -verify %s
+
+#define BOXABLE __attribute__((objc_boxable))
+
+typedef struct BOXABLE _NSPoint {
+ int dummy;
+} NSPoint;
+
+typedef struct BOXABLE _NSSize {
+ int dummy;
+} NSSize;
+
+typedef struct BOXABLE _NSRect {
+ int dummy;
+} NSRect;
+
+typedef struct BOXABLE _CGPoint {
+ int dummy;
+} CGPoint;
+
+typedef struct BOXABLE _CGSize {
+ int dummy;
+} CGSize;
+
+typedef struct BOXABLE _CGRect {
+ int dummy;
+} CGRect;
+
+typedef struct BOXABLE _NSRange {
+ int dummy;
+} NSRange;
+
+struct _NSEdgeInsets {
+ int dummy;
+};
+
+typedef struct BOXABLE _NSEdgeInsets NSEdgeInsets;
+
+typedef struct _SomeStruct {
+ double d;
+} SomeStruct;
+
+typedef union BOXABLE _BoxableUnion {
+ int dummy;
+} BoxableUnion;
+
+void checkNSValueDiagnostic() {
+ NSRect rect;
+ id value = @(rect); // expected-error{{NSValue must be available to use Objective-C boxed expressions}}
+}
+
+@interface NSValue
++ (NSValue *)valueWithBytes:(const void *)value objCType:(const char *)type;
+@end
+
+int main() {
+ NSPoint ns_point;
+ id ns_point_value = @(ns_point);
+
+ NSSize ns_size;
+ id ns_size_value = @(ns_size);
+
+ NSRect ns_rect;
+ id ns_rect_value = @(ns_rect);
+
+ CGPoint cg_point;
+ id cg_point_value = @(cg_point);
+
+ CGSize cg_size;
+ id cg_size_value = @(cg_size);
+
+ CGRect cg_rect;
+ id cg_rect_value = @(cg_rect);
+
+ NSRange ns_range;
+ id ns_range_value = @(ns_range);
+
+ NSEdgeInsets edge_insets;
+ id edge_insets_object = @(edge_insets);
+
+ BoxableUnion boxable_union;
+ id boxed_union = @(boxable_union);
+
+ SomeStruct s;
+ id err = @(s); // expected-error{{illegal type 'SomeStruct' (aka 'struct _SomeStruct') used in a boxed expression}}
+}
+
+CGRect getRect() {
+ CGRect r;
+ return r;
+}
+
+SomeStruct getSomeStruct() {
+ SomeStruct s;
+ return s;
+}
+
+void rvalue() {
+ id rv_rect = @(getRect());
+ id rv_some_struct = @(getSomeStruct()); // expected-error {{illegal type 'SomeStruct' (aka 'struct _SomeStruct') used in a boxed expression}}
+}
diff --git a/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm b/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
new file mode 100644
index 000000000000..375b9555fcaf
--- /dev/null
+++ b/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
@@ -0,0 +1,118 @@
+// RUN: %clang_cc1 -fsyntax-only -triple x86_64-apple-macosx10.9 -verify %s
+
+#define BOXABLE __attribute__((objc_boxable))
+
+typedef struct BOXABLE _NSPoint {
+ int dummy;
+} NSPoint;
+
+typedef struct BOXABLE _NSSize {
+ int dummy;
+} NSSize;
+
+typedef struct BOXABLE _NSRect {
+ int dummy;
+} NSRect;
+
+typedef struct BOXABLE _CGPoint {
+ int dummy;
+} CGPoint;
+
+typedef struct BOXABLE _CGSize {
+ int dummy;
+} CGSize;
+
+typedef struct BOXABLE _CGRect {
+ int dummy;
+} CGRect;
+
+typedef struct BOXABLE _NSRange {
+ int dummy;
+} NSRange;
+
+typedef struct BOXABLE _NSEdgeInsets {
+ int dummy;
+} NSEdgeInsets;
+
+typedef struct BOXABLE _NSEdgeInsets NSEdgeInsets;
+
+typedef struct _SomeStruct {
+ double d;
+} SomeStruct;
+
+struct BOXABLE NonTriviallyCopyable {
+ double d;
+ NonTriviallyCopyable() {}
+ NonTriviallyCopyable(const NonTriviallyCopyable &obj) {}
+};
+
+void checkNSValueDiagnostic() {
+ NSRect rect;
+ id value = @(rect); // expected-error{{NSValue must be available to use Objective-C boxed expressions}}
+}
+
+@interface NSValue
++ (NSValue *)valueWithBytes:(const void *)value objCType:(const char *)type;
+@end
+
+int main() {
+ NSPoint ns_point;
+ id ns_point_value = @(ns_point);
+
+ NSSize ns_size;
+ id ns_size_value = @(ns_size);
+
+ NSRect ns_rect;
+ id ns_rect_value = @(ns_rect);
+
+ CGPoint cg_point;
+ id cg_point_value = @(cg_point);
+
+ CGSize cg_size;
+ id cg_size_value = @(cg_size);
+
+ CGRect cg_rect;
+ id cg_rect_value = @(cg_rect);
+
+ NSRange ns_range;
+ id ns_range_value = @(ns_range);
+
+ NSEdgeInsets edge_insets;
+ id edge_insets_object = @(edge_insets);
+
+ SomeStruct s;
+ id err = @(s); // expected-error{{illegal type 'SomeStruct' (aka '_SomeStruct') used in a boxed expression}}
+
+ NonTriviallyCopyable ntc;
+ id ntcErr = @(ntc); // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
+}
+
+CGRect getRect() {
+ CGRect r;
+ return r;
+}
+
+SomeStruct getSomeStruct() {
+ SomeStruct s;
+ return s;
+}
+
+void rvalue() {
+ id rv_rect = @(getRect());
+ id rv_some_struct = @(getSomeStruct()); // expected-error {{illegal type 'SomeStruct' (aka '_SomeStruct') used in a boxed expression}}
+}
+
+template <class T> id box(T value) { return @(value); } // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
+void test_template_1(NSRect rect, NonTriviallyCopyable ntc) {
+ id x = box(rect);
+ id y = box(ntc); // expected-note{{in instantiation of function template specialization 'box<NonTriviallyCopyable>' requested here}}
+}
+
+template <unsigned i> id boxRect(NSRect rect) { return @(rect); }
+template <unsigned i> id boxNTC(NonTriviallyCopyable ntc) { return @(ntc); } // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
+void test_template_2(NSRect rect, NonTriviallyCopyable ntc) {
+ id x = boxRect<0>(rect);
+ id y = boxNTC<0>(ntc);
+}
+
+
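The NonTriviallyCopyable cases hinge on how the boxing is implemented: +valueWithBytes:objCType: takes a bitwise snapshot of the value, which is only sound for trivially copyable types, so a user-provided copy constructor disqualifies a type even when it carries the attribute. A purely illustrative sketch of the distinction, with hypothetical type names:

    struct __attribute__((objc_boxable)) Plain {
      int x;                        // trivially copyable: @(p) is accepted
    };

    struct __attribute__((objc_boxable)) Tracked {
      int x;
      Tracked(const Tracked &other) : x(other.x) {}
      // user-provided copy constructor: @(t) is rejected as
      // non-trivially copyable
    };

The template cases at the end pin down where that error is attributed: since the legality of @(value) depends on the template argument, the error is emitted against the template body only when an offending specialization is actually instantiated, with an "in instantiation of" note at the call site.
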
diff --git a/test/SemaTemplate/crash-unparsed-exception.cpp b/test/SemaTemplate/crash-unparsed-exception.cpp
new file mode 100644
index 000000000000..1226b35deb70
--- /dev/null
+++ b/test/SemaTemplate/crash-unparsed-exception.cpp
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify -fcxx-exceptions -fexceptions %s
+
+struct A {
+ virtual ~A();
+};
+template <class>
+struct B {};
+struct C {
+ template <typename>
+ struct D {
+ ~D() throw();
+ };
+ struct E : A {
+ D<int> d; //expected-error{{exception specification is not available until end of class definition}}
+ };
+ B<int> b; //expected-note{{in instantiation of template class 'B<int>' requested here}}
+};