Diffstat (limited to 'test/CodeGen/avx-shuffle-builtins.c')
-rw-r--r--  test/CodeGen/avx-shuffle-builtins.c | 118
1 file changed, 115 insertions(+), 3 deletions(-)
diff --git a/test/CodeGen/avx-shuffle-builtins.c b/test/CodeGen/avx-shuffle-builtins.c
index 76e2395fe8e3..913f9d238130 100644
--- a/test/CodeGen/avx-shuffle-builtins.c
+++ b/test/CodeGen/avx-shuffle-builtins.c
@@ -48,19 +48,19 @@ __m256 test_mm256_permute_ps(__m256 a) {
__m256d test_mm256_permute2f128_pd(__m256d a, __m256d b) {
// Check if the mask is correct
- // CHECK: @llvm.x86.avx.vperm2f128.pd.256
+ // CHECK: shufflevector{{.*}}<i32 2, i32 3, i32 6, i32 7>
return _mm256_permute2f128_pd(a, b, 0x31);
}
__m256 test_mm256_permute2f128_ps(__m256 a, __m256 b) {
// Check if the mask is correct
- // CHECK: @llvm.x86.avx.vperm2f128.ps.256
+ // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
return _mm256_permute2f128_ps(a, b, 0x13);
}
__m256i test_mm256_permute2f128_si256(__m256i a, __m256i b) {
// Check if the mask is correct
- // CHECK: @llvm.x86.avx.vperm2f128.si.256
+ // CHECK: shufflevector{{.*}} <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
return _mm256_permute2f128_si256(a, b, 0x20);
}
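+
+// Note: the masks above follow the vperm2f128 immediate encoding, where bits
+// [1:0] select the 128-bit source lane for the low half of the result and
+// bits [5:4] select the lane for the high half (0/1 = lanes of the first
+// operand, 2/3 = lanes of the second). For example, 0x31 picks the high lane
+// of each source; with the operands concatenated as (a, b) that is
+// <2, 3, 6, 7> for the pd case. The {{.*}} leaves the actual shufflevector
+// operand order unchecked.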
@@ -97,3 +97,115 @@ test_mm256_broadcast_ss(float const *__a) {
// CHECK: insertelement <8 x float> {{.*}}, i32 7
return _mm256_broadcast_ss(__a);
}
+
+// Make sure we have the correct mask for each insertf128 case.
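+// Note: the 128-bit operand is widened and appears as the second shufflevector
+// operand, so its elements are numbered after the 256-bit operand's (8..11 for
+// ps, 4..5 for pd/si256). Inserting into lane 0 takes those elements for the
+// low half and keeps the upper half of a; lane 1 is the mirror image.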
+
+__m256 test_mm256_insertf128_ps_0(__m256 a, __m128 b) {
+ // CHECK-LABEL: @test_mm256_insertf128_ps_0
+ // CHECK: shufflevector{{.*}}<i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+ return _mm256_insertf128_ps(a, b, 0);
+}
+
+__m256d test_mm256_insertf128_pd_0(__m256d a, __m128d b) {
+ // CHECK-LABEL: @test_mm256_insertf128_pd_0
+ // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
+ return _mm256_insertf128_pd(a, b, 0);
+}
+
+__m256i test_mm256_insertf128_si256_0(__m256i a, __m128i b) {
+ // CHECK-LABEL: @test_mm256_insertf128_si256_0
+ // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
+ return _mm256_insertf128_si256(a, b, 0);
+}
+
+__m256 test_mm256_insertf128_ps_1(__m256 a, __m128 b) {
+ // CHECK-LABEL: @test_mm256_insertf128_ps_1
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+ return _mm256_insertf128_ps(a, b, 1);
+}
+
+__m256d test_mm256_insertf128_pd_1(__m256d a, __m128d b) {
+ // CHECK-LABEL: @test_mm256_insertf128_pd_1
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+ return _mm256_insertf128_pd(a, b, 1);
+}
+
+__m256i test_mm256_insertf128_si256_1(__m256i a, __m128i b) {
+ // CHECK-LABEL: @test_mm256_insertf128_si256_1
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+ return _mm256_insertf128_si256(a, b, 1);
+}
+
+// Make sure we have the correct mask for each extractf128 case.
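+// Note: each extract should lower to a plain subvector shuffle of the 256-bit
+// source: lane 0 uses the low element indices and lane 1 the high ones
+// (e.g. <2, 3> for the upper two doubles).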
+
+__m128 test_mm256_extractf128_ps_0(__m256 a) {
+ // CHECK-LABEL: @test_mm256_extractf128_ps_0
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3>
+ return _mm256_extractf128_ps(a, 0);
+}
+
+__m128d test_mm256_extractf128_pd_0(__m256d a) {
+ // CHECK-LABEL: @test_mm256_extractf128_pd_0
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1>
+ return _mm256_extractf128_pd(a, 0);
+}
+
+__m128i test_mm256_extractf128_si256_0(__m256i a) {
+ // CHECK-LABEL: @test_mm256_extractf128_si256_0
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1>
+ return _mm256_extractf128_si256(a, 0);
+}
+
+__m128 test_mm256_extractf128_ps_1(__m256 a) {
+ // CHECK-LABEL: @test_mm256_extractf128_ps_1
+ // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 6, i32 7>
+ return _mm256_extractf128_ps(a, 1);
+}
+
+__m128d test_mm256_extractf128_pd_1(__m256d a) {
+ // CHECK-LABEL: @test_mm256_extractf128_pd_1
+ // CHECK: shufflevector{{.*}}<i32 2, i32 3>
+ return _mm256_extractf128_pd(a, 1);
+}
+
+__m128i test_mm256_extractf128_si256_1(__m256i a) {
+ // CHECK-LABEL: @test_mm256_extractf128_si256_1
+ // CHECK: shufflevector{{.*}}<i32 2, i32 3>
+ return _mm256_extractf128_si256(a, 1);
+}
+
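+// Make sure the set/setr forms lower to a single full-width shuffle.
+// Note: _mm256_set_m128* places its second argument in the low half, and
+// _mm256_setr_m128* takes its halves in ascending order, so all six cases are
+// expected to produce the same <0..7> concatenation mask (8-wide even for the
+// d/i variants, presumably because those are implemented via the ps form).
+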
+__m256 test_mm256_set_m128(__m128 hi, __m128 lo) {
+ // CHECK-LABEL: @test_mm256_set_m128
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ return _mm256_set_m128(hi, lo);
+}
+
+__m256d test_mm256_set_m128d(__m128d hi, __m128d lo) {
+ // CHECK-LABEL: @test_mm256_set_m128d
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ return _mm256_set_m128d(hi, lo);
+}
+
+__m256i test_mm256_set_m128i(__m128i hi, __m128i lo) {
+ // CHECK-LABEL: @test_mm256_set_m128i
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ return _mm256_set_m128i(hi, lo);
+}
+
+__m256 test_mm256_setr_m128(__m128 hi, __m128 lo) {
+ // CHECK-LABEL: @test_mm256_setr_m128
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ return _mm256_setr_m128(lo, hi);
+}
+
+__m256d test_mm256_setr_m128d(__m128d hi, __m128d lo) {
+ // CHECK-LABEL: @test_mm256_setr_m128d
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ return _mm256_setr_m128d(lo, hi);
+}
+
+__m256i test_mm256_setr_m128i(__m128i hi, __m128i lo) {
+ // CHECK-LABEL: @test_mm256_setr_m128i
+ // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ return _mm256_setr_m128i(lo, hi);
+}