about summary refs log tree commit diff
path: root/test/CodeGen/X86/avx512-insert-extract.ll
diff options
context:
space:
mode:
Diffstat (limited to 'test/CodeGen/X86/avx512-insert-extract.ll')
-rw-r--r--  test/CodeGen/X86/avx512-insert-extract.ll  479
1 files changed, 456 insertions, 23 deletions
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index 2c42aca33e45..c6cc74289971 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -201,10 +201,11 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: andl $1, %eax
; KNL-NEXT: testb %al, %al
; KNL-NEXT: je LBB10_2
; KNL-NEXT: ## BB#1: ## %A
-; KNL-NEXT: vmovaps %zmm1, %zmm0
+; KNL-NEXT: vmovdqa64 %zmm1, %zmm0
; KNL-NEXT: retq
; KNL-NEXT: LBB10_2: ## %B
; KNL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
@@ -216,10 +217,11 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; SKX-NEXT: kshiftlw $11, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: andl $1, %eax
; SKX-NEXT: testb %al, %al
; SKX-NEXT: je LBB10_2
; SKX-NEXT: ## BB#1: ## %A
-; SKX-NEXT: vmovaps %zmm1, %zmm0
+; SKX-NEXT: vmovdqa64 %zmm1, %zmm0
; SKX-NEXT: retq
; SKX-NEXT: LBB10_2: ## %B
; SKX-NEXT: vpaddd %zmm0, %zmm1, %zmm0
@@ -243,6 +245,7 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; KNL-NEXT: kshiftlw $15, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: andl $1, %eax
; KNL-NEXT: testb %al, %al
; KNL-NEXT: cmoveq %rsi, %rdi
; KNL-NEXT: movq %rdi, %rax
@@ -256,6 +259,7 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: andl $1, %eax
; SKX-NEXT: testb %al, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
@@ -271,24 +275,28 @@ define i16 @test13(i32 %a, i32 %b) {
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
+; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k0
; KNL-NEXT: movw $-4, %ax
; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kshiftrw $1, %k1, %k1
+; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: korw %k0, %k1, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test13:
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
+; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovw %eax, %k0
; SKX-NEXT: movw $-4, %ax
; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kshiftrw $1, %k1, %k1
+; SKX-NEXT: kshiftlw $1, %k1, %k1
; SKX-NEXT: korw %k0, %k1, %k0
; SKX-NEXT: kmovw %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
%cmp_res = icmp ult i32 %a, %b
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %cmp_res, i32 0
@@ -303,6 +311,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: andl $1, %eax
; KNL-NEXT: testb %al, %al
; KNL-NEXT: cmoveq %rsi, %rdi
; KNL-NEXT: movq %rdi, %rax
@@ -314,6 +323,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; SKX-NEXT: kshiftlb $3, %k0, %k0
; SKX-NEXT: kshiftrb $7, %k0, %k0
; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: andl $1, %eax
; SKX-NEXT: testb %al, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
@@ -353,12 +363,16 @@ define i16 @test16(i1 *%addr, i16 %a) {
; KNL: ## BB#0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: kmovw %esi, %k1
-; KNL-NEXT: kshiftlw $10, %k0, %k0
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k2} {z}
+; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
+; KNL-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; KNL-NEXT: vpslld $31, %zmm2, %zmm0
+; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test16:
@@ -367,10 +381,12 @@ define i16 @test16(i1 *%addr, i16 %a) {
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: kmovw %esi, %k1
-; SKX-NEXT: kshiftlw $10, %k0, %k0
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: vpmovm2d %k1, %zmm0
+; SKX-NEXT: vpmovm2d %k0, %zmm1
+; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
+; SKX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; SKX-NEXT: vpmovd2m %zmm2, %k0
; SKX-NEXT: kmovw %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
%a1 = bitcast i16 %a to <16 x i1>
@@ -384,12 +400,16 @@ define i8 @test17(i1 *%addr, i8 %a) {
; KNL: ## BB#0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: kmovw %esi, %k1
-; KNL-NEXT: kshiftlw $4, %k0, %k0
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2} {z}
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
+; KNL-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
+; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test17:
@@ -398,10 +418,12 @@ define i8 @test17(i1 *%addr, i8 %a) {
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: kmovb %esi, %k1
-; SKX-NEXT: kshiftlb $4, %k0, %k0
-; SKX-NEXT: korb %k0, %k1, %k0
+; SKX-NEXT: vpmovm2q %k1, %zmm0
+; SKX-NEXT: vpmovm2q %k0, %zmm1
+; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
+; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kmovb %k0, %eax
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
%a1 = bitcast i8 %a to <8 x i1>
@@ -705,18 +727,18 @@ define <4 x i64> @insert_v4i64(<4 x i64> %x, i64 %y , i64* %ptr) {
define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y , i64* %ptr) {
; KNL-LABEL: insert_v2i64:
; KNL: ## BB#0:
+; KNL-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm0
; KNL-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm0
-; KNL-NEXT: vpinsrq $3, %rdi, %xmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v2i64:
; SKX: ## BB#0:
+; SKX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm0
; SKX-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm0
-; SKX-NEXT: vpinsrq $3, %rdi, %xmm0, %xmm0
; SKX-NEXT: retq
%val = load i64, i64* %ptr
%r1 = insertelement <2 x i64> %x, i64 %val, i32 1
- %r2 = insertelement <2 x i64> %r1, i64 %y, i32 3
+ %r2 = insertelement <2 x i64> %r1, i64 %y, i32 0
ret <2 x i64> %r2
}
@@ -1017,3 +1039,414 @@ define <32 x i8> @test_insert_128_v32i8(<32 x i8> %x, i8 %y) {
%r = insertelement <32 x i8> %x, i8 %y, i32 20
ret <32 x i8> %r
}
+
+define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32> %y) {
+; KNL-LABEL: test_insertelement_v32i1:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi0:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi1:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi2:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $32, %rsp
+; KNL-NEXT: cmpl %esi, %edi
+; KNL-NEXT: vpcmpltud %zmm3, %zmm1, %k0
+; KNL-NEXT: kshiftlw $14, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: kshiftlw $15, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vmovd %ecx, %xmm1
+; KNL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $13, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $12, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $11, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $10, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $9, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $8, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $7, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $6, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $5, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $4, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $3, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $2, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftlw $1, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; KNL-NEXT: vpcmpltud %zmm2, %zmm0, %k0
+; KNL-NEXT: kshiftlw $14, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: kshiftlw $15, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vmovd %ecx, %xmm0
+; KNL-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $13, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $12, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $11, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $10, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $9, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $8, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $7, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $6, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $5, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $4, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $3, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $2, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftlw $1, %k0, %k1
+; KNL-NEXT: kshiftrw $15, %k1, %k1
+; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: vpsllw $7, %ymm0, %ymm0
+; KNL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; KNL-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; KNL-NEXT: sbbl %eax, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm1
+; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
+; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
+; KNL-NEXT: vpslld $31, %zmm1, %zmm1
+; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
+; KNL-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
+; KNL-NEXT: vpslld $31, %zmm0, %zmm0
+; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
+; KNL-NEXT: kmovw %k0, (%rsp)
+; KNL-NEXT: movl (%rsp), %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_insertelement_v32i1:
+; SKX: ## BB#0:
+; SKX-NEXT: cmpl %esi, %edi
+; SKX-NEXT: setb %al
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k1
+; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k2
+; SKX-NEXT: kunpckwd %k1, %k2, %k1
+; SKX-NEXT: vpmovm2w %k1, %zmm0
+; SKX-NEXT: vpmovm2w %k0, %zmm1
+; SKX-NEXT: vmovdqu16 {{.*#+}} zmm2 = [0,1,2,3,32,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; SKX-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
+; SKX-NEXT: vpmovw2m %zmm2, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: retq
+ %cmp_res_i1 = icmp ult i32 %a, %b
+ %cmp_cmp_vec = icmp ult <32 x i32> %x, %y
+ %maskv = insertelement <32 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 4
+ %res = bitcast <32 x i1> %maskv to i32
+ ret i32 %res
+}
+
+define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y) {
+; KNL-LABEL: test_iinsertelement_v4i1:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpl %esi, %edi
+; KNL-NEXT: setb %al
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; KNL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; KNL-NEXT: vpextrd $1, %xmm0, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: kmovw %eax, %k2
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; KNL-NEXT: vmovdqa64 %zmm1, %zmm2 {%k2} {z}
+; KNL-NEXT: vmovd %xmm0, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: kmovw %eax, %k2
+; KNL-NEXT: vmovdqa64 %zmm1, %zmm3 {%k2} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,8,2,3,4,5,6,7]
+; KNL-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
+; KNL-NEXT: vpsllq $63, %zmm4, %zmm2
+; KNL-NEXT: vptestmq %zmm2, %zmm2, %k2
+; KNL-NEXT: vmovdqa64 %zmm1, %zmm2 {%k2} {z}
+; KNL-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,8,3,4,5,6,7]
+; KNL-NEXT: vpermi2q %zmm3, %zmm2, %zmm4
+; KNL-NEXT: vpsllq $63, %zmm4, %zmm2
+; KNL-NEXT: vptestmq %zmm2, %zmm2, %k1
+; KNL-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} {z}
+; KNL-NEXT: vpextrd $3, %xmm0, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,8,4,5,6,7]
+; KNL-NEXT: vpermi2q %zmm0, %zmm2, %zmm1
+; KNL-NEXT: vpsllq $63, %zmm1, %zmm0
+; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_iinsertelement_v4i1:
+; SKX: ## BB#0:
+; SKX-NEXT: cmpl %esi, %edi
+; SKX-NEXT: setb %al
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k1
+; SKX-NEXT: vpmovm2d %k1, %xmm0
+; SKX-NEXT: vpmovm2d %k0, %xmm1
+; SKX-NEXT: vpbroadcastq %xmm1, %xmm1
+; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; SKX-NEXT: vpmovd2m %xmm0, %k0
+; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: retq
+ %cmp_res_i1 = icmp ult i32 %a, %b
+ %cmp_cmp_vec = icmp ult <4 x i32> %x, %y
+ %maskv = insertelement <4 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 2
+ %res0 = shufflevector <4 x i1> %maskv, <4 x i1> undef , <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
+ %res = bitcast <8 x i1> %res0 to i8
+ ret i8 %res
+}
+
+define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y) {
+; KNL-LABEL: test_iinsertelement_v2i1:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpl %esi, %edi
+; KNL-NEXT: setb %al
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; KNL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; KNL-NEXT: vmovq %xmm0, %rax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: kmovw %eax, %k2
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2} {z}
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
+; KNL-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
+; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_iinsertelement_v2i1:
+; SKX: ## BB#0:
+; SKX-NEXT: cmpl %esi, %edi
+; SKX-NEXT: setb %al
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
+; SKX-NEXT: kshiftlw $1, %k1, %k1
+; SKX-NEXT: kshiftrw $1, %k1, %k1
+; SKX-NEXT: kshiftlw $1, %k0, %k0
+; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: retq
+ %cmp_res_i1 = icmp ult i32 %a, %b
+ %cmp_cmp_vec = icmp ult <2 x i64> %x, %y
+ %maskv = insertelement <2 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 1
+ %res0 = shufflevector <2 x i1> %maskv, <2 x i1> undef , <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %res = bitcast <8 x i1> %res0 to i8
+ ret i8 %res
+}
+
+define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
+; KNL-LABEL: test_extractelement_v2i1:
+; KNL: ## BB#0:
+; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; KNL-NEXT: vmovq %xmm0, %rax
+; KNL-NEXT: testb $1, %al
+; KNL-NEXT: sete %al
+; KNL-NEXT: addb $3, %al
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_v2i1:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
+; SKX-NEXT: kshiftlw $15, %k0, %k0
+; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: testb %al, %al
+; SKX-NEXT: sete %al
+; SKX-NEXT: addb $3, %al
+; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: retq
+ %t1 = icmp ugt <2 x i64> %a, %b
+ %t2 = extractelement <2 x i1> %t1, i32 0
+ %res = select i1 %t2, i8 3, i8 4
+ ret i8 %res
+}
+
+define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
+; KNL-LABEL: test_extractelement_v4i1:
+; KNL: ## BB#0:
+; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; KNL-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; KNL-NEXT: vpextrd $3, %xmm0, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_v4i1:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
+; SKX-NEXT: kshiftlw $12, %k0, %k0
+; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: retq
+ %t1 = icmp ugt <4 x i32> %a, %b
+ %t2 = extractelement <4 x i1> %t1, i32 3
+ %res = zext i1 %t2 to i8
+ ret i8 %res
+}
+
+define zeroext i8 @test_extractelement_v32i1(<32 x i8> %a, <32 x i8> %b) {
+; KNL-LABEL: test_extractelement_v32i1:
+; KNL: ## BB#0:
+; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; KNL-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; KNL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
+; KNL-NEXT: vpextrb $2, %xmm0, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_v32i1:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
+; SKX-NEXT: kshiftld $29, %k0, %k0
+; SKX-NEXT: kshiftrd $31, %k0, %k0
+; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: retq
+ %t1 = icmp ugt <32 x i8> %a, %b
+ %t2 = extractelement <32 x i1> %t1, i32 2
+ %res = zext i1 %t2 to i8
+ ret i8 %res
+}
+
+define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
+; KNL-LABEL: test_extractelement_v64i1:
+; KNL: ## BB#0:
+; KNL-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; KNL-NEXT: vpxor %ymm0, %ymm3, %ymm2
+; KNL-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpextrb $15, %xmm0, %eax
+; KNL-NEXT: testb $1, %al
+; KNL-NEXT: sete %al
+; KNL-NEXT: addb $3, %al
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_v64i1:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
+; SKX-NEXT: kshiftrq $63, %k0, %k0
+; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: testb %al, %al
+; SKX-NEXT: sete %al
+; SKX-NEXT: addb $3, %al
+; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: retq
+ %t1 = icmp ugt <64 x i8> %a, %b
+ %t2 = extractelement <64 x i1> %t1, i32 63
+ %res = select i1 %t2, i8 3, i8 4
+ ret i8 %res
+}