Diffstat (limited to 'test/CodeGen/X86/bitcast-int-to-vector-bool.ll')
-rw-r--r-- | test/CodeGen/X86/bitcast-int-to-vector-bool.ll | 742
1 file changed, 167 insertions(+), 575 deletions(-)
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
index a190e0575522..6d9f832d861f 100644
--- a/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
@@ -7,38 +7,47 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
 ; SSE2-SSSE3-LABEL: bitcast_i2_2i1:
-; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
-; SSE2-SSSE3-NEXT: shrl %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movq %rax, %xmm1
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
+; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
 ; SSE2-SSSE3-NEXT: retq
 ;
-; AVX12-LABEL: bitcast_i2_2i1:
-; AVX12: # BB#0:
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vmovq %rcx, %xmm0
-; AVX12-NEXT: shrl %eax
-; AVX12-NEXT: andl $1, %eax
-; AVX12-NEXT: vmovq %rax, %xmm1
-; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX12-NEXT: retq
+; AVX1-LABEL: bitcast_i2_2i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i2_2i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i2_2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
 ; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
 ; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
 ; AVX512-NEXT: kmovd %eax, %k1
 ; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
 %1 = bitcast i2 %a0 to <2 x i1>
@@ -47,65 +56,43 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
 define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
 ; SSE2-SSSE3-LABEL: bitcast_i4_4i1:
-; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: shrl %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrld $31, %xmm0
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: bitcast_i4_4i1:
-; AVX1: # BB#0:
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: bitcast_i4_4i1:
-; AVX2: # BB#0:
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i4_4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
 ; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
 ; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
 ; AVX512-NEXT: kmovd %eax, %k1
 ; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
 %1 = bitcast i4 %a0 to <4 x i1>
@@ -114,86 +101,39 @@ define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
 define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
 ; SSE2-SSSE3-LABEL: bitcast_i8_8i1:
-; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $7, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm3
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
 ; SSE2-SSSE3-NEXT: retq
 ;
-; AVX12-LABEL: bitcast_i8_8i1:
-; AVX12: # BB#0:
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: movl %eax, %edx
-; AVX12-NEXT: andl $1, %edx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $2, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $3, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $4, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $5, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $6, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrl $7, %eax
-; AVX12-NEXT: movzwl %ax, %eax
-; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; AVX1-LABEL: bitcast_i8_8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i8_8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i8_8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
 ; AVX512-NEXT: kmovd %edi, %k0
 ; AVX512-NEXT: vpmovm2w %k0, %xmm0
 ; AVX512-NEXT: retq
@@ -202,159 +142,54 @@ define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
 }
 define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
-; SSE2-SSSE3-LABEL: bitcast_i16_16i1:
-; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: retq
+; SSE2-LABEL: bitcast_i16_16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; SSE2-NEXT: psrlw $7, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
 ;
-; AVX12-LABEL: bitcast_i16_16i1:
-; AVX12: # BB#0:
-; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: movl %eax, %edx
-; AVX12-NEXT: andl $1, %edx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $2, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $3, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $4, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $5, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $6, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $7, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $8, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $9, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $10, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $11, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $12, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $13, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $14, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrl $15, %eax
-; AVX12-NEXT: movzwl %ax, %eax
-; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; SSSE3-LABEL: bitcast_i16_16i1:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movd %edi, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pcmpeqb %xmm1, %xmm0
+; SSSE3-NEXT: psrlw $7, %xmm0
+; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_i16_16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i16_16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i16_16i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
 ; AVX512-NEXT: kmovd %edi, %k0
 ; AVX512-NEXT: vpmovm2b %k0, %xmm0
 ; AVX512-NEXT: retq
@@ -364,297 +199,54 @@ define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
 define <32 x i1> @bitcast_i32_32i1(i32 %a0) {
 ; SSE2-SSSE3-LABEL: bitcast_i32_32i1:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
 ; SSE2-SSSE3-NEXT: movl %esi, (%rdi)
 ; SSE2-SSSE3-NEXT: movq %rdi, %rax
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: bitcast_i32_32i1:
-; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi0:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi1:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi2:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $17, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $18, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $19, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $20, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $21, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $22, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $23, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $24, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $25, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $26, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $27, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $28, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $29, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $30, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $31, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $4, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $5, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $6, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $8, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $9, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $10, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $11, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $12, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $13, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $14, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: shrl $15, %edi
-; AVX1-NEXT: andl $1, %edi
-; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: bitcast_i32_32i1:
-; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi0:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi1:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi2:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $32, %rsp
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $17, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrl $16, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $18, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $19, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $20, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $21, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $22, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $23, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $24, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $25, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $26, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $27, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $28, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $29, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $30, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $31, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm1
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $2, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $4, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $5, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $6, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $8, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $9, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $10, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $11, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $12, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $13, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $14, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: shrl $15, %edi
-; AVX2-NEXT: andl $1, %edi
-; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: movq %rbp, %rsp
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i32_32i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
 ; AVX512-NEXT: kmovd %edi, %k0
 ; AVX512-NEXT: vpmovm2b %k0, %ymm0
 ; AVX512-NEXT: retq
@@ -664,19 +256,19 @@ define <32 x i1> @bitcast_i32_32i1(i32 %a0) {
 define <64 x i1> @bitcast_i64_64i1(i64 %a0) {
 ; SSE2-SSSE3-LABEL: bitcast_i64_64i1:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
 ; SSE2-SSSE3-NEXT: movq %rsi, (%rdi)
 ; SSE2-SSSE3-NEXT: movq %rdi, %rax
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: bitcast_i64_64i1:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
 ; AVX12-NEXT: movq %rsi, (%rdi)
 ; AVX12-NEXT: movq %rdi, %rax
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: bitcast_i64_64i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
 ; AVX512-NEXT: kmovq %rdi, %k0
 ; AVX512-NEXT: vpmovm2b %k0, %zmm0
 ; AVX512-NEXT: retq
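The updated check lines above all reflect one lowering recipe: splat the scalar into every vector lane, AND each lane with the power-of-two mask for its bit, compare the result for equality against that same mask, and shift the all-ones lanes down to 0/1 booleans. The following is an illustrative IR-level sketch of that recipe for the i4 case (it is not taken from this commit, and the function name @expand_i4 and the choice of i32 lanes are made up for the example):

define <4 x i1> @expand_i4(i4 zeroext %a0) {
  ; Splat the scalar into all four lanes (the pshufd/vpbroadcastd in the checks above).
  %ext = zext i4 %a0 to i32
  %ins = insertelement <4 x i32> undef, i32 %ext, i32 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer
  ; Isolate bit N in lane N (the pand against [1,2,4,8]).
  %bits = and <4 x i32> %splat, <i32 1, i32 2, i32 4, i32 8>
  ; Lane N compares equal to its mask exactly when bit N was set (pcmpeqd);
  ; the psrld $31 in the checked assembly then materializes the result as 0/1.
  %mask = icmp eq <4 x i32> %bits, <i32 1, i32 2, i32 4, i32 8>
  ret <4 x i1> %mask
}

The wider cases only vary the lane type and the mask constant; for the byte-lane cases AVX2 can broadcast a single 64-bit constant, 0x8040201008040201, which is the 9241421688590303745 splat visible in the new AVX2 checks.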