@@ -1113,8 +1113,8 @@ define <16 x i8> @evenelts_v32i16_trunc_v16i16_to_v16i8(<32 x i16> %n2) nounwind
 ;
 ; AVX512VBMI-FAST-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
 ; AVX512VBMI-FAST: # %bb.0:
-; AVX512VBMI-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [64,65,66,67,68,69,24,28,32,36,40,44,48,52,56,79]
-; AVX512VBMI-FAST-NEXT: vpmovdb %ymm0, %xmm2
+; AVX512VBMI-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,79]
+; AVX512VBMI-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; AVX512VBMI-FAST-NEXT: vpermi2b %zmm2, %zmm0, %zmm1
 ; AVX512VBMI-FAST-NEXT: vextracti32x4 $3, %zmm0, %xmm0
 ; AVX512VBMI-FAST-NEXT: vpextrw $6, %xmm0, %eax
@@ -1124,14 +1124,14 @@ define <16 x i8> @evenelts_v32i16_trunc_v16i16_to_v16i8(<32 x i16> %n2) nounwind
 ;
 ; AVX512VBMI-SLOW-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
 ; AVX512VBMI-SLOW: # %bb.0:
-; AVX512VBMI-SLOW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,92,96,100,104,108,112,13,14,15]
-; AVX512VBMI-SLOW-NEXT: vpmovdb %ymm0, %xmm2
-; AVX512VBMI-SLOW-NEXT: vpermt2b %zmm0, %zmm1, %zmm2
+; AVX512VBMI-SLOW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,4,8,12,16,20,24,28,32,36,40,44,48,77,78,79]
+; AVX512VBMI-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VBMI-SLOW-NEXT: vpermi2b %zmm2, %zmm0, %zmm1
 ; AVX512VBMI-SLOW-NEXT: vextracti32x4 $3, %zmm0, %xmm0
 ; AVX512VBMI-SLOW-NEXT: vpextrw $6, %xmm0, %eax
 ; AVX512VBMI-SLOW-NEXT: vpextrw $4, %xmm0, %ecx
 ; AVX512VBMI-SLOW-NEXT: vpextrw $2, %xmm0, %edx
-; AVX512VBMI-SLOW-NEXT: vpinsrb $13, %edx, %xmm2, %xmm0
+; AVX512VBMI-SLOW-NEXT: vpinsrb $13, %edx, %xmm1, %xmm0
 ; AVX512VBMI-SLOW-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
 ; AVX512VBMI-SLOW-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
 ; AVX512VBMI-SLOW-NEXT: vzeroupper
0 commit comments