| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512F |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512BW |
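
; Check that the rounding fixed-point average idiom
;   avg(a, b) = trunc((zext(a) + zext(b) + 1) >> 1)
; is recognized and lowered to the X86 PAVGB/PAVGW instructions, which
; compute (a + b + 1) >> 1 in a wider intermediate type so the addition
; cannot overflow.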
| |
| define void @avg_v4i8(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v4i8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: pavgb %xmm0, %xmm1 |
| ; SSE2-NEXT: movd %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v4i8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vmovd %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <4 x i8>, ptr %a |
| %2 = load <4 x i8>, ptr %b |
| %3 = zext <4 x i8> %1 to <4 x i32> |
| %4 = zext <4 x i8> %2 to <4 x i32> |
| %5 = add nuw nsw <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <4 x i32> %5, %4 |
| %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <4 x i32> %7 to <4 x i8> |
| store <4 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v8i8(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v8i8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero |
| ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero |
| ; SSE2-NEXT: pavgb %xmm0, %xmm1 |
| ; SSE2-NEXT: movq %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v8i8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero |
| ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vmovq %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <8 x i8>, ptr %a |
| %2 = load <8 x i8>, ptr %b |
| %3 = zext <8 x i8> %1 to <8 x i32> |
| %4 = zext <8 x i8> %2 to <8 x i32> |
| %5 = add nuw nsw <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <8 x i32> %5, %4 |
| %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <8 x i32> %7 to <8 x i8> |
| store <8 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v16i8(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v16i8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: pavgb (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v16i8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpavgb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <16 x i8>, ptr %a |
| %2 = load <16 x i8>, ptr %b |
| %3 = zext <16 x i8> %1 to <16 x i32> |
| %4 = zext <16 x i8> %2 to <16 x i32> |
| %5 = add nuw nsw <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <16 x i32> %5, %4 |
| %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <16 x i32> %7 to <16 x i8> |
| store <16 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
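; Non-power-of-two element count: the <24 x i8> case is split (or widened
; and split) so each part still lowers to pavgb, with the 8-byte tail
; stored via a narrower movq.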
| define void @avg_v24i8(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v24i8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: pavgb (%rsi), %xmm0 |
| ; SSE2-NEXT: pavgb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: movq %xmm1, (%rax) |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v24i8: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vmovq %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v24i8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vmovq %xmm1, (%rax) |
| ; AVX2-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: avg_v24i8: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512-NEXT: vmovq %xmm1, (%rax) |
| ; AVX512-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = load <24 x i8>, ptr %a |
| %2 = load <24 x i8>, ptr %b |
| %3 = zext <24 x i8> %1 to <24 x i32> |
| %4 = zext <24 x i8> %2 to <24 x i32> |
| %5 = add nuw nsw <24 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <24 x i32> %5, %4 |
| %7 = lshr <24 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <24 x i32> %7 to <24 x i8> |
| store <24 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v32i8(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v32i8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: pavgb (%rsi), %xmm0 |
| ; SSE2-NEXT: pavgb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v32i8: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v32i8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: avg_v32i8: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX512-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = load <32 x i8>, ptr %a |
| %2 = load <32 x i8>, ptr %b |
| %3 = zext <32 x i8> %1 to <32 x i32> |
| %4 = zext <32 x i8> %2 to <32 x i32> |
| %5 = add nuw nsw <32 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <32 x i32> %5, %4 |
| %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <32 x i32> %7 to <32 x i8> |
| store <32 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
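; <48 x i8>: AVX512BW uses a single zmm pavgb and stores the upper 16
; bytes with vextracti32x4; narrower targets split into xmm/ymm parts.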
| define void @avg_v48i8(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v48i8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm2 |
| ; SSE2-NEXT: pavgb (%rsi), %xmm0 |
| ; SSE2-NEXT: pavgb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: pavgb 32(%rsi), %xmm2 |
| ; SSE2-NEXT: movdqu %xmm2, (%rax) |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v48i8: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vpavgb 32(%rsi), %xmm2, %xmm2 |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm2, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v48i8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX2-NEXT: vpavgb 32(%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: avg_v48i8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpavgb 32(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: avg_v48i8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpavgb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, (%rax) |
| ; AVX512BW-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %1 = load <48 x i8>, ptr %a |
| %2 = load <48 x i8>, ptr %b |
| %3 = zext <48 x i8> %1 to <48 x i32> |
| %4 = zext <48 x i8> %2 to <48 x i32> |
| %5 = add nuw nsw <48 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <48 x i32> %5, %4 |
| %7 = lshr <48 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <48 x i32> %7 to <48 x i8> |
| store <48 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v64i8(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v64i8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm3 |
| ; SSE2-NEXT: pavgb (%rsi), %xmm0 |
| ; SSE2-NEXT: pavgb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: pavgb 32(%rsi), %xmm2 |
| ; SSE2-NEXT: pavgb 48(%rsi), %xmm3 |
| ; SSE2-NEXT: movdqu %xmm3, (%rax) |
| ; SSE2-NEXT: movdqu %xmm2, (%rax) |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v64i8: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vpavgb 32(%rsi), %xmm2, %xmm2 |
| ; AVX1-NEXT: vpavgb 48(%rsi), %xmm3, %xmm3 |
| ; AVX1-NEXT: vmovdqu %xmm3, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm2, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v64i8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpavgb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: avg_v64i8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpavgb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: avg_v64i8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpavgb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %1 = load <64 x i8>, ptr %a |
| %2 = load <64 x i8>, ptr %b |
| %3 = zext <64 x i8> %1 to <64 x i32> |
| %4 = zext <64 x i8> %2 to <64 x i32> |
| %5 = add nuw nsw <64 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <64 x i32> %5, %4 |
| %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <64 x i32> %7 to <64 x i8> |
| store <64 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v4i16(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v4i16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero |
| ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero |
| ; SSE2-NEXT: pavgw %xmm0, %xmm1 |
| ; SSE2-NEXT: movq %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v4i16: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero |
| ; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vmovq %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <4 x i16>, ptr %a |
| %2 = load <4 x i16>, ptr %b |
| %3 = zext <4 x i16> %1 to <4 x i32> |
| %4 = zext <4 x i16> %2 to <4 x i32> |
| %5 = add nuw nsw <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <4 x i32> %5, %4 |
| %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <4 x i32> %7 to <4 x i16> |
| store <4 x i16> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v8i16(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v8i16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: pavgw (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v8i16: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpavgw (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <8 x i16>, ptr %a |
| %2 = load <8 x i16>, ptr %b |
| %3 = zext <8 x i16> %1 to <8 x i32> |
| %4 = zext <8 x i16> %2 to <8 x i32> |
| %5 = add nuw nsw <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <8 x i32> %5, %4 |
| %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <8 x i32> %7 to <8 x i16> |
| store <8 x i16> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v16i16(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v16i16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: pavgw (%rsi), %xmm0 |
| ; SSE2-NEXT: pavgw 16(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v16i16: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v16i16: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: avg_v16i16: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX512-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = load <16 x i16>, ptr %a |
| %2 = load <16 x i16>, ptr %b |
| %3 = zext <16 x i16> %1 to <16 x i32> |
| %4 = zext <16 x i16> %2 to <16 x i32> |
| %5 = add nuw nsw <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <16 x i32> %5, %4 |
| %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <16 x i32> %7 to <16 x i16> |
| store <16 x i16> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v32i16(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v32i16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm3 |
| ; SSE2-NEXT: pavgw (%rsi), %xmm0 |
| ; SSE2-NEXT: pavgw 16(%rsi), %xmm1 |
| ; SSE2-NEXT: pavgw 32(%rsi), %xmm2 |
| ; SSE2-NEXT: pavgw 48(%rsi), %xmm3 |
| ; SSE2-NEXT: movdqu %xmm3, (%rax) |
| ; SSE2-NEXT: movdqu %xmm2, (%rax) |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v32i16: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vpavgw 32(%rsi), %xmm2, %xmm2 |
| ; AVX1-NEXT: vpavgw 48(%rsi), %xmm3, %xmm3 |
| ; AVX1-NEXT: vmovdqu %xmm3, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm2, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v32i16: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: avg_v32i16: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: avg_v32i16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %1 = load <32 x i16>, ptr %a |
| %2 = load <32 x i16>, ptr %b |
| %3 = zext <32 x i16> %1 to <32 x i32> |
| %4 = zext <32 x i16> %2 to <32 x i32> |
| %5 = add nuw nsw <32 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <32 x i32> %5, %4 |
| %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <32 x i32> %7 to <32 x i16> |
| store <32 x i16> %8, ptr undef, align 4 |
| ret void |
| } |
| |
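; <40 x i16> (80 bytes): SSE2/AVX1 split into five xmm pavgw ops, while
; AVX512BW uses one zmm pavgw plus a trailing xmm for the last 16 bytes.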
| define void @avg_v40i16(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v40i16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa 64(%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm2 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm3 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm4 |
| ; SSE2-NEXT: pavgw (%rsi), %xmm1 |
| ; SSE2-NEXT: pavgw 16(%rsi), %xmm2 |
| ; SSE2-NEXT: pavgw 32(%rsi), %xmm3 |
| ; SSE2-NEXT: pavgw 48(%rsi), %xmm4 |
| ; SSE2-NEXT: pavgw 64(%rsi), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: movdqu %xmm4, (%rax) |
| ; SSE2-NEXT: movdqu %xmm3, (%rax) |
| ; SSE2-NEXT: movdqu %xmm2, (%rax) |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v40i16: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vpavgw 32(%rsi), %xmm2, %xmm2 |
| ; AVX1-NEXT: vpavgw 48(%rsi), %xmm3, %xmm3 |
| ; AVX1-NEXT: vmovdqa 64(%rdi), %xmm4 |
| ; AVX1-NEXT: vpavgw 64(%rsi), %xmm4, %xmm4 |
| ; AVX1-NEXT: vmovdqu %xmm3, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm2, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm4, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v40i16: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vmovdqa 64(%rdi), %xmm2 |
| ; AVX2-NEXT: vpavgw 64(%rsi), %xmm2, %xmm2 |
| ; AVX2-NEXT: vmovdqu %xmm2, (%rax) |
| ; AVX2-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: avg_v40i16: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vmovdqa 64(%rdi), %xmm2 |
| ; AVX512F-NEXT: vpavgw 64(%rsi), %xmm2, %xmm2 |
| ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512F-NEXT: vmovdqu %xmm2, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: avg_v40i16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512BW-NEXT: vpavgw 64(%rsi), %xmm1, %xmm1 |
| ; AVX512BW-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %1 = load <40 x i16>, ptr %a |
| %2 = load <40 x i16>, ptr %b |
| %3 = zext <40 x i16> %1 to <40 x i32> |
| %4 = zext <40 x i16> %2 to <40 x i32> |
| %5 = add nuw nsw <40 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %6 = add nuw nsw <40 x i32> %5, %4 |
| %7 = lshr <40 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <40 x i32> %7 to <40 x i16> |
| store <40 x i16> %8, ptr undef, align 4 |
| ret void |
| } |
| |
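; The "_2" variants commute the rounding add: they compute (a + b) + 1
; instead of (a + 1) + b, and should lower to the same pavg instructions.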
| define void @avg_v4i8_2(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v4i8_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: pavgb %xmm0, %xmm1 |
| ; SSE2-NEXT: movd %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v4i8_2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vmovd %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <4 x i8>, ptr %a |
| %2 = load <4 x i8>, ptr %b |
| %3 = zext <4 x i8> %1 to <4 x i32> |
| %4 = zext <4 x i8> %2 to <4 x i32> |
| %5 = add nuw nsw <4 x i32> %3, %4 |
| %6 = add nuw nsw <4 x i32> %5, <i32 1, i32 1, i32 1, i32 1> |
| %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <4 x i32> %7 to <4 x i8> |
| store <4 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v8i8_2(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v8i8_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero |
| ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero |
| ; SSE2-NEXT: pavgb %xmm0, %xmm1 |
| ; SSE2-NEXT: movq %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v8i8_2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero |
| ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vmovq %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <8 x i8>, ptr %a |
| %2 = load <8 x i8>, ptr %b |
| %3 = zext <8 x i8> %1 to <8 x i32> |
| %4 = zext <8 x i8> %2 to <8 x i32> |
| %5 = add nuw nsw <8 x i32> %3, %4 |
| %6 = add nuw nsw <8 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <8 x i32> %7 to <8 x i8> |
| store <8 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v16i8_2(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v16i8_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: pavgb (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v16i8_2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpavgb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <16 x i8>, ptr %a |
| %2 = load <16 x i8>, ptr %b |
| %3 = zext <16 x i8> %1 to <16 x i32> |
| %4 = zext <16 x i8> %2 to <16 x i32> |
| %5 = add nuw nsw <16 x i32> %3, %4 |
| %6 = add nuw nsw <16 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <16 x i32> %7 to <16 x i8> |
| store <16 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v32i8_2(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v32i8_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: pavgb (%rsi), %xmm0 |
| ; SSE2-NEXT: pavgb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v32i8_2: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v32i8_2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: avg_v32i8_2: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0 |
| ; AVX512-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = load <32 x i8>, ptr %a |
| %2 = load <32 x i8>, ptr %b |
| %3 = zext <32 x i8> %1 to <32 x i32> |
| %4 = zext <32 x i8> %2 to <32 x i32> |
| %5 = add nuw nsw <32 x i32> %3, %4 |
| %6 = add nuw nsw <32 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <32 x i32> %7 to <32 x i8> |
| store <32 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
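; Note: avg_v64i8_2 adds %4 to itself, so the "average" folds to a plain
; copy of the second input; the checks below contain no pavgb at all.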
| define void @avg_v64i8_2(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v64i8_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movaps (%rsi), %xmm0 |
| ; SSE2-NEXT: movaps 16(%rsi), %xmm1 |
| ; SSE2-NEXT: movaps 32(%rsi), %xmm2 |
| ; SSE2-NEXT: movaps 48(%rsi), %xmm3 |
| ; SSE2-NEXT: movups %xmm3, (%rax) |
| ; SSE2-NEXT: movups %xmm2, (%rax) |
| ; SSE2-NEXT: movups %xmm1, (%rax) |
| ; SSE2-NEXT: movups %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v64i8_2: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovaps (%rsi), %ymm0 |
| ; AVX1-NEXT: vmovaps 32(%rsi), %ymm1 |
| ; AVX1-NEXT: vmovups %ymm1, (%rax) |
| ; AVX1-NEXT: vmovups %ymm0, (%rax) |
| ; AVX1-NEXT: vzeroupper |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v64i8_2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovaps (%rsi), %ymm0 |
| ; AVX2-NEXT: vmovaps 32(%rsi), %ymm1 |
| ; AVX2-NEXT: vmovups %ymm1, (%rax) |
| ; AVX2-NEXT: vmovups %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: avg_v64i8_2: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovaps (%rsi), %zmm0 |
| ; AVX512-NEXT: vmovups %zmm0, (%rax) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = load <64 x i8>, ptr %a |
| %2 = load <64 x i8>, ptr %b |
| %3 = zext <64 x i8> %1 to <64 x i32> |
| %4 = zext <64 x i8> %2 to <64 x i32> |
| %5 = add nuw nsw <64 x i32> %4, %4 |
| %6 = add nuw nsw <64 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <64 x i32> %7 to <64 x i8> |
| store <64 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v4i16_2(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v4i16_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero |
| ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero |
| ; SSE2-NEXT: pavgw %xmm0, %xmm1 |
| ; SSE2-NEXT: movq %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v4i16_2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero |
| ; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vmovq %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <4 x i16>, ptr %a |
| %2 = load <4 x i16>, ptr %b |
| %3 = zext <4 x i16> %1 to <4 x i32> |
| %4 = zext <4 x i16> %2 to <4 x i32> |
| %5 = add nuw nsw <4 x i32> %3, %4 |
| %6 = add nuw nsw <4 x i32> %5, <i32 1, i32 1, i32 1, i32 1> |
| %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <4 x i32> %7 to <4 x i16> |
| store <4 x i16> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v8i16_2(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v8i16_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: pavgw (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v8i16_2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpavgw (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <8 x i16>, ptr %a |
| %2 = load <8 x i16>, ptr %b |
| %3 = zext <8 x i16> %1 to <8 x i32> |
| %4 = zext <8 x i16> %2 to <8 x i32> |
| %5 = add nuw nsw <8 x i32> %3, %4 |
| %6 = add nuw nsw <8 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <8 x i32> %7 to <8 x i16> |
| store <8 x i16> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v16i16_2(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v16i16_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: pavgw (%rsi), %xmm0 |
| ; SSE2-NEXT: pavgw 16(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v16i16_2: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v16i16_2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: avg_v16i16_2: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX512-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = load <16 x i16>, ptr %a |
| %2 = load <16 x i16>, ptr %b |
| %3 = zext <16 x i16> %1 to <16 x i32> |
| %4 = zext <16 x i16> %2 to <16 x i32> |
| %5 = add nuw nsw <16 x i32> %3, %4 |
| %6 = add nuw nsw <16 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <16 x i32> %7 to <16 x i16> |
| store <16 x i16> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v32i16_2(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: avg_v32i16_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm3 |
| ; SSE2-NEXT: pavgw (%rsi), %xmm0 |
| ; SSE2-NEXT: pavgw 16(%rsi), %xmm1 |
| ; SSE2-NEXT: pavgw 32(%rsi), %xmm2 |
| ; SSE2-NEXT: pavgw 48(%rsi), %xmm3 |
| ; SSE2-NEXT: movdqu %xmm3, (%rax) |
| ; SSE2-NEXT: movdqu %xmm2, (%rax) |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v32i16_2: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1 |
| ; AVX1-NEXT: vpavgw 32(%rsi), %xmm2, %xmm2 |
| ; AVX1-NEXT: vpavgw 48(%rsi), %xmm3, %xmm3 |
| ; AVX1-NEXT: vmovdqu %xmm3, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm2, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v32i16_2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: avg_v32i16_2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: avg_v32i16_2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %1 = load <32 x i16>, ptr %a |
| %2 = load <32 x i16>, ptr %b |
| %3 = zext <32 x i16> %1 to <32 x i32> |
| %4 = zext <32 x i16> %2 to <32 x i32> |
| %5 = add nuw nsw <32 x i32> %3, %4 |
| %6 = add nuw nsw <32 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %8 = trunc <32 x i32> %7 to <32 x i16> |
| store <32 x i16> %8, ptr undef, align 4 |
| ret void |
| } |
| |
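; The "_const" variants average against a constant vector; the constant
; operand is materialized from the constant pool (or via a broadcast) and
; fed directly into pavgb/pavgw.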
| define void @avg_v4i8_const(ptr %a) nounwind { |
| ; SSE2-LABEL: avg_v4i8_const: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: pavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movd %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v4i8_const: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; AVX-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovd %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <4 x i8>, ptr %a |
| %2 = zext <4 x i8> %1 to <4 x i32> |
| %3 = add nuw nsw <4 x i32> %2, <i32 1, i32 2, i32 3, i32 4> |
| %4 = lshr <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1> |
| %5 = trunc <4 x i32> %4 to <4 x i8> |
| store <4 x i8> %5, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v8i8_const(ptr %a) nounwind { |
| ; SSE2-LABEL: avg_v8i8_const: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero |
| ; SSE2-NEXT: pavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movq %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v8i8_const: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovq %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <8 x i8>, ptr %a |
| %2 = zext <8 x i8> %1 to <8 x i32> |
| %3 = add nuw nsw <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8> |
| %4 = lshr <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %5 = trunc <8 x i32> %4 to <8 x i8> |
| store <8 x i8> %5, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v16i8_const(ptr %a) nounwind { |
| ; SSE2-LABEL: avg_v16i8_const: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: pavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v16i8_const: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <16 x i8>, ptr %a |
| %2 = zext <16 x i8> %1 to <16 x i32> |
| %3 = add nuw nsw <16 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8> |
| %4 = lshr <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %5 = trunc <16 x i32> %4 to <16 x i8> |
| store <16 x i8> %5, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v32i8_const(ptr %a) nounwind { |
| ; SSE2-LABEL: avg_v32i8_const: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE2-NEXT: pavgb %xmm0, %xmm1 |
| ; SSE2-NEXT: pavgb 16(%rdi), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v32i8_const: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; AVX1-NEXT: # xmm0 = mem[0,0] |
| ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm1 |
| ; AVX1-NEXT: vpavgb 16(%rdi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v32i8_const: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: avg_v32i8_const: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX512-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = load <32 x i8>, ptr %a |
| %2 = zext <32 x i8> %1 to <32 x i32> |
| %3 = add nuw nsw <32 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8> |
| %4 = lshr <32 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %5 = trunc <32 x i32> %4 to <32 x i8> |
| store <32 x i8> %5, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v64i8_const(ptr %a) nounwind { |
| ; SSE2-LABEL: avg_v64i8_const: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE2-NEXT: pavgb %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm2 |
| ; SSE2-NEXT: pavgb %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm3 |
| ; SSE2-NEXT: pavgb %xmm0, %xmm3 |
| ; SSE2-NEXT: pavgb 48(%rdi), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: movdqu %xmm3, (%rax) |
| ; SSE2-NEXT: movdqu %xmm2, (%rax) |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v64i8_const: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; AVX1-NEXT: # xmm0 = mem[0,0] |
| ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm1 |
| ; AVX1-NEXT: vpavgb 16(%rdi), %xmm0, %xmm2 |
| ; AVX1-NEXT: vpavgb 32(%rdi), %xmm0, %xmm3 |
| ; AVX1-NEXT: vpavgb 48(%rdi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm3, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm2, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v64i8_const: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpavgb 32(%rdi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: avg_v64i8_const: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpavgb 32(%rdi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: avg_v64i8_const: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %1 = load <64 x i8>, ptr %a |
| %2 = zext <64 x i8> %1 to <64 x i32> |
| %3 = add nuw nsw <64 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8> |
| %4 = lshr <64 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %5 = trunc <64 x i32> %4 to <64 x i8> |
| store <64 x i8> %5, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v4i16_const(ptr %a) nounwind { |
| ; SSE2-LABEL: avg_v4i16_const: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero |
| ; SSE2-NEXT: pavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movq %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v4i16_const: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovq %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <4 x i16>, ptr %a |
| %2 = zext <4 x i16> %1 to <4 x i32> |
| %3 = add nuw nsw <4 x i32> %2, <i32 1, i32 2, i32 3, i32 4> |
| %4 = lshr <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1> |
| %5 = trunc <4 x i32> %4 to <4 x i16> |
| store <4 x i16> %5, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v8i16_const(ptr %a) nounwind { |
| ; SSE2-LABEL: avg_v8i16_const: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: pavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v8i16_const: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %1 = load <8 x i16>, ptr %a |
| %2 = zext <8 x i16> %1 to <8 x i32> |
| %3 = add nuw nsw <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8> |
| %4 = lshr <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %5 = trunc <8 x i32> %4 to <8 x i16> |
| store <8 x i16> %5, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v16i16_const(ptr %a) nounwind { |
| ; SSE2-LABEL: avg_v16i16_const: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE2-NEXT: pavgw %xmm0, %xmm1 |
| ; SSE2-NEXT: pavgw 16(%rdi), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v16i16_const: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7] |
| ; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm1 |
| ; AVX1-NEXT: vpavgw 16(%rdi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v16i16_const: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: avg_v16i16_const: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX512-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = load <16 x i16>, ptr %a |
| %2 = zext <16 x i16> %1 to <16 x i32> |
| %3 = add nuw nsw <16 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8> |
| %4 = lshr <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %5 = trunc <16 x i32> %4 to <16 x i16> |
| store <16 x i16> %5, ptr undef, align 4 |
| ret void |
| } |
| |
| define void @avg_v32i16_const(ptr %a) nounwind { |
| ; SSE2-LABEL: avg_v32i16_const: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE2-NEXT: pavgw %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm2 |
| ; SSE2-NEXT: pavgw %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm3 |
| ; SSE2-NEXT: pavgw %xmm0, %xmm3 |
| ; SSE2-NEXT: pavgw 48(%rdi), %xmm0 |
| ; SSE2-NEXT: movdqu %xmm0, (%rax) |
| ; SSE2-NEXT: movdqu %xmm3, (%rax) |
| ; SSE2-NEXT: movdqu %xmm2, (%rax) |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v32i16_const: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7] |
| ; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm1 |
| ; AVX1-NEXT: vpavgw 16(%rdi), %xmm0, %xmm2 |
| ; AVX1-NEXT: vpavgw 32(%rdi), %xmm0, %xmm3 |
| ; AVX1-NEXT: vpavgw 48(%rdi), %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm3, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm2, (%rax) |
| ; AVX1-NEXT: vmovdqu %xmm1, (%rax) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v32i16_const: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; AVX2-NEXT: # ymm0 = mem[0,1,0,1] |
| ; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpavgw 32(%rdi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX2-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: avg_v32i16_const: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; AVX512F-NEXT: # ymm0 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpavgw 32(%rdi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) |
| ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: avg_v32i16_const: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %1 = load <32 x i16>, ptr %a |
| %2 = zext <32 x i16> %1 to <32 x i32> |
| %3 = add nuw nsw <32 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8> |
| %4 = lshr <32 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %5 = trunc <32 x i32> %4 to <32 x i16> |
| store <32 x i16> %5, ptr undef, align 4 |
| ret void |
| } |
| |
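; The "_3" variants extend only to i16 (rather than i32) and take and
; return the vectors by value instead of through memory.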
| define <16 x i8> @avg_v16i8_3(<16 x i8> %a, <16 x i8> %b) nounwind { |
| ; SSE2-LABEL: avg_v16i8_3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pavgb %xmm1, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: avg_v16i8_3: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: retq |
| %za = zext <16 x i8> %a to <16 x i16> |
| %zb = zext <16 x i8> %b to <16 x i16> |
| %add = add nuw nsw <16 x i16> %za, %zb |
| %add1 = add nuw nsw <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
| %lshr = lshr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
| %res = trunc <16 x i16> %lshr to <16 x i8> |
| ret <16 x i8> %res |
| } |
| |
| define <32 x i8> @avg_v32i8_3(<32 x i8> %a, <32 x i8> %b) nounwind { |
| ; SSE2-LABEL: avg_v32i8_3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pavgb %xmm2, %xmm0 |
| ; SSE2-NEXT: pavgb %xmm3, %xmm1 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v32i8_3: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 |
| ; AVX1-NEXT: vpavgb %xmm2, %xmm3, %xmm2 |
| ; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v32i8_3: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0 |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: avg_v32i8_3: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0 |
| ; AVX512-NEXT: retq |
| %za = zext <32 x i8> %a to <32 x i16> |
| %zb = zext <32 x i8> %b to <32 x i16> |
| %add = add nuw nsw <32 x i16> %za, %zb |
| %add1 = add nuw nsw <32 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
| %lshr = lshr <32 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
| %res = trunc <32 x i16> %lshr to <32 x i8> |
| ret <32 x i8> %res |
| } |
| |
| define <64 x i8> @avg_v64i8_3(<64 x i8> %a, <64 x i8> %b) nounwind { |
| ; SSE2-LABEL: avg_v64i8_3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pavgb %xmm4, %xmm0 |
| ; SSE2-NEXT: pavgb %xmm5, %xmm1 |
| ; SSE2-NEXT: pavgb %xmm6, %xmm2 |
| ; SSE2-NEXT: pavgb %xmm7, %xmm3 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v64i8_3: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 |
| ; AVX1-NEXT: vpavgb %xmm4, %xmm5, %xmm4 |
| ; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 |
| ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 |
| ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 |
| ; AVX1-NEXT: vpavgb %xmm2, %xmm4, %xmm2 |
| ; AVX1-NEXT: vpavgb %xmm3, %xmm1, %xmm1 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v64i8_3: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpavgb %ymm2, %ymm0, %ymm0 |
| ; AVX2-NEXT: vpavgb %ymm3, %ymm1, %ymm1 |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: avg_v64i8_3: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3 |
| ; AVX512F-NEXT: vpavgb %ymm2, %ymm3, %ymm2 |
| ; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm0 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: avg_v64i8_3: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vpavgb %zmm1, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: retq |
| %za = zext <64 x i8> %a to <64 x i16> |
| %zb = zext <64 x i8> %b to <64 x i16> |
| %add = add nuw nsw <64 x i16> %za, %zb |
| %add1 = add nuw nsw <64 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
| %lshr = lshr <64 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
| %res = trunc <64 x i16> %lshr to <64 x i8> |
| ret <64 x i8> %res |
| } |
| |
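| ; All of these tests match the rounding-average idiom (zext(a) + zext(b) + 1) >> 1. |
| ; A scalar sketch of one lane (illustrative only, not part of the test): |
| ;   uint8_t avgceil_u8(uint8_t a, uint8_t b) { |
| ;     // widen so the sum cannot overflow, then round up on odd sums |
| ;     return (uint8_t)(((uint16_t)a + (uint16_t)b + 1) >> 1); |
| ;   } |
| ; <512 x i8> does not fit in any register, so most of the arguments arrive on |
| ; the stack and the result is returned through the pointer in %rdi; each 16-, |
| ; 32- or 64-byte chunk should still select a pavgb. |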
| define <512 x i8> @avg_v512i8_3(<512 x i8> %a, <512 x i8> %b) nounwind { |
| ; SSE2-LABEL: avg_v512i8_3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq %rdi, %rax |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 496(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 480(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 464(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 448(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 432(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 416(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 400(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 384(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 368(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 352(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 336(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 320(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 304(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 288(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 272(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 256(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 240(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 224(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 208(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 192(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 176(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 160(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 144(%rdi) |
| ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 |
| ; SSE2-NEXT: movdqa %xmm8, 128(%rdi) |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm7 |
| ; SSE2-NEXT: movdqa %xmm7, 112(%rdi) |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm6 |
| ; SSE2-NEXT: movdqa %xmm6, 96(%rdi) |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm5 |
| ; SSE2-NEXT: movdqa %xmm5, 80(%rdi) |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm4 |
| ; SSE2-NEXT: movdqa %xmm4, 64(%rdi) |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm3 |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rdi) |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm2 |
| ; SSE2-NEXT: movdqa %xmm2, 32(%rdi) |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rdi) |
| ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, (%rdi) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: avg_v512i8_3: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: pushq %rbp |
| ; AVX1-NEXT: movq %rsp, %rbp |
| ; AVX1-NEXT: andq $-32, %rsp |
| ; AVX1-NEXT: subq $32, %rsp |
| ; AVX1-NEXT: movq %rdi, %rax |
| ; AVX1-NEXT: vmovdqa 256(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 768(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 496(%rdi) |
| ; AVX1-NEXT: vmovdqa 240(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 752(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 480(%rdi) |
| ; AVX1-NEXT: vmovdqa 224(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 736(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 464(%rdi) |
| ; AVX1-NEXT: vmovdqa 208(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 720(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 448(%rdi) |
| ; AVX1-NEXT: vmovdqa 192(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 704(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 432(%rdi) |
| ; AVX1-NEXT: vmovdqa 176(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 688(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 416(%rdi) |
| ; AVX1-NEXT: vmovdqa 160(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 672(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 400(%rdi) |
| ; AVX1-NEXT: vmovdqa 144(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 656(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 384(%rdi) |
| ; AVX1-NEXT: vmovdqa 128(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 640(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 368(%rdi) |
| ; AVX1-NEXT: vmovdqa 112(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 624(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 352(%rdi) |
| ; AVX1-NEXT: vmovdqa 96(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 608(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 336(%rdi) |
| ; AVX1-NEXT: vmovdqa 80(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 592(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 320(%rdi) |
| ; AVX1-NEXT: vmovdqa 64(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 576(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 304(%rdi) |
| ; AVX1-NEXT: vmovdqa 48(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 560(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 288(%rdi) |
| ; AVX1-NEXT: vmovdqa 32(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 544(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 272(%rdi) |
| ; AVX1-NEXT: vmovdqa 16(%rbp), %xmm8 |
| ; AVX1-NEXT: vpavgb 528(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 256(%rdi) |
| ; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8 |
| ; AVX1-NEXT: vpavgb 512(%rbp), %xmm8, %xmm8 |
| ; AVX1-NEXT: vmovdqa %xmm8, 240(%rdi) |
| ; AVX1-NEXT: vpavgb 496(%rbp), %xmm7, %xmm7 |
| ; AVX1-NEXT: vmovdqa %xmm7, 224(%rdi) |
| ; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7 |
| ; AVX1-NEXT: vpavgb 480(%rbp), %xmm7, %xmm7 |
| ; AVX1-NEXT: vmovdqa %xmm7, 208(%rdi) |
| ; AVX1-NEXT: vpavgb 464(%rbp), %xmm6, %xmm6 |
| ; AVX1-NEXT: vmovdqa %xmm6, 192(%rdi) |
| ; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6 |
| ; AVX1-NEXT: vpavgb 448(%rbp), %xmm6, %xmm6 |
| ; AVX1-NEXT: vmovdqa %xmm6, 176(%rdi) |
| ; AVX1-NEXT: vpavgb 432(%rbp), %xmm5, %xmm5 |
| ; AVX1-NEXT: vmovdqa %xmm5, 160(%rdi) |
| ; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5 |
| ; AVX1-NEXT: vpavgb 416(%rbp), %xmm5, %xmm5 |
| ; AVX1-NEXT: vmovdqa %xmm5, 144(%rdi) |
| ; AVX1-NEXT: vpavgb 400(%rbp), %xmm4, %xmm4 |
| ; AVX1-NEXT: vmovdqa %xmm4, 128(%rdi) |
| ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 |
| ; AVX1-NEXT: vpavgb 384(%rbp), %xmm4, %xmm4 |
| ; AVX1-NEXT: vmovdqa %xmm4, 112(%rdi) |
| ; AVX1-NEXT: vpavgb 368(%rbp), %xmm3, %xmm3 |
| ; AVX1-NEXT: vmovdqa %xmm3, 96(%rdi) |
| ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 |
| ; AVX1-NEXT: vpavgb 352(%rbp), %xmm3, %xmm3 |
| ; AVX1-NEXT: vmovdqa %xmm3, 80(%rdi) |
| ; AVX1-NEXT: vpavgb 336(%rbp), %xmm2, %xmm2 |
| ; AVX1-NEXT: vmovdqa %xmm2, 64(%rdi) |
| ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 |
| ; AVX1-NEXT: vpavgb 320(%rbp), %xmm2, %xmm2 |
| ; AVX1-NEXT: vmovdqa %xmm2, 48(%rdi) |
| ; AVX1-NEXT: vpavgb 304(%rbp), %xmm1, %xmm1 |
| ; AVX1-NEXT: vmovdqa %xmm1, 32(%rdi) |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 |
| ; AVX1-NEXT: vpavgb 288(%rbp), %xmm1, %xmm1 |
| ; AVX1-NEXT: vmovdqa %xmm1, 16(%rdi) |
| ; AVX1-NEXT: vpavgb 272(%rbp), %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovdqa %xmm0, (%rdi) |
| ; AVX1-NEXT: movq %rbp, %rsp |
| ; AVX1-NEXT: popq %rbp |
| ; AVX1-NEXT: vzeroupper |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: avg_v512i8_3: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: pushq %rbp |
| ; AVX2-NEXT: movq %rsp, %rbp |
| ; AVX2-NEXT: andq $-32, %rsp |
| ; AVX2-NEXT: subq $32, %rsp |
| ; AVX2-NEXT: movq %rdi, %rax |
| ; AVX2-NEXT: vmovdqa 240(%rbp), %ymm8 |
| ; AVX2-NEXT: vmovdqa 208(%rbp), %ymm9 |
| ; AVX2-NEXT: vmovdqa 176(%rbp), %ymm10 |
| ; AVX2-NEXT: vmovdqa 144(%rbp), %ymm11 |
| ; AVX2-NEXT: vmovdqa 112(%rbp), %ymm12 |
| ; AVX2-NEXT: vmovdqa 80(%rbp), %ymm13 |
| ; AVX2-NEXT: vmovdqa 48(%rbp), %ymm14 |
| ; AVX2-NEXT: vmovdqa 16(%rbp), %ymm15 |
| ; AVX2-NEXT: vpavgb 272(%rbp), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpavgb 304(%rbp), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpavgb 336(%rbp), %ymm2, %ymm2 |
| ; AVX2-NEXT: vpavgb 368(%rbp), %ymm3, %ymm3 |
| ; AVX2-NEXT: vpavgb 400(%rbp), %ymm4, %ymm4 |
| ; AVX2-NEXT: vpavgb 432(%rbp), %ymm5, %ymm5 |
| ; AVX2-NEXT: vpavgb 464(%rbp), %ymm6, %ymm6 |
| ; AVX2-NEXT: vpavgb 496(%rbp), %ymm7, %ymm7 |
| ; AVX2-NEXT: vpavgb 528(%rbp), %ymm15, %ymm15 |
| ; AVX2-NEXT: vpavgb 560(%rbp), %ymm14, %ymm14 |
| ; AVX2-NEXT: vpavgb 592(%rbp), %ymm13, %ymm13 |
| ; AVX2-NEXT: vpavgb 624(%rbp), %ymm12, %ymm12 |
| ; AVX2-NEXT: vpavgb 656(%rbp), %ymm11, %ymm11 |
| ; AVX2-NEXT: vpavgb 688(%rbp), %ymm10, %ymm10 |
| ; AVX2-NEXT: vpavgb 720(%rbp), %ymm9, %ymm9 |
| ; AVX2-NEXT: vpavgb 752(%rbp), %ymm8, %ymm8 |
| ; AVX2-NEXT: vmovdqa %ymm8, 480(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm9, 448(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm10, 416(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm11, 384(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm12, 352(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm13, 320(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm14, 288(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm15, 256(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm7, 224(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm6, 192(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm5, 160(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm4, 128(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm3, 96(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm2, 64(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rdi) |
| ; AVX2-NEXT: movq %rbp, %rsp |
| ; AVX2-NEXT: popq %rbp |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: avg_v512i8_3: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: pushq %rbp |
| ; AVX512F-NEXT: movq %rsp, %rbp |
| ; AVX512F-NEXT: andq $-64, %rsp |
| ; AVX512F-NEXT: subq $64, %rsp |
| ; AVX512F-NEXT: movq %rdi, %rax |
| ; AVX512F-NEXT: vpavgb 16(%rbp), %ymm0, %ymm8 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0 |
| ; AVX512F-NEXT: vpavgb 48(%rbp), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpavgb 80(%rbp), %ymm1, %ymm9 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm1 |
| ; AVX512F-NEXT: vpavgb 112(%rbp), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpavgb 144(%rbp), %ymm2, %ymm10 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm2 |
| ; AVX512F-NEXT: vpavgb 176(%rbp), %ymm2, %ymm2 |
| ; AVX512F-NEXT: vpavgb 208(%rbp), %ymm3, %ymm11 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm3, %ymm3 |
| ; AVX512F-NEXT: vpavgb 240(%rbp), %ymm3, %ymm3 |
| ; AVX512F-NEXT: vpavgb 272(%rbp), %ymm4, %ymm12 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm4, %ymm4 |
| ; AVX512F-NEXT: vpavgb 304(%rbp), %ymm4, %ymm4 |
| ; AVX512F-NEXT: vpavgb 336(%rbp), %ymm5, %ymm13 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm5, %ymm5 |
| ; AVX512F-NEXT: vpavgb 368(%rbp), %ymm5, %ymm5 |
| ; AVX512F-NEXT: vpavgb 400(%rbp), %ymm6, %ymm14 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm6, %ymm6 |
| ; AVX512F-NEXT: vpavgb 432(%rbp), %ymm6, %ymm6 |
| ; AVX512F-NEXT: vpavgb 464(%rbp), %ymm7, %ymm15 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm7, %ymm7 |
| ; AVX512F-NEXT: vpavgb 496(%rbp), %ymm7, %ymm7 |
| ; AVX512F-NEXT: vmovdqa %ymm7, 480(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm15, 448(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm6, 416(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm14, 384(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm5, 352(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm13, 320(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm4, 288(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm12, 256(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm3, 224(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm11, 192(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm2, 160(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm10, 128(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 96(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm9, 64(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rdi) |
| ; AVX512F-NEXT: vmovdqa %ymm8, (%rdi) |
| ; AVX512F-NEXT: movq %rbp, %rsp |
| ; AVX512F-NEXT: popq %rbp |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: avg_v512i8_3: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: pushq %rbp |
| ; AVX512BW-NEXT: movq %rsp, %rbp |
| ; AVX512BW-NEXT: andq $-64, %rsp |
| ; AVX512BW-NEXT: subq $64, %rsp |
| ; AVX512BW-NEXT: movq %rdi, %rax |
| ; AVX512BW-NEXT: vpavgb 16(%rbp), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpavgb 80(%rbp), %zmm1, %zmm1 |
| ; AVX512BW-NEXT: vpavgb 144(%rbp), %zmm2, %zmm2 |
| ; AVX512BW-NEXT: vpavgb 208(%rbp), %zmm3, %zmm3 |
| ; AVX512BW-NEXT: vpavgb 272(%rbp), %zmm4, %zmm4 |
| ; AVX512BW-NEXT: vpavgb 336(%rbp), %zmm5, %zmm5 |
| ; AVX512BW-NEXT: vpavgb 400(%rbp), %zmm6, %zmm6 |
| ; AVX512BW-NEXT: vpavgb 464(%rbp), %zmm7, %zmm7 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, 448(%rdi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, 384(%rdi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, 320(%rdi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, 256(%rdi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, 192(%rdi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%rdi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, 64(%rdi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdi) |
| ; AVX512BW-NEXT: movq %rbp, %rsp |
| ; AVX512BW-NEXT: popq %rbp |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %za = zext <512 x i8> %a to <512 x i16> |
| %zb = zext <512 x i8> %b to <512 x i16> |
| %add = add nuw nsw <512 x i16> %za, %zb |
| %add1 = add nuw nsw <512 x i16> %add, splat (i16 1) |
| %lshr = lshr <512 x i16> %add1, splat (i16 1) |
| %res = trunc <512 x i16> %lshr to <512 x i8> |
| ret <512 x i8> %res |
| } |
| |
| ; This is not an avgceilu, but it's structurally similar and previously caused a crash |
| ; because the constants can't be read with APInt::getZExtValue. |
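| ; With <16 x i128> the -1 splat is an all-ones 128-bit APInt, and |
| ; APInt::getZExtValue asserts for values with more than 64 active bits, so the |
| ; avg matcher has to reject the constant instead of reading it (and crashing). |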
| define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind { |
| ; SSE2-LABEL: not_avg_v16i8_wide_constants: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movaps (%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa (%rsi), %xmm0 |
| ; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm2 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm3 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm4 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm5 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm6 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm7 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm8 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm10 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm9 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm11 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm12 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm13 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm14 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: movd %eax, %xmm15 |
| ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax |
| ; SSE2-NEXT: decl %eax |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] |
| ; SSE2-NEXT: movd %eax, %xmm2 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,0,0] |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,0,0] |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,0,0,0] |
| ; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3] |
| ; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1] |
| ; SSE2-NEXT: pxor %xmm3, %xmm3 |
| ; SSE2-NEXT: movdqa %xmm0, %xmm1 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] |
| ; SSE2-NEXT: movapd %xmm4, %xmm5 |
| ; SSE2-NEXT: andpd %xmm1, %xmm5 |
| ; SSE2-NEXT: xorpd %xmm4, %xmm1 |
| ; SSE2-NEXT: psrlw $1, %xmm1 |
| ; SSE2-NEXT: paddw %xmm5, %xmm1 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3] |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,0,0,0] |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1] |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm14[0,0,0,0] |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] |
| ; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3] |
| ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm9[0],xmm2[1] |
| ; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15] |
| ; SSE2-NEXT: movapd %xmm2, %xmm3 |
| ; SSE2-NEXT: andpd %xmm0, %xmm3 |
| ; SSE2-NEXT: xorpd %xmm2, %xmm0 |
| ; SSE2-NEXT: psrlw $1, %xmm0 |
| ; SSE2-NEXT: paddw %xmm3, %xmm0 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] |
| ; SSE2-NEXT: pand %xmm2, %xmm0 |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqu %xmm1, (%rax) |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: not_avg_v16i8_wide_constants: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero |
| ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero |
| ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero |
| ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero |
| ; AVX1-NEXT: vpextrw $7, %xmm3, %edx |
| ; AVX1-NEXT: vpextrw $6, %xmm3, %ecx |
| ; AVX1-NEXT: vpextrw $5, %xmm3, %eax |
| ; AVX1-NEXT: decl %edx |
| ; AVX1-NEXT: vmovd %edx, %xmm4 |
| ; AVX1-NEXT: vpextrw $4, %xmm3, %edx |
| ; AVX1-NEXT: decl %ecx |
| ; AVX1-NEXT: vmovd %ecx, %xmm5 |
| ; AVX1-NEXT: vpextrw $1, %xmm3, %ecx |
| ; AVX1-NEXT: decl %eax |
| ; AVX1-NEXT: vmovd %eax, %xmm6 |
| ; AVX1-NEXT: vpextrw $0, %xmm3, %eax |
| ; AVX1-NEXT: decl %edx |
| ; AVX1-NEXT: vmovd %edx, %xmm7 |
| ; AVX1-NEXT: vpextrw $3, %xmm3, %edx |
| ; AVX1-NEXT: decq %rcx |
| ; AVX1-NEXT: vmovq %rcx, %xmm8 |
| ; AVX1-NEXT: vpextrw $2, %xmm3, %ecx |
| ; AVX1-NEXT: decq %rax |
| ; AVX1-NEXT: vmovq %rax, %xmm3 |
| ; AVX1-NEXT: vpextrw $7, %xmm2, %eax |
| ; AVX1-NEXT: decl %edx |
| ; AVX1-NEXT: vmovd %edx, %xmm9 |
| ; AVX1-NEXT: vpextrw $6, %xmm2, %edx |
| ; AVX1-NEXT: decl %ecx |
| ; AVX1-NEXT: vmovd %ecx, %xmm10 |
| ; AVX1-NEXT: vpextrw $5, %xmm2, %ecx |
| ; AVX1-NEXT: decl %eax |
| ; AVX1-NEXT: vmovd %eax, %xmm11 |
| ; AVX1-NEXT: vpextrw $4, %xmm2, %eax |
| ; AVX1-NEXT: decl %edx |
| ; AVX1-NEXT: vmovd %edx, %xmm12 |
| ; AVX1-NEXT: vpextrw $1, %xmm2, %edx |
| ; AVX1-NEXT: decl %ecx |
| ; AVX1-NEXT: vmovd %ecx, %xmm13 |
| ; AVX1-NEXT: vpextrw $0, %xmm2, %ecx |
| ; AVX1-NEXT: decl %eax |
| ; AVX1-NEXT: vmovd %eax, %xmm14 |
| ; AVX1-NEXT: vpextrw $3, %xmm2, %eax |
| ; AVX1-NEXT: decq %rdx |
| ; AVX1-NEXT: vmovq %rdx, %xmm15 |
| ; AVX1-NEXT: vpextrw $2, %xmm2, %edx |
| ; AVX1-NEXT: decq %rcx |
| ; AVX1-NEXT: vmovq %rcx, %xmm2 |
| ; AVX1-NEXT: decl %eax |
| ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] |
| ; AVX1-NEXT: vmovd %eax, %xmm5 |
| ; AVX1-NEXT: decl %edx |
| ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3] |
| ; AVX1-NEXT: vmovd %edx, %xmm7 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0] |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1] |
| ; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7] |
| ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3] |
| ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3] |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,1,1] |
| ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5,6,7] |
| ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7] |
| ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3] |
| ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3] |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0] |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1] |
| ; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7] |
| ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3] |
| ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3] |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1] |
| ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5,6,7] |
| ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7] |
| ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 |
| ; AVX1-NEXT: vandps %ymm0, %ymm2, %ymm1 |
| ; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0 |
| ; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2 |
| ; AVX1-NEXT: vpaddw %xmm2, %xmm1, %xmm2 |
| ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255] |
| ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpand %xmm1, %xmm2, %xmm1 |
| ; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX1-NEXT: vzeroupper |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: not_avg_v16i8_wide_constants: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero |
| ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero |
| ; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0 |
| ; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0 |
| ; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0 |
| ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: not_avg_v16i8_wide_constants: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero |
| ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero |
| ; AVX512F-NEXT: vpaddw %ymm1, %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddw %ymm1, %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero |
| ; AVX512F-NEXT: vpmovdb %zmm0, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: not_avg_v16i8_wide_constants: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero |
| ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero |
| ; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0 |
| ; AVX512BW-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0 |
| ; AVX512BW-NEXT: vpsrlw $1, %ymm0, %ymm0 |
| ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 |
| ; AVX512BW-NEXT: vmovdqu %xmm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %1 = load <16 x i8>, ptr %a |
| %2 = load <16 x i8>, ptr %b |
| %3 = zext <16 x i8> %1 to <16 x i128> |
| %4 = zext <16 x i8> %2 to <16 x i128> |
| %5 = add <16 x i128> %3, <i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1> |
| %6 = add <16 x i128> %5, %4 |
| %7 = lshr <16 x i128> %6, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1> |
| %8 = trunc <16 x i128> %7 to <16 x i8> |
| store <16 x i8> %8, ptr undef, align 4 |
| ret void |
| } |
| |
| ; Make sure we don't fail on single-element vectors. |
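| ; There is no 1-element pavgb, so this lowers to scalar code: the leal below |
| ; computes x + y + 1 in a single instruction and the shrl divides by two. |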
| define <1 x i8> @avg_v1i8(<1 x i8> %x, <1 x i8> %y) { |
| ; CHECK-LABEL: avg_v1i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: movzbl %sil, %eax |
| ; CHECK-NEXT: movzbl %dil, %ecx |
| ; CHECK-NEXT: leal 1(%rcx,%rax), %eax |
| ; CHECK-NEXT: shrl %eax |
| ; CHECK-NEXT: # kill: def $al killed $al killed $eax |
| ; CHECK-NEXT: retq |
| %a = zext <1 x i8> %x to <1 x i16> |
| %b = zext <1 x i8> %y to <1 x i16> |
| %c = add <1 x i16> %a, %b |
| %d = add <1 x i16> %c, <i16 1> |
| %e = lshr <1 x i16> %d, <i16 1> |
| %f = trunc <1 x i16> %e to <1 x i8> |
| ret <1 x i8> %f |
| } |
| |
| ; _mm_avg_epu16( _mm_slli_epi16(a, 2), _mm_slli_epi16(b, 2)) |
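| ; A C sketch of that expression (reconstructed from the comment above, using |
| ; the standard <emmintrin.h> intrinsics): |
| ;   #include <emmintrin.h> |
| ;   __m128i pr41316(__m128i a, __m128i b) { |
| ;     return _mm_avg_epu16(_mm_slli_epi16(a, 2), _mm_slli_epi16(b, 2)); |
| ;   } |
| ; After the shift by 2 the low bit of every lane is zero, so the +1 of the |
| ; rounding average is canonicalized to the 'or ..., 1' in the IR below; the |
| ; backend still has to recognize the idiom and select pavgw/vpavgw. |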
| define <2 x i64> @PR41316(<2 x i64>, <2 x i64>) { |
| ; SSE2-LABEL: PR41316: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: psllw $2, %xmm0 |
| ; SSE2-NEXT: psllw $2, %xmm1 |
| ; SSE2-NEXT: pavgw %xmm1, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: PR41316: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vpsllw $2, %xmm0, %xmm0 |
| ; AVX-NEXT: vpsllw $2, %xmm1, %xmm1 |
| ; AVX-NEXT: vpavgw %xmm0, %xmm1, %xmm0 |
| ; AVX-NEXT: retq |
| %3 = bitcast <2 x i64> %0 to <8 x i16> |
| %4 = shl <8 x i16> %3, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2> |
| %5 = bitcast <2 x i64> %1 to <8 x i16> |
| %6 = shl <8 x i16> %5, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2> |
| %7 = zext <8 x i16> %6 to <8 x i32> |
| %8 = or <8 x i16> %4, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> |
| %9 = zext <8 x i16> %8 to <8 x i32> |
| %10 = add nuw nsw <8 x i32> %9, %7 |
| %11 = lshr <8 x i32> %10, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %12 = trunc <8 x i32> %11 to <8 x i16> |
| %13 = bitcast <8 x i16> %12 to <2 x i64> |
| ret <2 x i64> %13 |
| } |
| |
| ; shuffle(avg(shuffle(),shuffle())) -> avg(shuffle(),shuffle()) |
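| ; pavg is lane-wise, so a shuffle after the intrinsic can be folded into the |
| ; shuffles feeding it. In dword units the inner %x shuffle is [3,3,1,1] and the |
| ; outer shuffle is [3,2,1,0]; composing gives [1,1,3,3], exactly the pshufd |
| ; mask used below (and likewise [0,0,2,2] for %y). |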
| define <16 x i8> @fold_avgb_shuffles(<16 x i8> %x, <16 x i8> %y) { |
| ; SSE2-LABEL: fold_avgb_shuffles: |
| ; SSE2: # %bb.0: # %entry |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] |
| ; SSE2-NEXT: pavgb %xmm1, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: fold_avgb_shuffles: |
| ; AVX: # %bb.0: # %entry |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] |
| ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: retq |
| entry: |
| %0 = shufflevector <16 x i8> %x, <16 x i8> poison, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> |
| %1 = shufflevector <16 x i8> %y, <16 x i8> poison, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3> |
| %2 = tail call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %0, <16 x i8> %1) |
| %3 = shufflevector <16 x i8> %2, <16 x i8> poison, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3> |
| ret <16 x i8> %3 |
| } |
| declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) |
| |
| define <8 x i16> @fold_avgw_shuffles(<8 x i16> %x, <8 x i16> %y) { |
| ; SSE2-LABEL: fold_avgw_shuffles: |
| ; SSE2: # %bb.0: # %entry |
| ; SSE2-NEXT: pavgw %xmm1, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: fold_avgw_shuffles: |
| ; AVX: # %bb.0: # %entry |
| ; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: retq |
| entry: |
| %0 = shufflevector <8 x i16> %x, <8 x i16> poison, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1> |
| %1 = shufflevector <8 x i16> %y, <8 x i16> poison, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1> |
| %2 = tail call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %0, <8 x i16> %1) |
| %3 = shufflevector <8 x i16> %2, <8 x i16> poison, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1> |
| ret <8 x i16> %3 |
| } |
| declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) |
| |
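| ; PR52131: a chain of two rounding averages, avg(avg(a, b), c). A 16-bit |
| ; average already fits in 16 bits, so the 'and ..., 65535' between the stages |
| ; is a no-op and both stages should select pavgw. |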
| define <8 x i16> @PR52131_pavg_chain(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) { |
| ; SSE2-LABEL: PR52131_pavg_chain: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pavgw %xmm1, %xmm0 |
| ; SSE2-NEXT: pavgw %xmm2, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: PR52131_pavg_chain: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vpavgw %xmm0, %xmm2, %xmm0 |
| ; AVX-NEXT: retq |
| %i = zext <8 x i16> %a to <8 x i32> |
| %i1 = zext <8 x i16> %b to <8 x i32> |
| %i2 = add nuw nsw <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i3 = add nuw nsw <8 x i32> %i2, %i1 |
| %i4 = lshr <8 x i32> %i3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i5 = and <8 x i32> %i4, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535> |
| %i6 = zext <8 x i16> %c to <8 x i32> |
| %i7 = add nuw nsw <8 x i32> %i6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i8 = add nuw nsw <8 x i32> %i7, %i5 |
| %i9 = lshr <8 x i32> %i8, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i10 = trunc <8 x i32> %i9 to <8 x i16> |
| ret <8 x i16> %i10 |
| } |
| |
| define <8 x i16> @PR52131_pavg_chainlike_but_not_zext(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) { |
| ; SSE2-LABEL: PR52131_pavg_chainlike_but_not_zext: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pavgw %xmm1, %xmm0 |
| ; SSE2-NEXT: pavgw %xmm2, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: PR52131_pavg_chainlike_but_not_zext: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vpavgw %xmm0, %xmm2, %xmm0 |
| ; AVX-NEXT: retq |
| %i = zext <8 x i16> %a to <8 x i32> |
| %i1 = zext <8 x i16> %b to <8 x i32> |
| %i2 = add nuw nsw <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i3 = add nuw nsw <8 x i32> %i2, %i1 |
| %i4 = lshr <8 x i32> %i3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i5 = and <8 x i32> %i4, <i32 131071, i32 131071, i32 131071, i32 131071, i32 131071, i32 131071, i32 131071, i32 131071> |
| %i6 = zext <8 x i16> %c to <8 x i32> |
| %i7 = add nuw nsw <8 x i32> %i6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i8 = add nuw nsw <8 x i32> %i7, %i5 |
| %i9 = lshr <8 x i32> %i8, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i10 = trunc <8 x i32> %i9 to <8 x i16> |
| ret <8 x i16> %i10 |
| } |
| |
| define <8 x i16> @PR52131_pavg_with_mask(<8 x i32> %a, <8 x i16> %b) { |
| ; SSE2-LABEL: PR52131_pavg_with_mask: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pslld $16, %xmm1 |
| ; SSE2-NEXT: psrad $16, %xmm1 |
| ; SSE2-NEXT: pslld $16, %xmm0 |
| ; SSE2-NEXT: psrad $16, %xmm0 |
| ; SSE2-NEXT: packssdw %xmm1, %xmm0 |
| ; SSE2-NEXT: pavgw %xmm2, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: PR52131_pavg_with_mask: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 |
| ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgw %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vzeroupper |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: PR52131_pavg_with_mask: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] |
| ; AVX2-NEXT: vpavgw %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: PR52131_pavg_with_mask: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 |
| ; AVX512-NEXT: vpmovdw %zmm0, %ymm0 |
| ; AVX512-NEXT: vpavgw %xmm0, %xmm1, %xmm0 |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %i = and <8 x i32> %a, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535> |
| %i3 = zext <8 x i16> %b to <8 x i32> |
| %i4 = add nuw nsw <8 x i32> %i3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i5 = add nuw nsw <8 x i32> %i4, %i |
| %i6 = lshr <8 x i32> %i5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i7 = trunc <8 x i32> %i6 to <8 x i16> |
| ret <8 x i16> %i7 |
| } |
| |
| define <8 x i16> @PR52131_not_zext_with_constant(<8 x i32> %a) { |
| ; SSE2-LABEL: PR52131_not_zext_with_constant: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pslld $16, %xmm1 |
| ; SSE2-NEXT: psrad $16, %xmm1 |
| ; SSE2-NEXT: pslld $16, %xmm0 |
| ; SSE2-NEXT: psrad $16, %xmm0 |
| ; SSE2-NEXT: packssdw %xmm1, %xmm0 |
| ; SSE2-NEXT: pavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX1-LABEL: PR52131_not_zext_with_constant: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 |
| ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX1-NEXT: vzeroupper |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: PR52131_not_zext_with_constant: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] |
| ; AVX2-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: PR52131_not_zext_with_constant: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 |
| ; AVX512-NEXT: vpmovdw %zmm0, %ymm0 |
| ; AVX512-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %i = and <8 x i32> %a, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535> |
| %i1 = add nuw nsw <8 x i32> %i, <i32 43, i32 43, i32 43, i32 43, i32 43, i32 43, i32 43, i32 43> |
| %i2 = lshr <8 x i32> %i1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> |
| %i3 = trunc <8 x i32> %i2 to <8 x i16> |
| ret <8 x i16> %i3 |
| } |
| |
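| ; PR95284: scalar arithmetic, ((zext(a0) - 1) >> 1) + 1 with the low bit then |
| ; cleared; a0 == 0 is the interesting input, since the decrement then wraps |
| ; across the full 64 bits (presumably what the combine previously mishandled). |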
| define i64 @PR95284(i32 %a0) { |
| ; CHECK-LABEL: PR95284: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: movl %edi, %eax |
| ; CHECK-NEXT: decq %rax |
| ; CHECK-NEXT: shrq %rax |
| ; CHECK-NEXT: incq %rax |
| ; CHECK-NEXT: andq $-2, %rax |
| ; CHECK-NEXT: retq |
| %ext = zext nneg i32 %a0 to i64 |
| %dec = add i64 %ext, -1 |
| %srl = lshr i64 %dec, 1 |
| %inc = add nuw nsw i64 %srl, 1 |
| %res = and i64 %inc, 9223372036854775806 |
| ret i64 %res |
| } |