; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-FAST-ALL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-FAST-PERLANE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,AVX,AVX512

; fold (sra 0, x) -> 0
define <4 x i32> @combine_vec_ashr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (sra -1, x) -> -1
define <4 x i32> @combine_vec_ashr_allones(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_allones:
; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_allones:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
  ret <4 x i32> %1
}

; fold (sra x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_ashr_outofrange0(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_outofrange0:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_ashr_outofrange1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_outofrange1:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_ashr_outofrange2(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_outofrange2:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 undef>
  ret <4 x i32> %1
}

; fold (sra x, 0) -> x
define <4 x i32> @combine_vec_ashr_by_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_by_zero:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = ashr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
define <4 x i32> @combine_vec_ashr_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr0:
; SSE: # %bb.0:
; SSE-NEXT: psrad $6, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr0:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $6, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = ashr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_ashr_ashr1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $10, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $8, %xmm2
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $6, %xmm1
; SSE2-NEXT: psrad $4, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_ashr_ashr1:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $10, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrad $6, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $8, %xmm1
; SSE41-NEXT: psrad $4, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr1:
; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = ashr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr2:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr2:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = ashr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_ashr_ashr3:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $27, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
; SSE2-NEXT: psrad $15, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_ashr_ashr3:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $27, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrad $15, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr3:
; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 1, i32 5, i32 50, i32 27>
  %2 = ashr <4 x i32> %1, <i32 33, i32 10, i32 33, i32 0>
  ret <4 x i32> %2
}

; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE2-LABEL: combine_vec_ashr_trunc_and:
; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE2-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad %xmm2, %xmm3
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad %xmm4, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psrad %xmm3, %xmm4
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT: psrad %xmm1, %xmm0
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_ashr_trunc_and:
; SSE41: # %bb.0:
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE41-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrad %xmm2, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: psrad %xmm4, %xmm5
; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrad %xmm1, %xmm3
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE41-NEXT: psrad %xmm1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE41-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_and:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-ALL-LABEL: combine_vec_ashr_trunc_and:
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,2,4,6,0,0,0,0]
; AVX2-FAST-ALL-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-ALL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-FAST-ALL-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: combine_vec_ashr_trunc_and:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-FAST-PERLANE-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-FAST-PERLANE-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; AVX512-LABEL: combine_vec_ashr_trunc_and:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqd %ymm1, %xmm1
; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %x, %2
  ret <4 x i32> %3
}

; fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
; if c1 is equal to the number of bits the trunc removes
define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
; SSE2-LABEL: combine_vec_ashr_trunc_lshr:
; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: psrad $3, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: psrad $2, %xmm2
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: psrad $1, %xmm1
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_ashr_trunc_lshr:
; SSE41: # %bb.0:
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: psrad $2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: psrad $1, %xmm0
; SSE41-NEXT: psrad $3, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_lshr:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-SLOW-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-ALL-LABEL: combine_vec_ashr_trunc_lshr:
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} xmm1 = [1,3,5,7]
; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-ALL-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: combine_vec_ashr_trunc_lshr:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-FAST-PERLANE-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; AVX512-LABEL: combine_vec_ashr_trunc_lshr:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %ymm0, %xmm0
; AVX512-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %3
}

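; same fold with splat shift amounts, truncating <16 x i32> to <16 x i8>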
define <16 x i8> @combine_vec_ashr_trunc_lshr_splat(<16 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_lshr_splat:
; SSE: # %bb.0:
; SSE-NEXT: psrad $26, %xmm3
; SSE-NEXT: psrad $26, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: psrad $26, %xmm1
; SSE-NEXT: psrad $26, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: combine_vec_ashr_trunc_lshr_splat:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $26, %ymm1, %ymm1
; AVX2-NEXT: vpsrad $26, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: combine_vec_ashr_trunc_lshr_splat:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $26, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = lshr <16 x i32> %x, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
  %2 = trunc <16 x i32> %1 to <16 x i8>
  %3 = ashr <16 x i8> %2, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <16 x i8> %3
}

; fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
; if c1 is equal to the number of bits the trunc removes
define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
; SSE2-LABEL: combine_vec_ashr_trunc_ashr:
; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: psrad $3, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: psrad $2, %xmm2
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: psrad $1, %xmm1
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_ashr_trunc_ashr:
; SSE41: # %bb.0:
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: psrad $2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: psrad $1, %xmm0
; SSE41-NEXT: psrad $3, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_ashr:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-SLOW-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-ALL-LABEL: combine_vec_ashr_trunc_ashr:
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} xmm1 = [1,3,5,7]
; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-ALL-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: combine_vec_ashr_trunc_ashr:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-FAST-PERLANE-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; AVX512-LABEL: combine_vec_ashr_trunc_ashr:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %ymm0, %xmm0
; AVX512-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = ashr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %3
}

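; same fold with splat shift amounts, truncating <8 x i32> to <8 x i16>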
define <8 x i16> @combine_vec_ashr_trunc_ashr_splat(<8 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_ashr_splat:
; SSE: # %bb.0:
; SSE-NEXT: psrad $19, %xmm1
; SSE-NEXT: psrad $19, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: combine_vec_ashr_trunc_ashr_splat:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $19, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: combine_vec_ashr_trunc_ashr_splat:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $19, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = ashr <8 x i32> %x, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %2 = trunc <8 x i32> %1 to <8 x i16>
  %3 = ashr <8 x i16> %2, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <8 x i16> %3
}

; If the sign bit is known to be zero, switch this to a SRL.
define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: combine_vec_ashr_positive:
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrld %xmm2, %xmm3
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrld %xmm4, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psrld %xmm3, %xmm4
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT: psrld %xmm1, %xmm0
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_ashr_positive:
; SSE41: # %bb.0:
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrld %xmm2, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: psrld %xmm4, %xmm5
; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrld %xmm1, %xmm3
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE41-NEXT: psrld %xmm1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_positive:
; AVX: # %bb.0:
; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 15, i32 255, i32 4095, i32 65535>
  %2 = ashr <4 x i32> %1, %y
  ret <4 x i32> %2
}

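; fold (sra (and x, 1023), 10) -> 0 since all possibly-set bits are shifted out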
define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive_splat:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_positive_splat:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 1023, i32 1023, i32 1023, i32 1023>
  %2 = ashr <4 x i32> %1, <i32 10, i32 10, i32 10, i32 10>
  ret <4 x i32> %2
}

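; shift amounts clamped to bitwidth-1 with umin; the clamp is redundant when the
; target's variable shift (vpsrav*) already saturates out-of-range amounts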
define <8 x i16> @combine_vec8i16_ashr_clamped(<8 x i16> %x, <8 x i16> %y) {
; SSE2-LABEL: combine_vec8i16_ashr_clamped:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psubusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: psubw %xmm2, %xmm1
; SSE2-NEXT: psllw $12, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: psraw $4, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: psraw $2, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: psraw $15, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: psraw $1, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec8i16_ashr_clamped:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psllw $12, %xmm0
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: paddw %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psraw $8, %xmm3
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psraw $4, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psraw $2, %xmm3
; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psraw $1, %xmm3
; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX2-LABEL: combine_vec8i16_ashr_clamped:
; AVX2: # %bb.0:
; AVX2-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: combine_vec8i16_ashr_clamped:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsravw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %y, <8 x i16> <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>)
  %2 = ashr <8 x i16> %x, %1
  ret <8 x i16> %2
}

define <4 x i32> @combine_vec4i32_ashr_clamped(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: combine_vec4i32_ashr_clamped:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: psrld $27, %xmm2
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad %xmm1, %xmm3
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad %xmm4, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psrad %xmm3, %xmm4
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
; SSE2-NEXT: psrad %xmm2, %xmm0
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec4i32_ashr_clamped:
; SSE41: # %bb.0:
; SSE41-NEXT: pminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrad %xmm2, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: psrad %xmm4, %xmm5
; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrad %xmm1, %xmm3
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE41-NEXT: psrad %xmm1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec4i32_ashr_clamped:
; AVX: # %bb.0:
; AVX-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %y, <4 x i32> <i32 31, i32 31, i32 31, i32 31>)
  %2 = ashr <4 x i32> %x, %1
  ret <4 x i32> %2
}

define <4 x i64> @combine_vec4i64_ashr_clamped(<4 x i64> %x, <4 x i64> %y) {
; SSE2-LABEL: combine_vec4i64_ashr_clamped:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483711,2147483711,2147483711,2147483711]
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: pcmpgtd %xmm6, %xmm8
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE2-NEXT: pcmpeqd %xmm5, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [63,63]
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: pandn %xmm6, %xmm4
; SSE2-NEXT: por %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpgtd %xmm8, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pcmpeqd %xmm5, %xmm3
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: pandn %xmm6, %xmm3
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: psrlq %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
; SSE2-NEXT: movdqa %xmm2, %xmm7
; SSE2-NEXT: psrlq %xmm6, %xmm7
; SSE2-NEXT: movsd {{.*#+}} xmm7 = xmm5[0],xmm7[1]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: psrlq %xmm3, %xmm5
; SSE2-NEXT: psrlq %xmm6, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
; SSE2-NEXT: xorpd %xmm7, %xmm0
; SSE2-NEXT: psubq %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: psrlq %xmm4, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
; SSE2-NEXT: psrlq %xmm5, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: psrlq %xmm4, %xmm3
; SSE2-NEXT: psrlq %xmm5, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
; SSE2-NEXT: xorpd %xmm2, %xmm1
; SSE2-NEXT: psubq %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec4i64_ashr_clamped:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT: movdqa %xmm3, %xmm6
; SSE41-NEXT: pxor %xmm7, %xmm6
; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259519,9223372039002259519]
; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm8, %xmm6
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483711,2147483711,2147483711,2147483711]
; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: pcmpgtd %xmm9, %xmm0
; SSE41-NEXT: pand %xmm6, %xmm0
; SSE41-NEXT: movapd {{.*#+}} xmm9 = [63,63]
; SSE41-NEXT: movapd %xmm9, %xmm6
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm6
; SSE41-NEXT: pxor %xmm2, %xmm7
; SSE41-NEXT: pcmpeqd %xmm7, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
; SSE41-NEXT: pand %xmm8, %xmm5
; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrlq %xmm9, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm9[2,3,2,3]
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: psrlq %xmm3, %xmm5
; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7]
; SSE41-NEXT: movdqa %xmm4, %xmm2
; SSE41-NEXT: psrlq %xmm9, %xmm2
; SSE41-NEXT: psrlq %xmm3, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; SSE41-NEXT: pxor %xmm5, %xmm4
; SSE41-NEXT: psubq %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrlq %xmm6, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
; SSE41-NEXT: psrlq %xmm3, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrlq %xmm6, %xmm2
; SSE41-NEXT: psrlq %xmm3, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pxor %xmm0, %xmm1
; SSE41-NEXT: psubq %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX2-LABEL: combine_vec4i64_ashr_clamped:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775870,9223372036854775870,9223372036854775870,9223372036854775870]
; AVX2-NEXT: vpcmpgtq %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [63,63,63,63]
; AVX2-NEXT: vblendvpd %ymm3, %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: combine_vec4i64_ashr_clamped:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsravq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %y, <4 x i64> <i64 63, i64 63, i64 63, i64 63>)
  %2 = ashr <4 x i64> %x, %1
  ret <4 x i64> %2
}
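
; explicit declarations for the umin intrinsics used above
declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)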