; blob: 137ce2d618834f09d8e2a1c66fe970b417285412
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86-SSE2
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2,+avx | FileCheck %s --check-prefixes=X86-AVX,X86-AVX1
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2,+avx,+avx2 | FileCheck %s --check-prefixes=X86-AVX,X86-AVX2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+avx | FileCheck %s --check-prefixes=X64-AVX,X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+avx,+avx2 | FileCheck %s --check-prefixes=X64-AVX,X64-AVX2
; The mask is all-ones, potentially shifted.
;------------------------------------------------------------------------------;
; 128-bit vector; 8-bit elements = 16 elements
;------------------------------------------------------------------------------;
; lshr
define <16 x i8> @test_128_i8_x_16_7_mask_lshr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_lshr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_lshr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_lshr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $1, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_7_mask_lshr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b0000_0111) >> 1: x86 has no vector i8 shift, so psrlw shifts whole
; 16-bit lanes; mask bit 0 is set, so a second pand after the shift clears the
; bit pulled in from the neighboring byte.
  %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %t1 = lshr <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_lshr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b0001_1100) >> 1: mask bit 0 is clear, so the word-wide psrlw pulls
; nothing across the byte boundary — a single pand before the shift suffices.
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = lshr <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_lshr_2(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_2:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $2, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_2:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_2:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $2, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_2:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b0001_1100) >> 2: mask bits 1:0 are clear, so the 16-bit psrlw leaks
; nothing between bytes — one pand only.
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = lshr <16 x i8> %t0, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_lshr_3(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_3:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $3, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_3:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_3:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $3, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_3:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b0001_1100) >> 3: mask bit 2 survives the shift across the byte
; boundary under the word-wide psrlw, so a cleanup pand follows the shift.
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = lshr <16 x i8> %t0, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_lshr_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_4:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_4:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_4:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $4, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_4:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b0001_1100) >> 4: mask bits 3:2 cross the byte boundary under the
; 16-bit shift, hence the second pand to scrub them.
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = lshr <16 x i8> %t0, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_224_mask_lshr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b1110_0000) >> 1: mask bit 0 is clear, so no bit leaks between bytes
; under the word-wide psrlw — pand then shift, no cleanup.
  %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %t1 = lshr <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_224_mask_lshr_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_4:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_4:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_4:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $4, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_4:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b1110_0000) >> 4: mask bits 3:0 are clear, so the 16-bit shift leaks
; nothing into the neighboring byte — single pand.
  %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %t1 = lshr <16 x i8> %t0, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_224_mask_lshr_5(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_5:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $5, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_5:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psrlw $5, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_5:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 224) >> 5 is commuted to (x >> 5) & 7: shifting first lets one pand
; both apply the mask and scrub the cross-byte bits.
  %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %t1 = lshr <16 x i8> %t0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_224_mask_lshr_6(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_6:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $6, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_6:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psrlw $6, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_6:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 224) >> 6 is commuted to (x >> 6) & 3: a single post-shift pand applies
; the shifted-down mask and clears the cross-byte bits at the same time.
  %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %t1 = lshr <16 x i8> %t0, <i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6>
  ret <16 x i8> %t1
}
; ashr
define <16 x i8> @test_128_i8_x_16_7_mask_ashr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_ashr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_ashr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_ashr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $1, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_7_mask_ashr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; ashr of (x & 7): the mask clears the per-byte sign bit, so the arithmetic
; shift is equivalent to lshr and lowers exactly like the _lshr_1 test above
; (logical psrlw with pre- and post-shift pands).
  %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %t1 = ashr <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_ashr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; ashr of (x & 28): sign bit is masked off, so this lowers like the _lshr_1
; variant — pand + logical psrlw, no cleanup (mask bit 0 clear).
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = ashr <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_ashr_2(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_2:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $2, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_2:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_2:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $2, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_2:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; ashr of (x & 28) by 2: sign bit masked off, so it is a logical shift in
; disguise — same codegen as the _lshr_2 variant.
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = ashr <16 x i8> %t0, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_ashr_3(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_3:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $3, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_3:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_3:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $3, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_3:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; ashr of (x & 28) by 3: sign bit masked off ⇒ logical shift; mask bit 2
; crosses the byte boundary, so a cleanup pand follows (mirrors _lshr_3).
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = ashr <16 x i8> %t0, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_ashr_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_4:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_4:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_4:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $4, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_4:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; ashr of (x & 28) by 4: sign bit masked off ⇒ logical shift; mask bits 3:2
; cross the byte boundary, hence the post-shift cleanup pand (mirrors _lshr_4).
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = ashr <16 x i8> %t0, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; X86-SSE2-NEXT: pxor %xmm1, %xmm0
; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $1, %xmm0
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; X64-SSE2-NEXT: pxor %xmm1, %xmm0
; X64-SSE2-NEXT: psubb %xmm1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; mask 224 keeps the per-byte sign bit, so a real arithmetic shift is needed;
; with no vector i8 ashr, it is emulated as a logical shift followed by the
; sign-extension identity (v ^ 64) - 64, where 64 is the shifted-down sign bit.
  %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %t1 = ashr <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_224_mask_ashr_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_4:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X86-SSE2-NEXT: pxor %xmm1, %xmm0
; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_4:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_4:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $4, %xmm0
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X64-SSE2-NEXT: pxor %xmm1, %xmm0
; X64-SSE2-NEXT: psubb %xmm1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_4:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; mask 224 keeps the sign bit; emulated i8 ashr by 4: logical shift, then
; sign-extend via (v ^ 8) - 8 (8 = sign bit moved down to position 3).
  %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %t1 = ashr <16 x i8> %t0, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_224_mask_ashr_5(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_5:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $5, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X86-SSE2-NEXT: pxor %xmm1, %xmm0
; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_5:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psrlw $5, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X64-SSE2-NEXT: pxor %xmm1, %xmm0
; X64-SSE2-NEXT: psubb %xmm1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_5:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; shift and mask commuted ((x & 224) >> 5 == (x >> 5) & 7), then the emulated
; sign-extension (v ^ 4) - 4 restores arithmetic-shift semantics (4 = the
; sign bit shifted down to position 2).
  %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %t1 = ashr <16 x i8> %t0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_224_mask_ashr_6(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_6:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $6, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; X86-SSE2-NEXT: pxor %xmm1, %xmm0
; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_6:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psrlw $6, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; X64-SSE2-NEXT: pxor %xmm1, %xmm0
; X64-SSE2-NEXT: psubb %xmm1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_6:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; shift and mask commuted ((x & 224) >> 6 == (x >> 6) & 3), then the emulated
; sign-extension (v ^ 2) - 2 (2 = the sign bit shifted down to position 1).
  %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %t1 = ashr <16 x i8> %t0, <i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6>
  ret <16 x i8> %t1
}
; shl
define <16 x i8> @test_128_i8_x_16_7_mask_shl_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: paddb %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: paddb %xmm0, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_7_mask_shl_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 7) << 1: shift-by-one is lowered as paddb (x + x), a true per-byte op,
; so no cross-lane cleanup is needed.
  %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %t1 = shl <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_7_mask_shl_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_4:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_4:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_4:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllw $4, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_7_mask_shl_4:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b0000_0111) << 4: masked bits 7:4 are zero, so the word-wide psllw
; pushes nothing into the next byte — single pand before the shift.
  %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %t1 = shl <16 x i8> %t0, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_7_mask_shl_5(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_5:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psllw $5, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsllw $5, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_5:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psllw $5, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_7_mask_shl_5:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllw $5, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 7) << 5 commuted to (x << 5) & 224: shift first, then one pand applies
; the shifted-up mask and clears the cross-byte bits in a single op.
  %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %t1 = shl <16 x i8> %t0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_7_mask_shl_6(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_6:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psllw $6, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsllw $6, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_6:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psllw $6, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_7_mask_shl_6:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllw $6, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 7) << 6 commuted to (x << 6) & 192: a single post-shift pand applies
; the shifted-up mask and scrubs cross-byte spill at once.
  %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %t1 = shl <16 x i8> %t0, <i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_shl_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: paddb %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: paddb %xmm0, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_shl_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 28) << 1 via pand + paddb (x + x): per-byte add, no cross-lane issues.
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = shl <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_shl_2(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_2:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllw $2, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_2:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $2, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_2:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllw $2, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_shl_2:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllw $2, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b0001_1100) << 2: masked bits 7:6 are zero, so the word-wide psllw
; spills nothing into the next byte — one pand only.
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = shl <16 x i8> %t0, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_shl_3(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_3:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllw $3, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_3:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_3:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllw $3, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_shl_3:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b0001_1100) << 3: masked bits 7:5 are zero, so no inter-byte spill
; from the 16-bit psllw — one pand only.
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = shl <16 x i8> %t0, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <16 x i8> %t1
}
define <16 x i8> @test_128_i8_x_16_28_mask_shl_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_4:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllw $4, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_4:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_4:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllw $4, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_28_mask_shl_4:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
; (x & 0b0001_1100) << 4: mask bit 4 crosses the byte boundary under the
; word-wide psllw, hence the cleanup pand after the shift.
  %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
  %t1 = shl <16 x i8> %t0, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  ret <16 x i8> %t1
}
; (x & 224) << 1 on <16 x i8>. Checks expect pand + paddb x,x: the shift-by-one
; lowers to an add of the register to itself, and no trailing mask is emitted.
define <16 x i8> @test_128_i8_x_16_224_mask_shl_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_shl_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: paddb %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_shl_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_shl_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: paddb %xmm0, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i8_x_16_224_mask_shl_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
%t1 = shl <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
ret <16 x i8> %t1
}
;------------------------------------------------------------------------------;
; 128-bit vector; 16-bit elements = 8 elements
;------------------------------------------------------------------------------;
; lshr
; (x & 127) >> 1 on <8 x i16>. Low-bits mask, small shift: checks expect the
; straightforward pand + psrlw $1 pair on all targets.
define <8 x i16> @test_128_i16_x_8_127_mask_lshr_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_lshr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_lshr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_lshr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_127_mask_lshr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
%t1 = lshr <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
ret <8 x i16> %t1
}
; (x & 2032) >> 3 on <8 x i16>. Mask 2032 = 0x07F0 (a shifted all-ones run);
; shift amount 3 is below the mask's low bit, so checks expect pand + psrlw $3.
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_3(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $3, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $3, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = lshr <8 x i16> %t0, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <8 x i16> %t1
}
; (x & 2032) >> 4 on <8 x i16>. Shift amount equals the 0x07F0 mask's trailing
; zero count; checks expect pand + psrlw $4.
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_4(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $4, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = lshr <8 x i16> %t0, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
ret <8 x i16> %t1
}
; (x & 2032) >> 5 on <8 x i16>. Shift amount is one past the 0x07F0 mask's low
; bit; checks expect pand + psrlw $5.
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_5(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $5, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $5, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = lshr <8 x i16> %t0, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
ret <8 x i16> %t1
}
; (x & 2032) >> 6 on <8 x i16>. Largest lshr amount tested for the 0x07F0 mask;
; checks expect pand + psrlw $6.
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_6(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $6, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $6, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = lshr <8 x i16> %t0, <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>
ret <8 x i16> %t1
}
; (x & 65024) >> 1 on <8 x i16>. Mask 65024 = 0xFE00 (high-bits run); a 1-bit
; shift does not cover the mask, so checks expect pand + psrlw $1.
define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = lshr <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
ret <8 x i16> %t1
}
; (x & 65024) >> 8 on <8 x i16>. Shift of 8 still leaves bit 8 of the 0xFE00
; mask below the shifted-out range, so the 'and' survives: pand + psrlw $8.
define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_8(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = lshr <8 x i16> %t0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
ret <8 x i16> %t1
}
; (x & 65024) >> 9: the shift already discards every bit the 0xFE00 mask
; clears, so the 'and' folds away and checks expect psrlw $9 alone.
define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_9(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_9:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $9, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_9:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $9, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_9:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psrlw $9, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_9:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlw $9, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = lshr <8 x i16> %t0, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
ret <8 x i16> %t1
}
; (x & 65024) >> 10: as with the shift-by-9 case, the mask is redundant after
; the shift, so checks expect a lone psrlw $10.
define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_10(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_10:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $10, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_10:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $10, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_10:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psrlw $10, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_10:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlw $10, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = lshr <8 x i16> %t0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
ret <8 x i16> %t1
}
; ashr
; (x & 127) ashr 1 on <8 x i16>. The mask clears the sign bit, so the
; arithmetic shift is lowered as a logical one: pand + psrlw $1 (not psraw).
define <8 x i16> @test_128_i16_x_8_127_mask_ashr_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_ashr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_ashr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_ashr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_127_mask_ashr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
%t1 = ashr <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
ret <8 x i16> %t1
}
; (x & 2032) ashr 3 on <8 x i16>. The 0x07F0 mask clears the sign bit, so
; ashr lowers to logical psrlw $3 after pand.
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_3(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $3, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $3, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = ashr <8 x i16> %t0, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <8 x i16> %t1
}
; (x & 2032) ashr 4 on <8 x i16>. Sign bit is masked off, so ashr lowers to
; logical psrlw $4 after pand.
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_4(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $4, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = ashr <8 x i16> %t0, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
ret <8 x i16> %t1
}
; (x & 2032) ashr 5 on <8 x i16>. Sign bit is masked off, so ashr lowers to
; logical psrlw $5 after pand.
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_5(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $5, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $5, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = ashr <8 x i16> %t0, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
ret <8 x i16> %t1
}
; (x & 2032) ashr 6 on <8 x i16>. Sign bit is masked off, so ashr lowers to
; logical psrlw $6 after pand.
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_6(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlw $6, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlw $6, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = ashr <8 x i16> %t0, <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>
ret <8 x i16> %t1
}
; (x & 65024) ashr 1 on <8 x i16>. The 0xFE00 mask keeps the sign bit, so a
; true arithmetic shift is required: pand + psraw $1.
define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psraw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsraw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psraw $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsraw $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = ashr <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
ret <8 x i16> %t1
}
; (x & 65024) ashr 8 on <8 x i16>. Sign bit survives the 0xFE00 mask, so the
; expected lowering is pand + psraw $8.
define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_8(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psraw $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsraw $8, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psraw $8, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsraw $8, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = ashr <8 x i16> %t0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
ret <8 x i16> %t1
}
; (x & 65024) ashr 9: the mask keeps exactly the bits the 9-bit arithmetic
; shift leaves live, so the 'and' folds away — checks expect psraw $9 alone.
define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_9(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_9:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psraw $9, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_9:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsraw $9, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_9:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psraw $9, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_9:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsraw $9, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = ashr <8 x i16> %t0, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
ret <8 x i16> %t1
}
; (x & 65024) ashr 10: as with the shift-by-9 case, the mask is redundant, so
; checks expect a lone psraw $10.
define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_10(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_10:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psraw $10, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_10:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsraw $10, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_10:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psraw $10, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_10:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsraw $10, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = ashr <8 x i16> %t0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
ret <8 x i16> %t1
}
; shl
; (x & 127) << 1 on <8 x i16>. Shift-by-one lowers to an add of the register
; to itself: pand + paddw x,x.
define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: paddw %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: paddw %xmm0, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
%t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
ret <8 x i16> %t1
}
; (x & 127) << 8 on <8 x i16>. The shifted mask still loses a bit (bit 7 of
; 0x7F would move past bit 15? no — bit 6 lands at bit 14), so the 'and' is
; kept: pand + psllw $8.  NOTE(review): per the checks below, pand survives.
define <8 x i16> @test_128_i16_x_8_127_mask_shl_8(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllw $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_8:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $8, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_8:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllw $8, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_8:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllw $8, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
%t1 = shl <8 x i16> %t0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
ret <8 x i16> %t1
}
; (x & 127) << 9: the shift pushes out every bit the 0x7F mask clears, so the
; 'and' folds away and checks expect psllw $9 alone.
define <8 x i16> @test_128_i16_x_8_127_mask_shl_9(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_9:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psllw $9, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_9:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsllw $9, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_9:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psllw $9, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_9:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllw $9, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
%t1 = shl <8 x i16> %t0, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
ret <8 x i16> %t1
}
; (x & 127) << 10: as with the shift-by-9 case, the mask is redundant, so
; checks expect a lone psllw $10.
define <8 x i16> @test_128_i16_x_8_127_mask_shl_10(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_10:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psllw $10, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_10:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsllw $10, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_10:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psllw $10, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_10:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllw $10, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
%t1 = shl <8 x i16> %t0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
ret <8 x i16> %t1
}
; (x & 2032) << 3 on <8 x i16>. The shifted 0x07F0 mask fits in 16 bits, so a
; single pand + psllw $3 suffices; no trailing mask is emitted.
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_3(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_3:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllw $3, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_3:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_3:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllw $3, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_3:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = shl <8 x i16> %t0, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <8 x i16> %t1
}
; (x & 2032) << 4 on <8 x i16>. Checks expect pand + psllw $4.
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_4(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_4:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_4:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_4:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllw $4, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_4:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = shl <8 x i16> %t0, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
ret <8 x i16> %t1
}
; (x & 2032) << 5 on <8 x i16>. Checks expect pand + psllw $5.
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_5(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_5:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllw $5, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $5, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_5:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllw $5, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_5:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllw $5, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = shl <8 x i16> %t0, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
ret <8 x i16> %t1
}
; (x & 2032) << 6 on <8 x i16>. Checks expect pand + psllw $6.
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_6:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllw $6, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $6, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_6:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllw $6, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_6:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllw $6, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
%t1 = shl <8 x i16> %t0, <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>
ret <8 x i16> %t1
}
; (x & 65024) << 1 on <8 x i16>. Shift-by-one lowers to add-to-self:
; pand + paddw x,x; the top bit of the 0xFE00 mask simply falls off.
define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: paddw %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: paddw %xmm0, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
ret <8 x i16> %t1
}
;------------------------------------------------------------------------------;
; 128-bit vector; 32-bit elements = 4 elements
;------------------------------------------------------------------------------;
; lshr
; (x & 32767) >> 1 on <4 x i32>. SSE2/AVX1 load the splat mask from a constant
; pool (pand with a memory operand); AVX2 materialises it with vpbroadcastd
; instead, hence the split AVX1/AVX2 check prefixes. Then psrld $1.
define <4 x i32> @test_128_i32_x_4_32767_mask_lshr_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrld $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsrld $1, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrld $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrld $1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%t0 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767>
%t1 = lshr <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %t1
}
; (x & 8388352) >> 7 on <4 x i32>. Mask 8388352 = 0x7FFF00 (shifted all-ones
; run); shift 7 is below its low bit. pand (memory on SSE2/AVX1, vpbroadcastd
; on AVX2) followed by psrld $7.
define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_7(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrld $7, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $7, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsrld $7, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrld $7, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpsrld $7, %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrld $7, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
%t1 = lshr <4 x i32> %t0, <i32 7, i32 7, i32 7, i32 7>
ret <4 x i32> %t1
}
; (x & 8388352) >> 8 on <4 x i32>. Shift equals the 0x7FFF00 mask's trailing
; zero count; checks expect pand (AVX2: vpbroadcastd + vpand) + psrld $8.
define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_8(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrld $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $8, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsrld $8, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrld $8, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpsrld $8, %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrld $8, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
%t1 = lshr <4 x i32> %t0, <i32 8, i32 8, i32 8, i32 8>
ret <4 x i32> %t1
}
; (x & 8388352) >> 9 on <4 x i32>. Shift is one past the 0x7FFF00 mask's low
; bit; checks expect pand (AVX2: vpbroadcastd + vpand) + psrld $9.
define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_9(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrld $9, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $9, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsrld $9, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrld $9, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpsrld $9, %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrld $9, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
%t1 = lshr <4 x i32> %t0, <i32 9, i32 9, i32 9, i32 9>
ret <4 x i32> %t1
}
; Mask 8388352 = 0x7FFF00 (bits 8-22), then lshr by 10: CHECK lines show the
; and (pand/vpand) and logical shift (psrld) are both retained.
define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_10(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrld $10, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrld $10, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrld $10, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrld $10, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrld $10, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrld $10, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
  %t1 = lshr <4 x i32> %t0, <i32 10, i32 10, i32 10, i32 10>
  ret <4 x i32> %t1
}
; Mask 4294836224 = 0xFFFE0000 (bits 17-31), then lshr by 1: the shift is
; smaller than the mask's low bit, so per the CHECK lines both the and
; (pand/vpand) and the logical shift (psrld) remain.
define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrld $1, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrld $1, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrld $1, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrld $1, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
  %t1 = lshr <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %t1
}
; Mask 4294836224 = 0xFFFE0000 (bits 17-31), then lshr by 16 (one below the
; mask's low bit): CHECK lines show the and and the logical shift (psrld)
; are both still emitted.
define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_16(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrld $16, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrld $16, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
  %t1 = lshr <4 x i32> %t0, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %t1
}
; Mask 4294836224 = 0xFFFE0000 (bits 17-31), then lshr by 17: every bit the
; mask clears is also discarded by the shift, so the and is folded away and
; the CHECK lines expect a lone psrld/vpsrld.
define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_17(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_17:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    psrld $17, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX-LABEL: test_128_i32_x_4_4294836224_mask_lshr_17:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpsrld $17, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_17:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    psrld $17, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX-LABEL: test_128_i32_x_4_4294836224_mask_lshr_17:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpsrld $17, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
  %t1 = lshr <4 x i32> %t0, <i32 17, i32 17, i32 17, i32 17>
  ret <4 x i32> %t1
}
; Mask 4294836224 = 0xFFFE0000 (bits 17-31), then lshr by 18 (past the mask's
; low bit): the and is redundant and folded away; CHECK lines expect only
; psrld/vpsrld.
define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_18(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_18:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    psrld $18, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX-LABEL: test_128_i32_x_4_4294836224_mask_lshr_18:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpsrld $18, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_18:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    psrld $18, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX-LABEL: test_128_i32_x_4_4294836224_mask_lshr_18:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpsrld $18, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
  %t1 = lshr <4 x i32> %t0, <i32 18, i32 18, i32 18, i32 18>
  ret <4 x i32> %t1
}
; ashr
; Mask 32767 = 0x7FFF (bits 0-14) clears the sign bit, so the IR ashr lowers
; to a LOGICAL shift: CHECK lines expect pand + psrld (not psrad).
define <4 x i32> @test_128_i32_x_4_32767_mask_ashr_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrld $1, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrld $1, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrld $1, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrld $1, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767>
  %t1 = ashr <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %t1
}
; Mask 8388352 = 0x7FFF00 (bits 8-22) clears the sign bit, so ashr lowers to
; the logical psrld; CHECK lines expect pand + psrld $7.
define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_7(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrld $7, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrld $7, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrld $7, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrld $7, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrld $7, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrld $7, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
  %t1 = ashr <4 x i32> %t0, <i32 7, i32 7, i32 7, i32 7>
  ret <4 x i32> %t1
}
; Mask 8388352 = 0x7FFF00 (bits 8-22) clears the sign bit, so ashr lowers to
; the logical psrld; CHECK lines expect pand + psrld $8.
define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_8(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrld $8, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrld $8, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrld $8, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrld $8, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrld $8, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrld $8, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
  %t1 = ashr <4 x i32> %t0, <i32 8, i32 8, i32 8, i32 8>
  ret <4 x i32> %t1
}
; Mask 8388352 = 0x7FFF00 (bits 8-22) clears the sign bit, so ashr lowers to
; the logical psrld; CHECK lines expect pand + psrld $9.
define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_9(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrld $9, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrld $9, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrld $9, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrld $9, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrld $9, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrld $9, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
  %t1 = ashr <4 x i32> %t0, <i32 9, i32 9, i32 9, i32 9>
  ret <4 x i32> %t1
}
; Mask 8388352 = 0x7FFF00 (bits 8-22) clears the sign bit, so ashr lowers to
; the logical psrld; CHECK lines expect pand + psrld $10.
define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_10(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrld $10, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrld $10, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrld $10, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrld $10, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrld $10, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrld $10, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
  %t1 = ashr <4 x i32> %t0, <i32 10, i32 10, i32 10, i32 10>
  ret <4 x i32> %t1
}
; Mask 4294836224 = 0xFFFE0000 keeps the sign bit (bit 31), so the ashr must
; stay ARITHMETIC: CHECK lines expect pand + psrad (contrast with the
; positive-mask ashr tests above, which lower to psrld).
define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrad $1, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrad $1, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrad $1, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrad $1, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrad $1, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrad $1, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
  %t1 = ashr <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %t1
}
; Mask 4294836224 = 0xFFFE0000 keeps the sign bit, so ashr stays arithmetic:
; CHECK lines expect pand + psrad $16.
define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_16(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrad $16, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsrad $16, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpsrad $16, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrad $16, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsrad $16, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpsrad $16, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
  %t1 = ashr <4 x i32> %t0, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %t1
}
; Mask 4294836224 = 0xFFFE0000 (bits 17-31), ashr by 17: the bits cleared by
; the mask are all shifted out, so the and folds away; CHECK lines expect a
; lone arithmetic psrad/vpsrad (sign bit survives the mask).
define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_17(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_17:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    psrad $17, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX-LABEL: test_128_i32_x_4_4294836224_mask_ashr_17:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpsrad $17, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_17:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    psrad $17, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX-LABEL: test_128_i32_x_4_4294836224_mask_ashr_17:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpsrad $17, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
  %t1 = ashr <4 x i32> %t0, <i32 17, i32 17, i32 17, i32 17>
  ret <4 x i32> %t1
}
; Mask 4294836224 = 0xFFFE0000 (bits 17-31), ashr by 18 (past the mask's low
; bit): the and folds away; CHECK lines expect only psrad/vpsrad.
define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_18(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_18:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    psrad $18, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX-LABEL: test_128_i32_x_4_4294836224_mask_ashr_18:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpsrad $18, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_18:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    psrad $18, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX-LABEL: test_128_i32_x_4_4294836224_mask_ashr_18:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpsrad $18, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
  %t1 = ashr <4 x i32> %t0, <i32 18, i32 18, i32 18, i32 18>
  ret <4 x i32> %t1
}
; shl
; Mask 32767 = 0x7FFF (bits 0-14), then shl by 1: the shift-by-one is lowered
; to a self-add (paddd %xmm0, %xmm0); CHECK lines expect pand + paddd.
define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    paddd %xmm0, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    paddd %xmm0, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767>
  %t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %t1
}
; Mask 32767 = 0x7FFF (bits 0-14), then shl by 16: the masked bits land at
; 16-30, so the and is still needed; CHECK lines expect pand + pslld $16.
define <4 x i32> @test_128_i32_x_4_32767_mask_shl_16(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_16:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    pslld $16, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_16:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpslld $16, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_16:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpslld $16, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_16:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    pslld $16, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_16:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpslld $16, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_16:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpslld $16, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767>
  %t1 = shl <4 x i32> %t0, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %t1
}
; Mask 32767 = 0x7FFF (bits 0-14), shl by 17: every bit the mask clears is
; shifted out the top anyway, so the and folds away; CHECK lines expect a
; lone pslld/vpslld.
define <4 x i32> @test_128_i32_x_4_32767_mask_shl_17(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_17:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pslld $17, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX-LABEL: test_128_i32_x_4_32767_mask_shl_17:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpslld $17, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_17:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pslld $17, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX-LABEL: test_128_i32_x_4_32767_mask_shl_17:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpslld $17, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767>
  %t1 = shl <4 x i32> %t0, <i32 17, i32 17, i32 17, i32 17>
  ret <4 x i32> %t1
}
; Mask 32767 = 0x7FFF (bits 0-14), shl by 18 (past the mask's high bit): the
; and is redundant and folded away; CHECK lines expect only pslld/vpslld.
define <4 x i32> @test_128_i32_x_4_32767_mask_shl_18(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_18:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pslld $18, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX-LABEL: test_128_i32_x_4_32767_mask_shl_18:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpslld $18, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_18:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pslld $18, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX-LABEL: test_128_i32_x_4_32767_mask_shl_18:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpslld $18, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767>
  %t1 = shl <4 x i32> %t0, <i32 18, i32 18, i32 18, i32 18>
  ret <4 x i32> %t1
}
; Mask 8388352 = 0x7FFF00 (bits 8-22), then shl by 7: bits 22..16 survive the
; shift, so the and remains; CHECK lines expect pand + pslld $7.
define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_7(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    pslld $7, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpslld $7, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpslld $7, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    pslld $7, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpslld $7, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpslld $7, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
  %t1 = shl <4 x i32> %t0, <i32 7, i32 7, i32 7, i32 7>
  ret <4 x i32> %t1
}
; Mask 8388352 = 0x7FFF00 (bits 8-22), then shl by 8: CHECK lines show the
; and (pand/vpand) and shift (pslld $8) are both retained.
define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_8(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    pslld $8, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpslld $8, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpslld $8, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    pslld $8, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpslld $8, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpslld $8, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
  %t1 = shl <4 x i32> %t0, <i32 8, i32 8, i32 8, i32 8>
  ret <4 x i32> %t1
}
; Mask 8388352 = 0x7FFF00 (bits 8-22), then shl by 9: CHECK lines show the
; and (pand/vpand) and shift (pslld $9) are both retained.
define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_9(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    pslld $9, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpslld $9, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpslld $9, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    pslld $9, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpslld $9, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpslld $9, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
  %t1 = shl <4 x i32> %t0, <i32 9, i32 9, i32 9, i32 9>
  ret <4 x i32> %t1
}
; Mask 8388352 = 0x7FFF00 (bits 8-22), then shl by 10: CHECK lines show the
; and (pand/vpand) and shift (pslld $10) are both retained.
define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    pslld $10, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpslld $10, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpslld $10, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    pslld $10, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpslld $10, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8388352,8388352,8388352,8388352]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpslld $10, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 8388352, i32 8388352, i32 8388352, i32 8388352>
  %t1 = shl <4 x i32> %t0, <i32 10, i32 10, i32 10, i32 10>
  ret <4 x i32> %t1
}
; Mask 4294836224 = 0xFFFE0000 (bits 17-31), then shl by 1: the shift-by-one
; is lowered to a self-add; CHECK lines expect pand + paddd.
define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    paddd %xmm0, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X86-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; X86-AVX2-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    paddd %xmm0, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
; X64-AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; X64-AVX2-NEXT:    retq
  %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
  %t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %t1
}
;------------------------------------------------------------------------------;
; 128-bit vector; 64-bit elements = 2 elements
;------------------------------------------------------------------------------;
; lshr
; i64 lanes: mask 2147483647 = 0x7FFFFFFF (bits 0-30), then lshr by 1:
; CHECK lines show the and (pand/vpand) and shift (psrlq $1) both retained.
; No AVX2 split here: the i64 splat constant is loaded from memory on all
; AVX variants, so one X86-AVX/X64-AVX prefix covers both.
define <2 x i64> @test_128_i64_x_2_2147483647_mask_lshr_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT:    psrlq $1, %xmm0
; X86-SSE2-NEXT:    retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT:    psrlq $1, %xmm0
; X64-SSE2-NEXT:    retq
;
; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
  %t1 = lshr <2 x i64> %t0, <i64 1, i64 1>
  ret <2 x i64> %t1
}
; i64 lanes: mask 140737488289792 = 0x7FFFFFFF0000 (bits 16-46), lshr by 15
; (one less than the mask's trailing-zero count): pand + psrlq $15 remain.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_15(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $15, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $15, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $15, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $15, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = lshr <2 x i64> %t0, <i64 15, i64 15>
ret <2 x i64> %t1
}
; Same mask (0x7FFFFFFF0000, bits 16-46), lshr by exactly the mask's
; trailing-zero count (16): pand + psrlq $16 remain.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_16(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $16, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $16, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = lshr <2 x i64> %t0, <i64 16, i64 16>
ret <2 x i64> %t1
}
; Same mask (0x7FFFFFFF0000), lshr by 17 (one more than the mask's trailing
; zeros): pand + psrlq $17 remain.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_17(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $17, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $17, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $17, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $17, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = lshr <2 x i64> %t0, <i64 17, i64 17>
ret <2 x i64> %t1
}
; Same mask (0x7FFFFFFF0000), lshr by 18: pand + psrlq $18 remain.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_18(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $18, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $18, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $18, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $18, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = lshr <2 x i64> %t0, <i64 18, i64 18>
ret <2 x i64> %t1
}
; i64 lanes: mask 18446744065119617024 = 0xFFFFFFFE00000000 (bits 33-63),
; lshr by 1: pand + psrlq $1 remain.
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = lshr <2 x i64> %t0, <i64 1, i64 1>
ret <2 x i64> %t1
}
; Mask 0xFFFFFFFE00000000, lshr by 32. On X86-AVX2 the mask is materialized
; as a vpbroadcastd splat of 0xFFFFFFFE (which also sets the low dwords, but
; those bits are discarded by the following psrlq $32, so the result matches).
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_32(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $32, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $32, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = lshr <2 x i64> %t0, <i64 32, i64 32>
ret <2 x i64> %t1
}
; Mask 0xFFFFFFFE00000000 (bits 33-63), lshr by 33: every bit that survives
; the shift comes from within the mask, so the AND folds away and only the
; psrlq $33 is emitted.
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_33(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_33:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlq $33, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_33:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlq $33, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_33:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psrlq $33, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_33:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlq $33, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = lshr <2 x i64> %t0, <i64 33, i64 33>
ret <2 x i64> %t1
}
; As above but lshr by 34: the mask is still redundant after the shift, so
; only psrlq $34 is emitted.
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_34(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_34:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlq $34, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_34:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlq $34, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_34:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psrlq $34, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_34:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlq $34, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = lshr <2 x i64> %t0, <i64 34, i64 34>
ret <2 x i64> %t1
}
; ashr
; ashr after masking with 0x7FFFFFFF: the mask clears the sign bit, so the
; arithmetic shift is lowered as a logical psrlq $1.
define <2 x i64> @test_128_i64_x_2_2147483647_mask_ashr_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
%t1 = ashr <2 x i64> %t0, <i64 1, i64 1>
ret <2 x i64> %t1
}
; ashr after masking with 0x7FFFFFFF0000 (sign bit cleared): lowered as
; pand + logical psrlq $15.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_15(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $15, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $15, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $15, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $15, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = ashr <2 x i64> %t0, <i64 15, i64 15>
ret <2 x i64> %t1
}
; Same non-negative mask, ashr by 16: lowered as pand + logical psrlq $16.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_16(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $16, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $16, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = ashr <2 x i64> %t0, <i64 16, i64 16>
ret <2 x i64> %t1
}
; Same non-negative mask, ashr by 17: lowered as pand + logical psrlq $17.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_17(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $17, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $17, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $17, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $17, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = ashr <2 x i64> %t0, <i64 17, i64 17>
ret <2 x i64> %t1
}
; Same non-negative mask, ashr by 18: lowered as pand + logical psrlq $18.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_18(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrlq $18, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $18, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrlq $18, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrlq $18, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = ashr <2 x i64> %t0, <i64 18, i64 18>
ret <2 x i64> %t1
}
; ashr by 1 with mask 0xFFFFFFFE00000000: the sign bit can be set, and there
; is no 64-bit vector arithmetic shift before AVX-512, so the lowering uses a
; 32-bit psrad $1 and then clears the low dwords (a second pand on SSE2, a
; blend with zero on AVX1/AVX2).
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psrad $1, %xmm0
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsrad $1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psrad $1, %xmm0
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrad $1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X64-AVX2-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = ashr <2 x i64> %t0, <i64 1, i64 1>
ret <2 x i64> %t1
}
; ashr by exactly 32: the result's low dwords are the source's high dwords and
; the result's high dwords are the sign (psrad $31); built with pshufd +
; punpckldq on SSE2 and pshufd + blend on AVX.
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: psrad $31, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE2-NEXT: movdqa %xmm1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
; X64-SSE2-NEXT: psrad $31, %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-SSE2-NEXT: movdqa %xmm1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; X64-AVX2-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = ashr <2 x i64> %t0, <i64 32, i64 32>
ret <2 x i64> %t1
}
; ashr by 33 with mask on bits 33-63: all bits surviving the shift are inside
; the mask, so the AND folds away entirely; the 64-bit ashr is built from
; psrad $1 (value) and psrad $31 (sign), recombined by shuffles/blends.
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_33(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_33:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrad $31, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: psrad $1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_33:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; X86-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_33:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
; X86-AVX2-NEXT: vpsrad $1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_33:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
; X64-SSE2-NEXT: psrad $31, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X64-SSE2-NEXT: psrad $1, %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_33:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; X64-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_33:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
; X64-AVX2-NEXT: vpsrad $1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; X64-AVX2-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = ashr <2 x i64> %t0, <i64 33, i64 33>
ret <2 x i64> %t1
}
; As the ashr-33 case but shifting by 34: mask folds away; built from
; psrad $2 (value) and psrad $31 (sign), recombined by shuffles/blends.
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_34(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_34:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrad $31, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: psrad $2, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_34:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; X86-AVX1-NEXT: vpsrad $2, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_34:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
; X86-AVX2-NEXT: vpsrad $2, %xmm0, %xmm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_34:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
; X64-SSE2-NEXT: psrad $31, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X64-SSE2-NEXT: psrad $2, %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_34:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; X64-AVX1-NEXT: vpsrad $2, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_34:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
; X64-AVX2-NEXT: vpsrad $2, %xmm0, %xmm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; X64-AVX2-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = ashr <2 x i64> %t0, <i64 34, i64 34>
ret <2 x i64> %t1
}
; shl
; Mask 0x7FFFFFFF then shl by 1: the shift-by-one is emitted as paddq.
define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: paddq %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: paddq %xmm0, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
%t1 = shl <2 x i64> %t0, <i64 1, i64 1>
ret <2 x i64> %t1
}
; Mask 0x7FFFFFFF then shl by 32: pand + psllq $32 remain; X86-AVX2
; materializes the splat mask with vpbroadcastd.
define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_32(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllq $32, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483647,2147483647,2147483647,2147483647]
; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllq $32, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
%t1 = shl <2 x i64> %t0, <i64 32, i64 32>
ret <2 x i64> %t1
}
; Mask 0x7FFFFFFF (bits 0-30) then shl by 33: every bit that could survive the
; shift lies within the mask, so the AND folds away and only psllq $33 remains.
define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_33(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_33:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psllq $33, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_33:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsllq $33, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_33:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psllq $33, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_33:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllq $33, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
%t1 = shl <2 x i64> %t0, <i64 33, i64 33>
ret <2 x i64> %t1
}
; As above but shl by 34: the mask is still redundant, so only psllq $34
; is emitted.
define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_34(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_34:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psllq $34, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_34:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsllq $34, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_34:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psllq $34, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_34:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllq $34, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
%t1 = shl <2 x i64> %t0, <i64 34, i64 34>
ret <2 x i64> %t1
}
; Mask 0x7FFFFFFF0000 (bits 16-46) then shl by 15: pand + psllq $15 remain.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_15(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllq $15, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllq $15, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllq $15, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllq $15, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = shl <2 x i64> %t0, <i64 15, i64 15>
ret <2 x i64> %t1
}
; Same mask, shl by 16: pand + psllq $16 remain.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_16(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllq $16, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllq $16, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllq $16, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllq $16, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = shl <2 x i64> %t0, <i64 16, i64 16>
ret <2 x i64> %t1
}
; Same mask, shl by 17: pand + psllq $17 remain.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_17(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllq $17, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllq $17, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllq $17, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllq $17, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = shl <2 x i64> %t0, <i64 17, i64 17>
ret <2 x i64> %t1
}
; Same mask, shl by 18: pand + psllq $18 remain.
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: psllq $18, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllq $18, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: psllq $18, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsllq $18, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
%t1 = shl <2 x i64> %t0, <i64 18, i64 18>
ret <2 x i64> %t1
}
; Mask 0xFFFFFFFE00000000 then shl by 1: pand remains and the shift-by-one is
; emitted as paddq.
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: paddq %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: paddq %xmm0, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = shl <2 x i64> %t0, <i64 1, i64 1>
ret <2 x i64> %t1
}