; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
;
; PR6455 'Clear Upper Bits' Patterns
;
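; Each variant clears the upper half of every vector element in one of three
; ways: the 'a' functions truncate and zero-extend each element through
; scalar extracts/inserts, the 'b' functions bitcast to half-width elements
; and insert zeros into the odd lanes, and the 'c' functions apply an
; explicit vector 'and' mask. Ideally all three lower to the same single
; mask/blend; the assertions below record how close the current codegen gets.
;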
define <2 x i64> @_clearupper2xi64a(<2 x i64>) nounwind {
; SSE2-LABEL: _clearupper2xi64a:
; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper2xi64a:
; SSE42: # %bb.0:
; SSE42-NEXT: xorps %xmm1, %xmm1
; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper2xi64a:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
%x0 = extractelement <2 x i64> %0, i32 0
%x1 = extractelement <2 x i64> %0, i32 1
%trunc0 = trunc i64 %x0 to i32
%trunc1 = trunc i64 %x1 to i32
%ext0 = zext i32 %trunc0 to i64
%ext1 = zext i32 %trunc1 to i64
%v0 = insertelement <2 x i64> undef, i64 %ext0, i32 0
%v1 = insertelement <2 x i64> %v0, i64 %ext1, i32 1
ret <2 x i64> %v1
}
define <4 x i64> @_clearupper4xi64a(<4 x i64>) nounwind {
; SSE2-LABEL: _clearupper4xi64a:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [4294967295,4294967295]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi64a:
; SSE42: # %bb.0:
; SSE42-NEXT: xorps %xmm2, %xmm2
; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SSE42-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi64a:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX-NEXT: retq
%x0 = extractelement <4 x i64> %0, i32 0
%x1 = extractelement <4 x i64> %0, i32 1
%x2 = extractelement <4 x i64> %0, i32 2
%x3 = extractelement <4 x i64> %0, i32 3
%trunc0 = trunc i64 %x0 to i32
%trunc1 = trunc i64 %x1 to i32
%trunc2 = trunc i64 %x2 to i32
%trunc3 = trunc i64 %x3 to i32
%ext0 = zext i32 %trunc0 to i64
%ext1 = zext i32 %trunc1 to i64
%ext2 = zext i32 %trunc2 to i64
%ext3 = zext i32 %trunc3 to i64
%v0 = insertelement <4 x i64> undef, i64 %ext0, i32 0
%v1 = insertelement <4 x i64> %v0, i64 %ext1, i32 1
%v2 = insertelement <4 x i64> %v1, i64 %ext2, i32 2
%v3 = insertelement <4 x i64> %v2, i64 %ext3, i32 3
ret <4 x i64> %v3
}
define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
; SSE2-LABEL: _clearupper4xi32a:
; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi32a:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi32a:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: retq
%x0 = extractelement <4 x i32> %0, i32 0
%x1 = extractelement <4 x i32> %0, i32 1
%x2 = extractelement <4 x i32> %0, i32 2
%x3 = extractelement <4 x i32> %0, i32 3
%trunc0 = trunc i32 %x0 to i16
%trunc1 = trunc i32 %x1 to i16
%trunc2 = trunc i32 %x2 to i16
%trunc3 = trunc i32 %x3 to i16
%ext0 = zext i16 %trunc0 to i32
%ext1 = zext i16 %trunc1 to i32
%ext2 = zext i16 %trunc2 to i32
%ext3 = zext i16 %trunc3 to i32
%v0 = insertelement <4 x i32> undef, i32 %ext0, i32 0
%v1 = insertelement <4 x i32> %v0, i32 %ext1, i32 1
%v2 = insertelement <4 x i32> %v1, i32 %ext2, i32 2
%v3 = insertelement <4 x i32> %v2, i32 %ext3, i32 3
ret <4 x i32> %v3
}
define <8 x i32> @_clearupper8xi32a(<8 x i32>) nounwind {
; SSE2-LABEL: _clearupper8xi32a:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,65535,65535,65535]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper8xi32a:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper8xi32a:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper8xi32a:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
%x0 = extractelement <8 x i32> %0, i32 0
%x1 = extractelement <8 x i32> %0, i32 1
%x2 = extractelement <8 x i32> %0, i32 2
%x3 = extractelement <8 x i32> %0, i32 3
%x4 = extractelement <8 x i32> %0, i32 4
%x5 = extractelement <8 x i32> %0, i32 5
%x6 = extractelement <8 x i32> %0, i32 6
%x7 = extractelement <8 x i32> %0, i32 7
%trunc0 = trunc i32 %x0 to i16
%trunc1 = trunc i32 %x1 to i16
%trunc2 = trunc i32 %x2 to i16
%trunc3 = trunc i32 %x3 to i16
%trunc4 = trunc i32 %x4 to i16
%trunc5 = trunc i32 %x5 to i16
%trunc6 = trunc i32 %x6 to i16
%trunc7 = trunc i32 %x7 to i16
%ext0 = zext i16 %trunc0 to i32
%ext1 = zext i16 %trunc1 to i32
%ext2 = zext i16 %trunc2 to i32
%ext3 = zext i16 %trunc3 to i32
%ext4 = zext i16 %trunc4 to i32
%ext5 = zext i16 %trunc5 to i32
%ext6 = zext i16 %trunc6 to i32
%ext7 = zext i16 %trunc7 to i32
%v0 = insertelement <8 x i32> undef, i32 %ext0, i32 0
%v1 = insertelement <8 x i32> %v0, i32 %ext1, i32 1
%v2 = insertelement <8 x i32> %v1, i32 %ext2, i32 2
%v3 = insertelement <8 x i32> %v2, i32 %ext3, i32 3
%v4 = insertelement <8 x i32> %v3, i32 %ext4, i32 4
%v5 = insertelement <8 x i32> %v4, i32 %ext5, i32 5
%v6 = insertelement <8 x i32> %v5, i32 %ext6, i32 6
%v7 = insertelement <8 x i32> %v6, i32 %ext7, i32 7
ret <8 x i32> %v7
}
define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16a:
; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper8xi16a:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %0, i32 0
%x1 = extractelement <8 x i16> %0, i32 1
%x2 = extractelement <8 x i16> %0, i32 2
%x3 = extractelement <8 x i16> %0, i32 3
%x4 = extractelement <8 x i16> %0, i32 4
%x5 = extractelement <8 x i16> %0, i32 5
%x6 = extractelement <8 x i16> %0, i32 6
%x7 = extractelement <8 x i16> %0, i32 7
%trunc0 = trunc i16 %x0 to i8
%trunc1 = trunc i16 %x1 to i8
%trunc2 = trunc i16 %x2 to i8
%trunc3 = trunc i16 %x3 to i8
%trunc4 = trunc i16 %x4 to i8
%trunc5 = trunc i16 %x5 to i8
%trunc6 = trunc i16 %x6 to i8
%trunc7 = trunc i16 %x7 to i8
%ext0 = zext i8 %trunc0 to i16
%ext1 = zext i8 %trunc1 to i16
%ext2 = zext i8 %trunc2 to i16
%ext3 = zext i8 %trunc3 to i16
%ext4 = zext i8 %trunc4 to i16
%ext5 = zext i8 %trunc5 to i16
%ext6 = zext i8 %trunc6 to i16
%ext7 = zext i8 %trunc7 to i16
%v0 = insertelement <8 x i16> undef, i16 %ext0, i32 0
%v1 = insertelement <8 x i16> %v0, i16 %ext1, i32 1
%v2 = insertelement <8 x i16> %v1, i16 %ext2, i32 2
%v3 = insertelement <8 x i16> %v2, i16 %ext3, i32 3
%v4 = insertelement <8 x i16> %v3, i16 %ext4, i32 4
%v5 = insertelement <8 x i16> %v4, i16 %ext5, i32 5
%v6 = insertelement <8 x i16> %v5, i16 %ext6, i32 6
%v7 = insertelement <8 x i16> %v6, i16 %ext7, i32 7
ret <8 x i16> %v7
}
define <16 x i16> @_clearupper16xi16a(<16 x i16>) nounwind {
; SSE-LABEL: _clearupper16xi16a:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi16a:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%x0 = extractelement <16 x i16> %0, i32 0
%x1 = extractelement <16 x i16> %0, i32 1
%x2 = extractelement <16 x i16> %0, i32 2
%x3 = extractelement <16 x i16> %0, i32 3
%x4 = extractelement <16 x i16> %0, i32 4
%x5 = extractelement <16 x i16> %0, i32 5
%x6 = extractelement <16 x i16> %0, i32 6
%x7 = extractelement <16 x i16> %0, i32 7
%x8 = extractelement <16 x i16> %0, i32 8
%x9 = extractelement <16 x i16> %0, i32 9
%x10 = extractelement <16 x i16> %0, i32 10
%x11 = extractelement <16 x i16> %0, i32 11
%x12 = extractelement <16 x i16> %0, i32 12
%x13 = extractelement <16 x i16> %0, i32 13
%x14 = extractelement <16 x i16> %0, i32 14
%x15 = extractelement <16 x i16> %0, i32 15
%trunc0 = trunc i16 %x0 to i8
%trunc1 = trunc i16 %x1 to i8
%trunc2 = trunc i16 %x2 to i8
%trunc3 = trunc i16 %x3 to i8
%trunc4 = trunc i16 %x4 to i8
%trunc5 = trunc i16 %x5 to i8
%trunc6 = trunc i16 %x6 to i8
%trunc7 = trunc i16 %x7 to i8
%trunc8 = trunc i16 %x8 to i8
%trunc9 = trunc i16 %x9 to i8
%trunc10 = trunc i16 %x10 to i8
%trunc11 = trunc i16 %x11 to i8
%trunc12 = trunc i16 %x12 to i8
%trunc13 = trunc i16 %x13 to i8
%trunc14 = trunc i16 %x14 to i8
%trunc15 = trunc i16 %x15 to i8
%ext0 = zext i8 %trunc0 to i16
%ext1 = zext i8 %trunc1 to i16
%ext2 = zext i8 %trunc2 to i16
%ext3 = zext i8 %trunc3 to i16
%ext4 = zext i8 %trunc4 to i16
%ext5 = zext i8 %trunc5 to i16
%ext6 = zext i8 %trunc6 to i16
%ext7 = zext i8 %trunc7 to i16
%ext8 = zext i8 %trunc8 to i16
%ext9 = zext i8 %trunc9 to i16
%ext10 = zext i8 %trunc10 to i16
%ext11 = zext i8 %trunc11 to i16
%ext12 = zext i8 %trunc12 to i16
%ext13 = zext i8 %trunc13 to i16
%ext14 = zext i8 %trunc14 to i16
%ext15 = zext i8 %trunc15 to i16
%v0 = insertelement <16 x i16> undef, i16 %ext0, i32 0
%v1 = insertelement <16 x i16> %v0, i16 %ext1, i32 1
%v2 = insertelement <16 x i16> %v1, i16 %ext2, i32 2
%v3 = insertelement <16 x i16> %v2, i16 %ext3, i32 3
%v4 = insertelement <16 x i16> %v3, i16 %ext4, i32 4
%v5 = insertelement <16 x i16> %v4, i16 %ext5, i32 5
%v6 = insertelement <16 x i16> %v5, i16 %ext6, i32 6
%v7 = insertelement <16 x i16> %v6, i16 %ext7, i32 7
%v8 = insertelement <16 x i16> %v7, i16 %ext8, i32 8
%v9 = insertelement <16 x i16> %v8, i16 %ext9, i32 9
%v10 = insertelement <16 x i16> %v9, i16 %ext10, i32 10
%v11 = insertelement <16 x i16> %v10, i16 %ext11, i32 11
%v12 = insertelement <16 x i16> %v11, i16 %ext12, i32 12
%v13 = insertelement <16 x i16> %v12, i16 %ext13, i32 13
%v14 = insertelement <16 x i16> %v13, i16 %ext14, i32 14
%v15 = insertelement <16 x i16> %v14, i16 %ext15, i32 15
ret <16 x i16> %v15
}
define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8a:
; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi8a:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <16 x i8> %0, i32 0
%x1 = extractelement <16 x i8> %0, i32 1
%x2 = extractelement <16 x i8> %0, i32 2
%x3 = extractelement <16 x i8> %0, i32 3
%x4 = extractelement <16 x i8> %0, i32 4
%x5 = extractelement <16 x i8> %0, i32 5
%x6 = extractelement <16 x i8> %0, i32 6
%x7 = extractelement <16 x i8> %0, i32 7
%x8 = extractelement <16 x i8> %0, i32 8
%x9 = extractelement <16 x i8> %0, i32 9
%x10 = extractelement <16 x i8> %0, i32 10
%x11 = extractelement <16 x i8> %0, i32 11
%x12 = extractelement <16 x i8> %0, i32 12
%x13 = extractelement <16 x i8> %0, i32 13
%x14 = extractelement <16 x i8> %0, i32 14
%x15 = extractelement <16 x i8> %0, i32 15
%trunc0 = trunc i8 %x0 to i4
%trunc1 = trunc i8 %x1 to i4
%trunc2 = trunc i8 %x2 to i4
%trunc3 = trunc i8 %x3 to i4
%trunc4 = trunc i8 %x4 to i4
%trunc5 = trunc i8 %x5 to i4
%trunc6 = trunc i8 %x6 to i4
%trunc7 = trunc i8 %x7 to i4
%trunc8 = trunc i8 %x8 to i4
%trunc9 = trunc i8 %x9 to i4
%trunc10 = trunc i8 %x10 to i4
%trunc11 = trunc i8 %x11 to i4
%trunc12 = trunc i8 %x12 to i4
%trunc13 = trunc i8 %x13 to i4
%trunc14 = trunc i8 %x14 to i4
%trunc15 = trunc i8 %x15 to i4
%ext0 = zext i4 %trunc0 to i8
%ext1 = zext i4 %trunc1 to i8
%ext2 = zext i4 %trunc2 to i8
%ext3 = zext i4 %trunc3 to i8
%ext4 = zext i4 %trunc4 to i8
%ext5 = zext i4 %trunc5 to i8
%ext6 = zext i4 %trunc6 to i8
%ext7 = zext i4 %trunc7 to i8
%ext8 = zext i4 %trunc8 to i8
%ext9 = zext i4 %trunc9 to i8
%ext10 = zext i4 %trunc10 to i8
%ext11 = zext i4 %trunc11 to i8
%ext12 = zext i4 %trunc12 to i8
%ext13 = zext i4 %trunc13 to i8
%ext14 = zext i4 %trunc14 to i8
%ext15 = zext i4 %trunc15 to i8
%v0 = insertelement <16 x i8> undef, i8 %ext0, i32 0
%v1 = insertelement <16 x i8> %v0, i8 %ext1, i32 1
%v2 = insertelement <16 x i8> %v1, i8 %ext2, i32 2
%v3 = insertelement <16 x i8> %v2, i8 %ext3, i32 3
%v4 = insertelement <16 x i8> %v3, i8 %ext4, i32 4
%v5 = insertelement <16 x i8> %v4, i8 %ext5, i32 5
%v6 = insertelement <16 x i8> %v5, i8 %ext6, i32 6
%v7 = insertelement <16 x i8> %v6, i8 %ext7, i32 7
%v8 = insertelement <16 x i8> %v7, i8 %ext8, i32 8
%v9 = insertelement <16 x i8> %v8, i8 %ext9, i32 9
%v10 = insertelement <16 x i8> %v9, i8 %ext10, i32 10
%v11 = insertelement <16 x i8> %v10, i8 %ext11, i32 11
%v12 = insertelement <16 x i8> %v11, i8 %ext12, i32 12
%v13 = insertelement <16 x i8> %v12, i8 %ext13, i32 13
%v14 = insertelement <16 x i8> %v13, i8 %ext14, i32 14
%v15 = insertelement <16 x i8> %v14, i8 %ext15, i32 15
ret <16 x i8> %v15
}
define <32 x i8> @_clearupper32xi8a(<32 x i8>) nounwind {
; SSE-LABEL: _clearupper32xi8a:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper32xi8a:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%x0 = extractelement <32 x i8> %0, i32 0
%x1 = extractelement <32 x i8> %0, i32 1
%x2 = extractelement <32 x i8> %0, i32 2
%x3 = extractelement <32 x i8> %0, i32 3
%x4 = extractelement <32 x i8> %0, i32 4
%x5 = extractelement <32 x i8> %0, i32 5
%x6 = extractelement <32 x i8> %0, i32 6
%x7 = extractelement <32 x i8> %0, i32 7
%x8 = extractelement <32 x i8> %0, i32 8
%x9 = extractelement <32 x i8> %0, i32 9
%x10 = extractelement <32 x i8> %0, i32 10
%x11 = extractelement <32 x i8> %0, i32 11
%x12 = extractelement <32 x i8> %0, i32 12
%x13 = extractelement <32 x i8> %0, i32 13
%x14 = extractelement <32 x i8> %0, i32 14
%x15 = extractelement <32 x i8> %0, i32 15
%x16 = extractelement <32 x i8> %0, i32 16
%x17 = extractelement <32 x i8> %0, i32 17
%x18 = extractelement <32 x i8> %0, i32 18
%x19 = extractelement <32 x i8> %0, i32 19
%x20 = extractelement <32 x i8> %0, i32 20
%x21 = extractelement <32 x i8> %0, i32 21
%x22 = extractelement <32 x i8> %0, i32 22
%x23 = extractelement <32 x i8> %0, i32 23
%x24 = extractelement <32 x i8> %0, i32 24
%x25 = extractelement <32 x i8> %0, i32 25
%x26 = extractelement <32 x i8> %0, i32 26
%x27 = extractelement <32 x i8> %0, i32 27
%x28 = extractelement <32 x i8> %0, i32 28
%x29 = extractelement <32 x i8> %0, i32 29
%x30 = extractelement <32 x i8> %0, i32 30
%x31 = extractelement <32 x i8> %0, i32 31
%trunc0 = trunc i8 %x0 to i4
%trunc1 = trunc i8 %x1 to i4
%trunc2 = trunc i8 %x2 to i4
%trunc3 = trunc i8 %x3 to i4
%trunc4 = trunc i8 %x4 to i4
%trunc5 = trunc i8 %x5 to i4
%trunc6 = trunc i8 %x6 to i4
%trunc7 = trunc i8 %x7 to i4
%trunc8 = trunc i8 %x8 to i4
%trunc9 = trunc i8 %x9 to i4
%trunc10 = trunc i8 %x10 to i4
%trunc11 = trunc i8 %x11 to i4
%trunc12 = trunc i8 %x12 to i4
%trunc13 = trunc i8 %x13 to i4
%trunc14 = trunc i8 %x14 to i4
%trunc15 = trunc i8 %x15 to i4
%trunc16 = trunc i8 %x16 to i4
%trunc17 = trunc i8 %x17 to i4
%trunc18 = trunc i8 %x18 to i4
%trunc19 = trunc i8 %x19 to i4
%trunc20 = trunc i8 %x20 to i4
%trunc21 = trunc i8 %x21 to i4
%trunc22 = trunc i8 %x22 to i4
%trunc23 = trunc i8 %x23 to i4
%trunc24 = trunc i8 %x24 to i4
%trunc25 = trunc i8 %x25 to i4
%trunc26 = trunc i8 %x26 to i4
%trunc27 = trunc i8 %x27 to i4
%trunc28 = trunc i8 %x28 to i4
%trunc29 = trunc i8 %x29 to i4
%trunc30 = trunc i8 %x30 to i4
%trunc31 = trunc i8 %x31 to i4
%ext0 = zext i4 %trunc0 to i8
%ext1 = zext i4 %trunc1 to i8
%ext2 = zext i4 %trunc2 to i8
%ext3 = zext i4 %trunc3 to i8
%ext4 = zext i4 %trunc4 to i8
%ext5 = zext i4 %trunc5 to i8
%ext6 = zext i4 %trunc6 to i8
%ext7 = zext i4 %trunc7 to i8
%ext8 = zext i4 %trunc8 to i8
%ext9 = zext i4 %trunc9 to i8
%ext10 = zext i4 %trunc10 to i8
%ext11 = zext i4 %trunc11 to i8
%ext12 = zext i4 %trunc12 to i8
%ext13 = zext i4 %trunc13 to i8
%ext14 = zext i4 %trunc14 to i8
%ext15 = zext i4 %trunc15 to i8
%ext16 = zext i4 %trunc16 to i8
%ext17 = zext i4 %trunc17 to i8
%ext18 = zext i4 %trunc18 to i8
%ext19 = zext i4 %trunc19 to i8
%ext20 = zext i4 %trunc20 to i8
%ext21 = zext i4 %trunc21 to i8
%ext22 = zext i4 %trunc22 to i8
%ext23 = zext i4 %trunc23 to i8
%ext24 = zext i4 %trunc24 to i8
%ext25 = zext i4 %trunc25 to i8
%ext26 = zext i4 %trunc26 to i8
%ext27 = zext i4 %trunc27 to i8
%ext28 = zext i4 %trunc28 to i8
%ext29 = zext i4 %trunc29 to i8
%ext30 = zext i4 %trunc30 to i8
%ext31 = zext i4 %trunc31 to i8
%v0 = insertelement <32 x i8> undef, i8 %ext0, i32 0
%v1 = insertelement <32 x i8> %v0, i8 %ext1, i32 1
%v2 = insertelement <32 x i8> %v1, i8 %ext2, i32 2
%v3 = insertelement <32 x i8> %v2, i8 %ext3, i32 3
%v4 = insertelement <32 x i8> %v3, i8 %ext4, i32 4
%v5 = insertelement <32 x i8> %v4, i8 %ext5, i32 5
%v6 = insertelement <32 x i8> %v5, i8 %ext6, i32 6
%v7 = insertelement <32 x i8> %v6, i8 %ext7, i32 7
%v8 = insertelement <32 x i8> %v7, i8 %ext8, i32 8
%v9 = insertelement <32 x i8> %v8, i8 %ext9, i32 9
%v10 = insertelement <32 x i8> %v9, i8 %ext10, i32 10
%v11 = insertelement <32 x i8> %v10, i8 %ext11, i32 11
%v12 = insertelement <32 x i8> %v11, i8 %ext12, i32 12
%v13 = insertelement <32 x i8> %v12, i8 %ext13, i32 13
%v14 = insertelement <32 x i8> %v13, i8 %ext14, i32 14
%v15 = insertelement <32 x i8> %v14, i8 %ext15, i32 15
%v16 = insertelement <32 x i8> %v15, i8 %ext16, i32 16
%v17 = insertelement <32 x i8> %v16, i8 %ext17, i32 17
%v18 = insertelement <32 x i8> %v17, i8 %ext18, i32 18
%v19 = insertelement <32 x i8> %v18, i8 %ext19, i32 19
%v20 = insertelement <32 x i8> %v19, i8 %ext20, i32 20
%v21 = insertelement <32 x i8> %v20, i8 %ext21, i32 21
%v22 = insertelement <32 x i8> %v21, i8 %ext22, i32 22
%v23 = insertelement <32 x i8> %v22, i8 %ext23, i32 23
%v24 = insertelement <32 x i8> %v23, i8 %ext24, i32 24
%v25 = insertelement <32 x i8> %v24, i8 %ext25, i32 25
%v26 = insertelement <32 x i8> %v25, i8 %ext26, i32 26
%v27 = insertelement <32 x i8> %v26, i8 %ext27, i32 27
%v28 = insertelement <32 x i8> %v27, i8 %ext28, i32 28
%v29 = insertelement <32 x i8> %v28, i8 %ext29, i32 29
%v30 = insertelement <32 x i8> %v29, i8 %ext30, i32 30
%v31 = insertelement <32 x i8> %v30, i8 %ext31, i32 31
ret <32 x i8> %v31
}
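; The 'b' variants express the same operation as a bitcast to a vector with
; half-width elements followed by insertion of zeros into the odd lanes.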
define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
; SSE2-LABEL: _clearupper2xi64b:
; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper2xi64b:
; SSE42: # %bb.0:
; SSE42-NEXT: xorps %xmm1, %xmm1
; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper2xi64b:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
%x32 = bitcast <2 x i64> %0 to <4 x i32>
%r0 = insertelement <4 x i32> %x32, i32 zeroinitializer, i32 1
%r1 = insertelement <4 x i32> %r0, i32 zeroinitializer, i32 3
%r = bitcast <4 x i32> %r1 to <2 x i64>
ret <2 x i64> %r
}
define <4 x i64> @_clearupper4xi64b(<4 x i64>) nounwind {
; SSE2-LABEL: _clearupper4xi64b:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi64b:
; SSE42: # %bb.0:
; SSE42-NEXT: xorps %xmm2, %xmm2
; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SSE42-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi64b:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX-NEXT: retq
%x32 = bitcast <4 x i64> %0 to <8 x i32>
%r0 = insertelement <8 x i32> %x32, i32 zeroinitializer, i32 1
%r1 = insertelement <8 x i32> %r0, i32 zeroinitializer, i32 3
%r2 = insertelement <8 x i32> %r1, i32 zeroinitializer, i32 5
%r3 = insertelement <8 x i32> %r2, i32 zeroinitializer, i32 7
%r = bitcast <8 x i32> %r3 to <4 x i64>
ret <4 x i64> %r
}
define <4 x i32> @_clearupper4xi32b(<4 x i32>) nounwind {
; SSE2-LABEL: _clearupper4xi32b:
; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi32b:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi32b:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: retq
%x16 = bitcast <4 x i32> %0 to <8 x i16>
%r0 = insertelement <8 x i16> %x16, i16 zeroinitializer, i32 1
%r1 = insertelement <8 x i16> %r0, i16 zeroinitializer, i32 3
%r2 = insertelement <8 x i16> %r1, i16 zeroinitializer, i32 5
%r3 = insertelement <8 x i16> %r2, i16 zeroinitializer, i32 7
%r = bitcast <8 x i16> %r3 to <4 x i32>
ret <4 x i32> %r
}
define <8 x i32> @_clearupper8xi32b(<8 x i32>) nounwind {
; SSE2-LABEL: _clearupper8xi32b:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper8xi32b:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper8xi32b:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper8xi32b:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
%x16 = bitcast <8 x i32> %0 to <16 x i16>
%r0 = insertelement <16 x i16> %x16, i16 zeroinitializer, i32 1
%r1 = insertelement <16 x i16> %r0, i16 zeroinitializer, i32 3
%r2 = insertelement <16 x i16> %r1, i16 zeroinitializer, i32 5
%r3 = insertelement <16 x i16> %r2, i16 zeroinitializer, i32 7
%r4 = insertelement <16 x i16> %r3, i16 zeroinitializer, i32 9
%r5 = insertelement <16 x i16> %r4, i16 zeroinitializer, i32 11
%r6 = insertelement <16 x i16> %r5, i16 zeroinitializer, i32 13
%r7 = insertelement <16 x i16> %r6, i16 zeroinitializer, i32 15
%r = bitcast <16 x i16> %r7 to <8 x i32>
ret <8 x i32> %r
}
define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16b:
; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper8xi16b:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x8 = bitcast <8 x i16> %0 to <16 x i8>
%r0 = insertelement <16 x i8> %x8, i8 zeroinitializer, i32 1
%r1 = insertelement <16 x i8> %r0, i8 zeroinitializer, i32 3
%r2 = insertelement <16 x i8> %r1, i8 zeroinitializer, i32 5
%r3 = insertelement <16 x i8> %r2, i8 zeroinitializer, i32 7
%r4 = insertelement <16 x i8> %r3, i8 zeroinitializer, i32 9
%r5 = insertelement <16 x i8> %r4, i8 zeroinitializer, i32 11
%r6 = insertelement <16 x i8> %r5, i8 zeroinitializer, i32 13
%r7 = insertelement <16 x i8> %r6, i8 zeroinitializer, i32 15
%r = bitcast <16 x i8> %r7 to <8 x i16>
ret <8 x i16> %r
}
define <16 x i16> @_clearupper16xi16b(<16 x i16>) nounwind {
; SSE-LABEL: _clearupper16xi16b:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi16b:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX-NEXT: retq
%x8 = bitcast <16 x i16> %0 to <32 x i8>
%r0 = insertelement <32 x i8> %x8, i8 zeroinitializer, i32 1
%r1 = insertelement <32 x i8> %r0, i8 zeroinitializer, i32 3
%r2 = insertelement <32 x i8> %r1, i8 zeroinitializer, i32 5
%r3 = insertelement <32 x i8> %r2, i8 zeroinitializer, i32 7
%r4 = insertelement <32 x i8> %r3, i8 zeroinitializer, i32 9
%r5 = insertelement <32 x i8> %r4, i8 zeroinitializer, i32 11
%r6 = insertelement <32 x i8> %r5, i8 zeroinitializer, i32 13
%r7 = insertelement <32 x i8> %r6, i8 zeroinitializer, i32 15
%r8 = insertelement <32 x i8> %r7, i8 zeroinitializer, i32 17
%r9 = insertelement <32 x i8> %r8, i8 zeroinitializer, i32 19
%r10 = insertelement <32 x i8> %r9, i8 zeroinitializer, i32 21
%r11 = insertelement <32 x i8> %r10, i8 zeroinitializer, i32 23
%r12 = insertelement <32 x i8> %r11, i8 zeroinitializer, i32 25
%r13 = insertelement <32 x i8> %r12, i8 zeroinitializer, i32 27
%r14 = insertelement <32 x i8> %r13, i8 zeroinitializer, i32 29
%r15 = insertelement <32 x i8> %r14, i8 zeroinitializer, i32 31
%r = bitcast <32 x i8> %r15 to <16 x i16>
ret <16 x i16> %r
}
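; For i8 elements the 'b' form needs an illegal <32 x i4> intermediate type;
; note that the current lowering scalarizes through GPRs rather than using a
; simple vector mask.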
define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
; SSE2-LABEL: _clearupper16xi8b:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: movq %xmm1, %r10
; SSE2-NEXT: movq %r10, %r8
; SSE2-NEXT: shrq $56, %r8
; SSE2-NEXT: andl $15, %r8d
; SSE2-NEXT: movq %r10, %r9
; SSE2-NEXT: shrq $48, %r9
; SSE2-NEXT: andl $15, %r9d
; SSE2-NEXT: movq %r10, %rsi
; SSE2-NEXT: shrq $40, %rsi
; SSE2-NEXT: andl $15, %esi
; SSE2-NEXT: movq %r10, %r11
; SSE2-NEXT: shrq $32, %r11
; SSE2-NEXT: andl $15, %r11d
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: movq %rax, %rdx
; SSE2-NEXT: shrq $56, %rdx
; SSE2-NEXT: andl $15, %edx
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq $48, %rcx
; SSE2-NEXT: andl $15, %ecx
; SSE2-NEXT: movq %rax, %rdi
; SSE2-NEXT: shrq $40, %rdi
; SSE2-NEXT: andl $15, %edi
; SSE2-NEXT: movq %rax, %rbx
; SSE2-NEXT: shrq $32, %rbx
; SSE2-NEXT: andl $15, %ebx
; SSE2-NEXT: shlq $32, %rbx
; SSE2-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; SSE2-NEXT: orq %rbx, %rax
; SSE2-NEXT: shlq $40, %rdi
; SSE2-NEXT: orq %rax, %rdi
; SSE2-NEXT: shlq $48, %rcx
; SSE2-NEXT: orq %rdi, %rcx
; SSE2-NEXT: shlq $56, %rdx
; SSE2-NEXT: orq %rcx, %rdx
; SSE2-NEXT: shlq $32, %r11
; SSE2-NEXT: andl $252645135, %r10d # imm = 0xF0F0F0F
; SSE2-NEXT: orq %r11, %r10
; SSE2-NEXT: shlq $40, %rsi
; SSE2-NEXT: orq %r10, %rsi
; SSE2-NEXT: shlq $48, %r9
; SSE2-NEXT: orq %rsi, %r9
; SSE2-NEXT: shlq $56, %r8
; SSE2-NEXT: orq %r9, %r8
; SSE2-NEXT: movq %rdx, %xmm0
; SSE2-NEXT: movq %r8, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper16xi8b:
; SSE42: # %bb.0:
; SSE42-NEXT: pushq %rbx
; SSE42-NEXT: pextrq $1, %xmm0, %r10
; SSE42-NEXT: movq %r10, %r8
; SSE42-NEXT: shrq $56, %r8
; SSE42-NEXT: andl $15, %r8d
; SSE42-NEXT: movq %r10, %r9
; SSE42-NEXT: shrq $48, %r9
; SSE42-NEXT: andl $15, %r9d
; SSE42-NEXT: movq %r10, %rsi
; SSE42-NEXT: shrq $40, %rsi
; SSE42-NEXT: andl $15, %esi
; SSE42-NEXT: movq %r10, %r11
; SSE42-NEXT: shrq $32, %r11
; SSE42-NEXT: andl $15, %r11d
; SSE42-NEXT: movq %xmm0, %rax
; SSE42-NEXT: movq %rax, %rdx
; SSE42-NEXT: shrq $56, %rdx
; SSE42-NEXT: andl $15, %edx
; SSE42-NEXT: movq %rax, %rcx
; SSE42-NEXT: shrq $48, %rcx
; SSE42-NEXT: andl $15, %ecx
; SSE42-NEXT: movq %rax, %rdi
; SSE42-NEXT: shrq $40, %rdi
; SSE42-NEXT: andl $15, %edi
; SSE42-NEXT: movq %rax, %rbx
; SSE42-NEXT: shrq $32, %rbx
; SSE42-NEXT: andl $15, %ebx
; SSE42-NEXT: shlq $32, %rbx
; SSE42-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; SSE42-NEXT: orq %rbx, %rax
; SSE42-NEXT: shlq $40, %rdi
; SSE42-NEXT: orq %rax, %rdi
; SSE42-NEXT: shlq $48, %rcx
; SSE42-NEXT: orq %rdi, %rcx
; SSE42-NEXT: shlq $56, %rdx
; SSE42-NEXT: orq %rcx, %rdx
; SSE42-NEXT: shlq $32, %r11
; SSE42-NEXT: andl $252645135, %r10d # imm = 0xF0F0F0F
; SSE42-NEXT: orq %r11, %r10
; SSE42-NEXT: shlq $40, %rsi
; SSE42-NEXT: orq %r10, %rsi
; SSE42-NEXT: shlq $48, %r9
; SSE42-NEXT: orq %rsi, %r9
; SSE42-NEXT: shlq $56, %r8
; SSE42-NEXT: orq %r9, %r8
; SSE42-NEXT: movq %r8, %xmm1
; SSE42-NEXT: movq %rdx, %xmm0
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE42-NEXT: popq %rbx
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper16xi8b:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movq -{{[0-9]+}}(%rsp), %r9
; AVX-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
; AVX-NEXT: movq %r9, %r8
; AVX-NEXT: shrq $56, %r8
; AVX-NEXT: andl $15, %r8d
; AVX-NEXT: movq %r9, %r10
; AVX-NEXT: shrq $48, %r10
; AVX-NEXT: andl $15, %r10d
; AVX-NEXT: movq %r9, %rsi
; AVX-NEXT: shrq $40, %rsi
; AVX-NEXT: andl $15, %esi
; AVX-NEXT: movq %r9, %r11
; AVX-NEXT: shrq $32, %r11
; AVX-NEXT: andl $15, %r11d
; AVX-NEXT: movq %rdx, %rdi
; AVX-NEXT: shrq $56, %rdi
; AVX-NEXT: andl $15, %edi
; AVX-NEXT: movq %rdx, %rax
; AVX-NEXT: shrq $48, %rax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: movq %rdx, %rcx
; AVX-NEXT: shrq $40, %rcx
; AVX-NEXT: andl $15, %ecx
; AVX-NEXT: movq %rdx, %rbx
; AVX-NEXT: shrq $32, %rbx
; AVX-NEXT: andl $15, %ebx
; AVX-NEXT: shlq $32, %rbx
; AVX-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
; AVX-NEXT: orq %rbx, %rdx
; AVX-NEXT: shlq $40, %rcx
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: shlq $48, %rax
; AVX-NEXT: orq %rcx, %rax
; AVX-NEXT: shlq $56, %rdi
; AVX-NEXT: orq %rax, %rdi
; AVX-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; AVX-NEXT: shlq $32, %r11
; AVX-NEXT: andl $252645135, %r9d # imm = 0xF0F0F0F
; AVX-NEXT: orq %r11, %r9
; AVX-NEXT: shlq $40, %rsi
; AVX-NEXT: orq %r9, %rsi
; AVX-NEXT: shlq $48, %r10
; AVX-NEXT: orq %rsi, %r10
; AVX-NEXT: shlq $56, %r8
; AVX-NEXT: orq %r10, %r8
; AVX-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
%x4 = bitcast <16 x i8> %0 to <32 x i4>
%r0 = insertelement <32 x i4> %x4, i4 zeroinitializer, i32 1
%r1 = insertelement <32 x i4> %r0, i4 zeroinitializer, i32 3
%r2 = insertelement <32 x i4> %r1, i4 zeroinitializer, i32 5
%r3 = insertelement <32 x i4> %r2, i4 zeroinitializer, i32 7
%r4 = insertelement <32 x i4> %r3, i4 zeroinitializer, i32 9
%r5 = insertelement <32 x i4> %r4, i4 zeroinitializer, i32 11
%r6 = insertelement <32 x i4> %r5, i4 zeroinitializer, i32 13
%r7 = insertelement <32 x i4> %r6, i4 zeroinitializer, i32 15
%r8 = insertelement <32 x i4> %r7, i4 zeroinitializer, i32 17
%r9 = insertelement <32 x i4> %r8, i4 zeroinitializer, i32 19
%r10 = insertelement <32 x i4> %r9, i4 zeroinitializer, i32 21
%r11 = insertelement <32 x i4> %r10, i4 zeroinitializer, i32 23
%r12 = insertelement <32 x i4> %r11, i4 zeroinitializer, i32 25
%r13 = insertelement <32 x i4> %r12, i4 zeroinitializer, i32 27
%r14 = insertelement <32 x i4> %r13, i4 zeroinitializer, i32 29
%r15 = insertelement <32 x i4> %r14, i4 zeroinitializer, i32 31
%r = bitcast <32 x i4> %r15 to <16 x i8>
ret <16 x i8> %r
}
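; Note that the final bitcast below uses %r15, so the insertions into nibbles
; 33-63 are dead: only the low 16 bytes are actually cleared, and the checks
; accordingly leave the upper half unmasked.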
define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
; SSE2-LABEL: _clearupper32xi8b:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE2-NEXT: movq %xmm2, %r10
; SSE2-NEXT: movq %r10, %r8
; SSE2-NEXT: shrq $56, %r8
; SSE2-NEXT: andl $15, %r8d
; SSE2-NEXT: movq %r10, %r9
; SSE2-NEXT: shrq $48, %r9
; SSE2-NEXT: andl $15, %r9d
; SSE2-NEXT: movq %r10, %rsi
; SSE2-NEXT: shrq $40, %rsi
; SSE2-NEXT: andl $15, %esi
; SSE2-NEXT: movq %r10, %r11
; SSE2-NEXT: shrq $32, %r11
; SSE2-NEXT: andl $15, %r11d
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: movq %rax, %rdx
; SSE2-NEXT: shrq $56, %rdx
; SSE2-NEXT: andl $15, %edx
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq $48, %rcx
; SSE2-NEXT: andl $15, %ecx
; SSE2-NEXT: movq %rax, %rdi
; SSE2-NEXT: shrq $40, %rdi
; SSE2-NEXT: andl $15, %edi
; SSE2-NEXT: movq %rax, %rbx
; SSE2-NEXT: shrq $32, %rbx
; SSE2-NEXT: andl $15, %ebx
; SSE2-NEXT: shlq $32, %rbx
; SSE2-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; SSE2-NEXT: orq %rbx, %rax
; SSE2-NEXT: shlq $40, %rdi
; SSE2-NEXT: orq %rax, %rdi
; SSE2-NEXT: shlq $48, %rcx
; SSE2-NEXT: orq %rdi, %rcx
; SSE2-NEXT: shlq $56, %rdx
; SSE2-NEXT: orq %rcx, %rdx
; SSE2-NEXT: shlq $32, %r11
; SSE2-NEXT: andl $252645135, %r10d # imm = 0xF0F0F0F
; SSE2-NEXT: orq %r11, %r10
; SSE2-NEXT: shlq $40, %rsi
; SSE2-NEXT: orq %r10, %rsi
; SSE2-NEXT: shlq $48, %r9
; SSE2-NEXT: orq %rsi, %r9
; SSE2-NEXT: shlq $56, %r8
; SSE2-NEXT: orq %r9, %r8
; SSE2-NEXT: movq %rdx, %xmm0
; SSE2-NEXT: movq %r8, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper32xi8b:
; SSE42: # %bb.0:
; SSE42-NEXT: pushq %rbx
; SSE42-NEXT: pextrq $1, %xmm0, %r10
; SSE42-NEXT: movq %r10, %r8
; SSE42-NEXT: shrq $56, %r8
; SSE42-NEXT: andl $15, %r8d
; SSE42-NEXT: movq %r10, %r9
; SSE42-NEXT: shrq $48, %r9
; SSE42-NEXT: andl $15, %r9d
; SSE42-NEXT: movq %r10, %rsi
; SSE42-NEXT: shrq $40, %rsi
; SSE42-NEXT: andl $15, %esi
; SSE42-NEXT: movq %r10, %r11
; SSE42-NEXT: shrq $32, %r11
; SSE42-NEXT: andl $15, %r11d
; SSE42-NEXT: movq %xmm0, %rax
; SSE42-NEXT: movq %rax, %rdx
; SSE42-NEXT: shrq $56, %rdx
; SSE42-NEXT: andl $15, %edx
; SSE42-NEXT: movq %rax, %rcx
; SSE42-NEXT: shrq $48, %rcx
; SSE42-NEXT: andl $15, %ecx
; SSE42-NEXT: movq %rax, %rdi
; SSE42-NEXT: shrq $40, %rdi
; SSE42-NEXT: andl $15, %edi
; SSE42-NEXT: movq %rax, %rbx
; SSE42-NEXT: shrq $32, %rbx
; SSE42-NEXT: andl $15, %ebx
; SSE42-NEXT: shlq $32, %rbx
; SSE42-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; SSE42-NEXT: orq %rbx, %rax
; SSE42-NEXT: shlq $40, %rdi
; SSE42-NEXT: orq %rax, %rdi
; SSE42-NEXT: shlq $48, %rcx
; SSE42-NEXT: orq %rdi, %rcx
; SSE42-NEXT: shlq $56, %rdx
; SSE42-NEXT: orq %rcx, %rdx
; SSE42-NEXT: shlq $32, %r11
; SSE42-NEXT: andl $252645135, %r10d # imm = 0xF0F0F0F
; SSE42-NEXT: orq %r11, %r10
; SSE42-NEXT: shlq $40, %rsi
; SSE42-NEXT: orq %r10, %rsi
; SSE42-NEXT: shlq $48, %r9
; SSE42-NEXT: orq %rsi, %r9
; SSE42-NEXT: shlq $56, %r8
; SSE42-NEXT: orq %r9, %r8
; SSE42-NEXT: movq %r8, %xmm2
; SSE42-NEXT: movq %rdx, %xmm0
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE42-NEXT: popq %rbx
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper32xi8b:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %r9
; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rcx
; AVX1-NEXT: movq %r9, %r8
; AVX1-NEXT: shrq $56, %r8
; AVX1-NEXT: andl $15, %r8d
; AVX1-NEXT: movq %rcx, %rsi
; AVX1-NEXT: movq %rcx, %rdi
; AVX1-NEXT: movq %rcx, %rdx
; AVX1-NEXT: movq %rcx, %rax
; AVX1-NEXT: shrq $32, %rax
; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
; AVX1-NEXT: orq %rax, %rcx
; AVX1-NEXT: movq %r9, %rax
; AVX1-NEXT: shrq $48, %rax
; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: shrq $40, %rdx
; AVX1-NEXT: andl $15, %edx
; AVX1-NEXT: shlq $40, %rdx
; AVX1-NEXT: orq %rcx, %rdx
; AVX1-NEXT: movq %r9, %rcx
; AVX1-NEXT: shrq $40, %rcx
; AVX1-NEXT: andl $15, %ecx
; AVX1-NEXT: shrq $48, %rdi
; AVX1-NEXT: andl $15, %edi
; AVX1-NEXT: shlq $48, %rdi
; AVX1-NEXT: orq %rdx, %rdi
; AVX1-NEXT: movq %r9, %rdx
; AVX1-NEXT: shrq $32, %rdx
; AVX1-NEXT: andl $15, %edx
; AVX1-NEXT: shrq $56, %rsi
; AVX1-NEXT: andl $15, %esi
; AVX1-NEXT: shlq $56, %rsi
; AVX1-NEXT: orq %rdi, %rsi
; AVX1-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: shlq $32, %rdx
; AVX1-NEXT: andl $252645135, %r9d # imm = 0xF0F0F0F
; AVX1-NEXT: orq %rdx, %r9
; AVX1-NEXT: shlq $40, %rcx
; AVX1-NEXT: orq %r9, %rcx
; AVX1-NEXT: shlq $48, %rax
; AVX1-NEXT: orq %rcx, %rax
; AVX1-NEXT: shlq $56, %r8
; AVX1-NEXT: orq %rax, %r8
; AVX1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: movq %rax, %r8
; AVX1-NEXT: movq %rax, %r9
; AVX1-NEXT: movq %rax, %rsi
; AVX1-NEXT: movq %rax, %rdi
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: movl %eax, %edx
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: shrl $8, %eax
; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX1-NEXT: shrl $16, %edx
; AVX1-NEXT: vpinsrb $2, %edx, %xmm1, %xmm1
; AVX1-NEXT: shrl $24, %ecx
; AVX1-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1
; AVX1-NEXT: shrq $32, %rdi
; AVX1-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
; AVX1-NEXT: shrq $40, %rsi
; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2
; AVX1-NEXT: shrq $48, %r9
; AVX1-NEXT: vpinsrb $6, %r9d, %xmm1, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: shrq $56, %r8
; AVX1-NEXT: vpinsrb $7, %r8d, %xmm1, %xmm0
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $8, %ecx
; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $16, %ecx
; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $24, %ecx
; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $40, %rcx
; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $48, %rcx
; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm2, %rcx
; AVX1-NEXT: shrq $56, %rax
; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %ecx, %eax
; AVX1-NEXT: shrl $8, %eax
; AVX1-NEXT: vmovd %ecx, %xmm1
; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %ecx, %eax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %ecx, %eax
; AVX1-NEXT: shrl $24, %eax
; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; AVX1-NEXT: movq %rcx, %rax
; AVX1-NEXT: shrq $32, %rax
; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; AVX1-NEXT: movq %rcx, %rax
; AVX1-NEXT: shrq $40, %rax
; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; AVX1-NEXT: movq %rcx, %rax
; AVX1-NEXT: shrq $48, %rax
; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: shrq $56, %rcx
; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $8, %ecx
; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpinsrb $9, %ecx, %xmm1, %xmm1
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $16, %ecx
; AVX1-NEXT: vpinsrb $10, %ecx, %xmm1, %xmm1
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $24, %ecx
; AVX1-NEXT: vpinsrb $11, %ecx, %xmm1, %xmm1
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: vpinsrb $12, %ecx, %xmm1, %xmm1
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $40, %rcx
; AVX1-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $48, %rcx
; AVX1-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
; AVX1-NEXT: shrq $56, %rax
; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper32xi8b:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %r9
; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx
; AVX2-NEXT: movq %r9, %r8
; AVX2-NEXT: shrq $56, %r8
; AVX2-NEXT: andl $15, %r8d
; AVX2-NEXT: movq %rcx, %rsi
; AVX2-NEXT: movq %rcx, %rdi
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: movq %rcx, %rax
; AVX2-NEXT: shrq $32, %rax
; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
; AVX2-NEXT: orq %rax, %rcx
; AVX2-NEXT: movq %r9, %rax
; AVX2-NEXT: shrq $48, %rax
; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: shrq $40, %rdx
; AVX2-NEXT: andl $15, %edx
; AVX2-NEXT: shlq $40, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: movq %r9, %rcx
; AVX2-NEXT: shrq $40, %rcx
; AVX2-NEXT: andl $15, %ecx
; AVX2-NEXT: shrq $48, %rdi
; AVX2-NEXT: andl $15, %edi
; AVX2-NEXT: shlq $48, %rdi
; AVX2-NEXT: orq %rdx, %rdi
; AVX2-NEXT: movq %r9, %rdx
; AVX2-NEXT: shrq $32, %rdx
; AVX2-NEXT: andl $15, %edx
; AVX2-NEXT: shrq $56, %rsi
; AVX2-NEXT: andl $15, %esi
; AVX2-NEXT: shlq $56, %rsi
; AVX2-NEXT: orq %rdi, %rsi
; AVX2-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: shlq $32, %rdx
; AVX2-NEXT: andl $252645135, %r9d # imm = 0xF0F0F0F
; AVX2-NEXT: orq %rdx, %r9
; AVX2-NEXT: shlq $40, %rcx
; AVX2-NEXT: orq %r9, %rcx
; AVX2-NEXT: shlq $48, %rax
; AVX2-NEXT: orq %rcx, %rax
; AVX2-NEXT: shlq $56, %r8
; AVX2-NEXT: orq %rax, %r8
; AVX2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: movq %rax, %r8
; AVX2-NEXT: movq %rax, %r9
; AVX2-NEXT: movq %rax, %rsi
; AVX2-NEXT: movq %rax, %rdi
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: movl %eax, %edx
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX2-NEXT: shrl $16, %edx
; AVX2-NEXT: vpinsrb $2, %edx, %xmm1, %xmm1
; AVX2-NEXT: shrl $24, %ecx
; AVX2-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1
; AVX2-NEXT: shrq $32, %rdi
; AVX2-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
; AVX2-NEXT: shrq $40, %rsi
; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
; AVX2-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2
; AVX2-NEXT: shrq $48, %r9
; AVX2-NEXT: vpinsrb $6, %r9d, %xmm1, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: shrq $56, %r8
; AVX2-NEXT: vpinsrb $7, %r8d, %xmm1, %xmm0
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $8, %ecx
; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $16, %ecx
; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $24, %ecx
; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $40, %rcx
; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $48, %rcx
; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm2, %rcx
; AVX2-NEXT: shrq $56, %rax
; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %ecx, %eax
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: vmovd %ecx, %xmm1
; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %ecx, %eax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %ecx, %eax
; AVX2-NEXT: shrl $24, %eax
; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; AVX2-NEXT: movq %rcx, %rax
; AVX2-NEXT: shrq $32, %rax
; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; AVX2-NEXT: movq %rcx, %rax
; AVX2-NEXT: shrq $40, %rax
; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; AVX2-NEXT: movq %rcx, %rax
; AVX2-NEXT: shrq $48, %rax
; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm2, %rax
; AVX2-NEXT: shrq $56, %rcx
; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $8, %ecx
; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpinsrb $9, %ecx, %xmm1, %xmm1
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $16, %ecx
; AVX2-NEXT: vpinsrb $10, %ecx, %xmm1, %xmm1
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $24, %ecx
; AVX2-NEXT: vpinsrb $11, %ecx, %xmm1, %xmm1
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: vpinsrb $12, %ecx, %xmm1, %xmm1
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $40, %rcx
; AVX2-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $48, %rcx
; AVX2-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
; AVX2-NEXT: shrq $56, %rax
; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
%x4 = bitcast <32 x i8> %0 to <64 x i4>
%r0 = insertelement <64 x i4> %x4, i4 zeroinitializer, i32 1
%r1 = insertelement <64 x i4> %r0, i4 zeroinitializer, i32 3
%r2 = insertelement <64 x i4> %r1, i4 zeroinitializer, i32 5
%r3 = insertelement <64 x i4> %r2, i4 zeroinitializer, i32 7
%r4 = insertelement <64 x i4> %r3, i4 zeroinitializer, i32 9
%r5 = insertelement <64 x i4> %r4, i4 zeroinitializer, i32 11
%r6 = insertelement <64 x i4> %r5, i4 zeroinitializer, i32 13
%r7 = insertelement <64 x i4> %r6, i4 zeroinitializer, i32 15
%r8 = insertelement <64 x i4> %r7, i4 zeroinitializer, i32 17
%r9 = insertelement <64 x i4> %r8, i4 zeroinitializer, i32 19
%r10 = insertelement <64 x i4> %r9, i4 zeroinitializer, i32 21
%r11 = insertelement <64 x i4> %r10, i4 zeroinitializer, i32 23
%r12 = insertelement <64 x i4> %r11, i4 zeroinitializer, i32 25
%r13 = insertelement <64 x i4> %r12, i4 zeroinitializer, i32 27
%r14 = insertelement <64 x i4> %r13, i4 zeroinitializer, i32 29
%r15 = insertelement <64 x i4> %r14, i4 zeroinitializer, i32 31
%r16 = insertelement <64 x i4> %r15, i4 zeroinitializer, i32 33
%r17 = insertelement <64 x i4> %r16, i4 zeroinitializer, i32 35
%r18 = insertelement <64 x i4> %r17, i4 zeroinitializer, i32 37
%r19 = insertelement <64 x i4> %r18, i4 zeroinitializer, i32 39
%r20 = insertelement <64 x i4> %r19, i4 zeroinitializer, i32 41
%r21 = insertelement <64 x i4> %r20, i4 zeroinitializer, i32 43
%r22 = insertelement <64 x i4> %r21, i4 zeroinitializer, i32 45
%r23 = insertelement <64 x i4> %r22, i4 zeroinitializer, i32 47
%r24 = insertelement <64 x i4> %r23, i4 zeroinitializer, i32 49
%r25 = insertelement <64 x i4> %r24, i4 zeroinitializer, i32 51
%r26 = insertelement <64 x i4> %r25, i4 zeroinitializer, i32 53
%r27 = insertelement <64 x i4> %r26, i4 zeroinitializer, i32 55
%r28 = insertelement <64 x i4> %r27, i4 zeroinitializer, i32 57
%r29 = insertelement <64 x i4> %r28, i4 zeroinitializer, i32 59
%r30 = insertelement <64 x i4> %r29, i4 zeroinitializer, i32 61
%r31 = insertelement <64 x i4> %r30, i4 zeroinitializer, i32 63
%r = bitcast <64 x i4> %r15 to <32 x i8>
ret <32 x i8> %r
}
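; The 'c' variants apply the mask directly as a vector 'and', the canonical
; form that the 'a' and 'b' patterns should ideally be folded into.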
define <2 x i64> @_clearupper2xi64c(<2 x i64>) nounwind {
; SSE2-LABEL: _clearupper2xi64c:
; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper2xi64c:
; SSE42: # %bb.0:
; SSE42-NEXT: xorps %xmm1, %xmm1
; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper2xi64c:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
%r = and <2 x i64> <i64 4294967295, i64 4294967295>, %0
ret <2 x i64> %r
}
define <4 x i64> @_clearupper4xi64c(<4 x i64>) nounwind {
; SSE2-LABEL: _clearupper4xi64c:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi64c:
; SSE42: # %bb.0:
; SSE42-NEXT: xorps %xmm2, %xmm2
; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SSE42-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi64c:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX-NEXT: retq
%r = and <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>, %0
ret <4 x i64> %r
}
define <4 x i32> @_clearupper4xi32c(<4 x i32>) nounwind {
; SSE2-LABEL: _clearupper4xi32c:
; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi32c:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi32c:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: retq
%r = and <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>, %0
ret <4 x i32> %r
}
define <8 x i32> @_clearupper8xi32c(<8 x i32>) nounwind {
; SSE2-LABEL: _clearupper8xi32c:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper8xi32c:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper8xi32c:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper8xi32c:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
%r = and <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>, %0
ret <8 x i32> %r
}
define <8 x i16> @_clearupper8xi16c(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16c:
; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper8xi16c:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%r = and <8 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
ret <8 x i16> %r
}
define <16 x i16> @_clearupper16xi16c(<16 x i16>) nounwind {
; SSE-LABEL: _clearupper16xi16c:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi16c:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%r = and <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
ret <16 x i16> %r
}
define <16 x i8> @_clearupper16xi8c(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8c:
; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi8c:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%r = and <16 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
ret <16 x i8> %r
}
define <32 x i8> @_clearupper32xi8c(<32 x i8>) nounwind {
; SSE-LABEL: _clearupper32xi8c:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper32xi8c:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%r = and <32 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
ret <32 x i8> %r
}