; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=X64,X64-SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX2
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define i32 @t(ptr %val) nounwind {
; X32-SSE2-LABEL: t:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,2,3]
; X32-SSE2-NEXT: movd %xmm0, %eax
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,2,3]
; X64-SSSE3-NEXT: movd %xmm0, %eax
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: t:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movl 8(%rdi), %eax
; X64-AVX-NEXT: retq
%tmp2 = load <2 x i64>, ptr %val, align 16 ; <<2 x i64>> [#uses=1]
%tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
ret i32 %tmp4
}
; Case where extractelement of load ends up as undef.
; (Making sure this doesn't crash.)
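; Lane 0 of the shuffle mask below is undef, so the extract of lane 0 folds to
; undef and each target reduces the function to a bare return.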
define i32 @t2(ptr %xp) {
; X32-SSE2-LABEL: t2:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: retl
;
; X64-LABEL: t2:
; X64: # %bb.0:
; X64-NEXT: retq
%x = load <8 x i32>, ptr %xp
%Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
%y = extractelement <8 x i32> %Shuff68, i32 0
ret i32 %y
}
; This case could easily end up inf-looping in the DAG combiner due to a
; low-alignment load of the vector, which prevents us from reliably forming a
; narrow load.
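; (Note the 'align 1' on the <2 x double> load in the IR below.)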
define void @t3(ptr %a0) {
; X32-SSE2-LABEL: t3:
; X32-SSE2: # %bb.0: # %bb
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movups (%eax), %xmm0
; X32-SSE2-NEXT: movhps %xmm0, (%eax)
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t3:
; X64-SSSE3: # %bb.0: # %bb
; X64-SSSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-SSSE3-NEXT: movsd %xmm0, (%rax)
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: t3:
; X64-AVX: # %bb.0: # %bb
; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX-NEXT: vmovsd %xmm0, (%rax)
; X64-AVX-NEXT: retq
bb:
%tmp13 = load <2 x double>, ptr %a0, align 1
%.sroa.3.24.vec.extract = extractelement <2 x double> %tmp13, i32 1
store double %.sroa.3.24.vec.extract, ptr undef, align 8
ret void
}
; Case where a load is unary shuffled, then bitcast (to a type with the same
; number of elements) before extractelement.
; This guards against an assertion failure: the extraction assumed that the
; undef second shuffle operand had the post-bitcast type instead of the
; pre-bitcast type.
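; On x86-64, element 1 of the swapped shuffle is element 0 of the original
; vector, so the extract folds to a single 8-byte scalar load (movq).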
define i64 @t4(ptr %a) {
; X32-SSE2-LABEL: t4:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movdqa (%eax), %xmm0
; X32-SSE2-NEXT: movd %xmm0, %eax
; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X32-SSE2-NEXT: movd %xmm0, %edx
; X32-SSE2-NEXT: retl
;
; X64-LABEL: t4:
; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: retq
%b = load <2 x double>, ptr %a, align 16
%c = shufflevector <2 x double> %b, <2 x double> %b, <2 x i32> <i32 1, i32 0>
%d = bitcast <2 x double> %c to <2 x i64>
%e = extractelement <2 x i64> %d, i32 1
ret i64 %e
}
; Don't extract from a volatile load.
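; Volatile accesses must not be narrowed, so the full <2 x double> load is
; kept and element 1 is written out with movhps.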
define void @t5(ptr %a0, ptr %a1) {
; X32-SSE2-LABEL: t5:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: movaps (%ecx), %xmm0
; X32-SSE2-NEXT: movhps %xmm0, (%eax)
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t5:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: movaps (%rdi), %xmm0
; X64-SSSE3-NEXT: movhps %xmm0, (%rsi)
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: t5:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX-NEXT: vmovhps %xmm0, (%rsi)
; X64-AVX-NEXT: retq
%vecload = load volatile <2 x double>, ptr %a0, align 16
%vecext = extractelement <2 x double> %vecload, i32 1
store volatile double %vecext, ptr %a1, align 8
ret void
}
; Check for multiuse.
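; The extracted element below has two uses (the fcmp and the select), so any
; scalarization of the load must feed both of them.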
define float @t6(ptr %a0) {
; X32-SSE2-LABEL: t6:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: pushl %eax
; X32-SSE2-NEXT: .cfi_def_cfa_offset 8
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movaps (%eax), %xmm0
; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X32-SSE2-NEXT: xorps %xmm1, %xmm1
; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-SSE2-NEXT: andps %xmm1, %xmm2
; X32-SSE2-NEXT: andnps %xmm0, %xmm1
; X32-SSE2-NEXT: orps %xmm2, %xmm1
; X32-SSE2-NEXT: movss %xmm1, (%esp)
; X32-SSE2-NEXT: flds (%esp)
; X32-SSE2-NEXT: popl %eax
; X32-SSE2-NEXT: .cfi_def_cfa_offset 4
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t6:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: movshdup {{.*#+}} xmm1 = mem[1,1,3,3]
; X64-SSSE3-NEXT: xorps %xmm0, %xmm0
; X64-SSSE3-NEXT: cmpeqss %xmm1, %xmm0
; X64-SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X64-SSSE3-NEXT: andps %xmm0, %xmm2
; X64-SSSE3-NEXT: andnps %xmm1, %xmm0
; X64-SSSE3-NEXT: orps %xmm2, %xmm0
; X64-SSSE3-NEXT: retq
;
; X64-AVX1-LABEL: t6:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX1-NEXT: vcmpeqss %xmm1, %xmm0, %xmm1
; X64-AVX1-NEXT: vblendvps %xmm1, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: t6:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT: vcmpeqss %xmm1, %xmm0, %xmm1
; X64-AVX2-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%vecload = load <8 x float>, ptr %a0, align 32
%vecext = extractelement <8 x float> %vecload, i32 1
%cmp = fcmp oeq float %vecext, 0.000000e+00
%cond = select i1 %cmp, float 1.000000e+00, float %vecext
ret float %cond
}
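; Multiuse extract from lane 6 (the upper 128-bit half) of a loaded
; <8 x float>, with the result stored rather than returned.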
define void @PR43971(ptr %a0, ptr %a1) {
; X32-SSE2-LABEL: PR43971:
; X32-SSE2: # %bb.0: # %entry
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: movaps 16(%ecx), %xmm0
; X32-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X32-SSE2-NEXT: xorps %xmm1, %xmm1
; X32-SSE2-NEXT: cmpltss %xmm0, %xmm1
; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-SSE2-NEXT: andps %xmm1, %xmm2
; X32-SSE2-NEXT: andnps %xmm0, %xmm1
; X32-SSE2-NEXT: orps %xmm2, %xmm1
; X32-SSE2-NEXT: movss %xmm1, (%eax)
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: PR43971:
; X64-SSSE3: # %bb.0: # %entry
; X64-SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-SSSE3-NEXT: xorps %xmm1, %xmm1
; X64-SSSE3-NEXT: cmpltss %xmm0, %xmm1
; X64-SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X64-SSSE3-NEXT: andps %xmm1, %xmm2
; X64-SSSE3-NEXT: andnps %xmm0, %xmm1
; X64-SSSE3-NEXT: orps %xmm2, %xmm1
; X64-SSSE3-NEXT: movss %xmm1, (%rsi)
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: PR43971:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vcmpltss %xmm0, %xmm1, %xmm1
; X64-AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X64-AVX-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; X64-AVX-NEXT: vmovss %xmm0, (%rsi)
; X64-AVX-NEXT: retq
entry:
%0 = load <8 x float>, ptr %a0, align 32
%vecext = extractelement <8 x float> %0, i32 6
%cmp = fcmp ogt float %vecext, 0.000000e+00
%1 = load float, ptr %a1, align 4
%cond = select i1 %cmp, float %1, float %vecext
store float %cond, ptr %a1, align 4
ret void
}
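; Same pattern as @t6 above: a multiuse extract from lane 1 whose result is
; returned directly.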
define float @PR43971_1(ptr %a0) nounwind {
; X32-SSE2-LABEL: PR43971_1:
; X32-SSE2: # %bb.0: # %entry
; X32-SSE2-NEXT: pushl %eax
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movaps (%eax), %xmm0
; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X32-SSE2-NEXT: xorps %xmm1, %xmm1
; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-SSE2-NEXT: andps %xmm1, %xmm2
; X32-SSE2-NEXT: andnps %xmm0, %xmm1
; X32-SSE2-NEXT: orps %xmm2, %xmm1
; X32-SSE2-NEXT: movss %xmm1, (%esp)
; X32-SSE2-NEXT: flds (%esp)
; X32-SSE2-NEXT: popl %eax
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: PR43971_1:
; X64-SSSE3: # %bb.0: # %entry
; X64-SSSE3-NEXT: movshdup {{.*#+}} xmm1 = mem[1,1,3,3]
; X64-SSSE3-NEXT: xorps %xmm0, %xmm0
; X64-SSSE3-NEXT: cmpeqss %xmm1, %xmm0
; X64-SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X64-SSSE3-NEXT: andps %xmm0, %xmm2
; X64-SSSE3-NEXT: andnps %xmm1, %xmm0
; X64-SSSE3-NEXT: orps %xmm2, %xmm0
; X64-SSSE3-NEXT: retq
;
; X64-AVX1-LABEL: PR43971_1:
; X64-AVX1: # %bb.0: # %entry
; X64-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX1-NEXT: vcmpeqss %xmm1, %xmm0, %xmm1
; X64-AVX1-NEXT: vblendvps %xmm1, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: PR43971_1:
; X64-AVX2: # %bb.0: # %entry
; X64-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT: vcmpeqss %xmm1, %xmm0, %xmm1
; X64-AVX2-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-AVX2-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
entry:
%0 = load <8 x float>, ptr %a0, align 32
%vecext = extractelement <8 x float> %0, i32 1
%cmp = fcmp oeq float %vecext, 0.000000e+00
%cond = select i1 %cmp, float 1.000000e+00, float %vecext
ret float %cond
}
; Test for bad extractions from a VBROADCAST_LOAD of a non-uniform <2 x i16>
; constant that has been bitcast to <4 x i32>.
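; The four i8 stores below assemble the bytes 0x9E,0x9F,0xA0,0xA1, so the two
; <2 x i16> elements are 0x9F9E and 0xA1A0 and the combined i32 is 0xA1A09F9E,
; matching the immediates in the asm.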
define void @subextract_broadcast_load_constant(ptr nocapture %0, ptr nocapture %1, ptr nocapture %2) nounwind {
; X32-SSE2-LABEL: subextract_broadcast_load_constant:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-SSE2-NEXT: movl $-1583308898, (%edx) # imm = 0xA1A09F9E
; X32-SSE2-NEXT: movw $-24674, (%ecx) # imm = 0x9F9E
; X32-SSE2-NEXT: movw $-24160, (%eax) # imm = 0xA1A0
; X32-SSE2-NEXT: retl
;
; X64-LABEL: subextract_broadcast_load_constant:
; X64: # %bb.0:
; X64-NEXT: movl $-1583308898, (%rdi) # imm = 0xA1A09F9E
; X64-NEXT: movw $-24674, (%rsi) # imm = 0x9F9E
; X64-NEXT: movw $-24160, (%rdx) # imm = 0xA1A0
; X64-NEXT: retq
store i8 -98, ptr %0, align 1
%4 = getelementptr inbounds i8, ptr %0, i64 1
store i8 -97, ptr %4, align 1
%5 = getelementptr inbounds i8, ptr %0, i64 2
store i8 -96, ptr %5, align 1
%6 = getelementptr inbounds i8, ptr %0, i64 3
store i8 -95, ptr %6, align 1
%7 = load <2 x i16>, ptr %0, align 4
%8 = extractelement <2 x i16> %7, i32 0
store i16 %8, ptr %1, align 2
%9 = extractelement <2 x i16> %7, i32 1
store i16 %9, ptr %2, align 2
ret void
}
; A scalar load is favored over an XMM->GPR register transfer in this example.
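; Element 0 is reloaded with a scalar movl instead of being transferred out of
; %xmm0 with movd.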
define i32 @multi_use_load_scalarization(ptr %p) nounwind {
; X32-SSE2-LABEL: multi_use_load_scalarization:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: movl (%ecx), %eax
; X32-SSE2-NEXT: movdqu (%ecx), %xmm0
; X32-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE2-NEXT: psubd %xmm1, %xmm0
; X32-SSE2-NEXT: movdqa %xmm0, (%ecx)
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: multi_use_load_scalarization:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: movl (%rdi), %eax
; X64-SSSE3-NEXT: movdqu (%rdi), %xmm0
; X64-SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSSE3-NEXT: psubd %xmm1, %xmm0
; X64-SSSE3-NEXT: movdqa %xmm0, (%rdi)
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: multi_use_load_scalarization:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movl (%rdi), %eax
; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovdqa %xmm0, (%rdi)
; X64-AVX-NEXT: retq
%v = load <4 x i32>, ptr %p, align 1
%v1 = add <4 x i32> %v, <i32 1, i32 1, i32 1, i32 1>
store <4 x i32> %v1, ptr %p
%r = extractelement <4 x i32> %v, i64 0
ret i32 %r
}
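; The same pattern with a volatile load: the load may not be repeated, so
; element 0 is transferred out of the vector register with movd instead.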
define i32 @multi_use_volatile_load_scalarization(ptr %p) nounwind {
; X32-SSE2-LABEL: multi_use_volatile_load_scalarization:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: movdqu (%ecx), %xmm0
; X32-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE2-NEXT: movd %xmm0, %eax
; X32-SSE2-NEXT: psubd %xmm1, %xmm0
; X32-SSE2-NEXT: movdqa %xmm0, (%ecx)
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: multi_use_volatile_load_scalarization:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: movdqu (%rdi), %xmm0
; X64-SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSSE3-NEXT: movd %xmm0, %eax
; X64-SSSE3-NEXT: psubd %xmm1, %xmm0
; X64-SSSE3-NEXT: movdqa %xmm0, (%rdi)
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: multi_use_volatile_load_scalarization:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm1
; X64-AVX-NEXT: vmovdqa %xmm1, (%rdi)
; X64-AVX-NEXT: vmovd %xmm0, %eax
; X64-AVX-NEXT: retq
%v = load volatile <4 x i32>, ptr %p, align 1
%v1 = add <4 x i32> %v, <i32 1, i32 1, i32 1, i32 1>
store <4 x i32> %v1, ptr %p
%r = extractelement <4 x i32> %v, i64 0
ret i32 %r
}
; This test is reduced from a C source example that showed a miscompile:
; https://github.com/llvm/llvm-project/issues/53695
; The scalarized loads from 'zero' in the AVX asm must occur before
; the vector store to 'zero' overwrites the values.
; If compiled to a binary, this test should return 0 if correct.
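; A rough C equivalent of the pattern (a sketch using GNU vector extensions;
; not the exact reduced source from the issue):
;   typedef unsigned v8su __attribute__((vector_size(32)));
;   v8su n1 = {0, 42, 6, 0, 0, 0, 0, 0};
;   static v8su zero;
;   int main(void) {
;     v8su z = zero;            // must be read before 'zero' is overwritten
;     zero = n1;                // the vector store that clobbers 'zero'
;     volatile v8su two = {2, 2, 2, 2, 2, 2, 2, 2};
;     v8su t = two;             // volatile reload, as with %stackptr below
;     v8su d = z / t;
;     return d[1] + d[2];       // 0 when 'z' was loaded in time
;   }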
@n1 = local_unnamed_addr global <8 x i32> <i32 0, i32 42, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0>, align 32
@zero = internal unnamed_addr global <8 x i32> zeroinitializer, align 32
define i32 @main() nounwind {
; X32-SSE2-LABEL: main:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: pushl %ebp
; X32-SSE2-NEXT: movl %esp, %ebp
; X32-SSE2-NEXT: pushl %esi
; X32-SSE2-NEXT: andl $-32, %esp
; X32-SSE2-NEXT: subl $64, %esp
; X32-SSE2-NEXT: movdqa zero, %xmm0
; X32-SSE2-NEXT: movaps n1+16, %xmm1
; X32-SSE2-NEXT: movaps n1, %xmm2
; X32-SSE2-NEXT: movaps %xmm2, zero
; X32-SSE2-NEXT: movaps %xmm1, zero+16
; X32-SSE2-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
; X32-SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE2-NEXT: movaps %xmm1, (%esp)
; X32-SSE2-NEXT: movdqa (%esp), %xmm1
; X32-SSE2-NEXT: movaps {{[0-9]+}}(%esp), %xmm2
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; X32-SSE2-NEXT: movd %xmm2, %eax
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; X32-SSE2-NEXT: movd %xmm2, %ecx
; X32-SSE2-NEXT: xorl %edx, %edx
; X32-SSE2-NEXT: divl %ecx
; X32-SSE2-NEXT: movl %eax, %ecx
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X32-SSE2-NEXT: movd %xmm0, %eax
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; X32-SSE2-NEXT: movd %xmm0, %esi
; X32-SSE2-NEXT: xorl %edx, %edx
; X32-SSE2-NEXT: divl %esi
; X32-SSE2-NEXT: addl %ecx, %eax
; X32-SSE2-NEXT: leal -4(%ebp), %esp
; X32-SSE2-NEXT: popl %esi
; X32-SSE2-NEXT: popl %ebp
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: main:
; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: pushq %rbp
; X64-SSSE3-NEXT: movq %rsp, %rbp
; X64-SSSE3-NEXT: andq $-32, %rsp
; X64-SSSE3-NEXT: subq $64, %rsp
; X64-SSSE3-NEXT: movdqa zero(%rip), %xmm0
; X64-SSSE3-NEXT: movq n1@GOTPCREL(%rip), %rax
; X64-SSSE3-NEXT: movaps (%rax), %xmm1
; X64-SSSE3-NEXT: movaps 16(%rax), %xmm2
; X64-SSSE3-NEXT: movaps %xmm1, zero(%rip)
; X64-SSSE3-NEXT: movaps %xmm2, zero+16(%rip)
; X64-SSSE3-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
; X64-SSSE3-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; X64-SSSE3-NEXT: movaps %xmm1, (%rsp)
; X64-SSSE3-NEXT: movdqa (%rsp), %xmm1
; X64-SSSE3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm2
; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; X64-SSSE3-NEXT: movd %xmm2, %eax
; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; X64-SSSE3-NEXT: movd %xmm2, %ecx
; X64-SSSE3-NEXT: xorl %edx, %edx
; X64-SSSE3-NEXT: divl %ecx
; X64-SSSE3-NEXT: movl %eax, %ecx
; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X64-SSSE3-NEXT: movd %xmm0, %eax
; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; X64-SSSE3-NEXT: movd %xmm0, %esi
; X64-SSSE3-NEXT: xorl %edx, %edx
; X64-SSSE3-NEXT: divl %esi
; X64-SSSE3-NEXT: addl %ecx, %eax
; X64-SSSE3-NEXT: movq %rbp, %rsp
; X64-SSSE3-NEXT: popq %rbp
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: main:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: pushq %rbp
; X64-AVX-NEXT: movq %rsp, %rbp
; X64-AVX-NEXT: andq $-32, %rsp
; X64-AVX-NEXT: subq $64, %rsp
; X64-AVX-NEXT: movq n1@GOTPCREL(%rip), %rax
; X64-AVX-NEXT: vmovaps (%rax), %ymm0
; X64-AVX-NEXT: movl zero+4(%rip), %ecx
; X64-AVX-NEXT: movl zero+8(%rip), %eax
; X64-AVX-NEXT: vmovaps %ymm0, zero(%rip)
; X64-AVX-NEXT: vbroadcastss {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
; X64-AVX-NEXT: vmovaps %ymm0, (%rsp)
; X64-AVX-NEXT: vmovaps (%rsp), %ymm0
; X64-AVX-NEXT: vextractps $2, %xmm0, %esi
; X64-AVX-NEXT: xorl %edx, %edx
; X64-AVX-NEXT: divl %esi
; X64-AVX-NEXT: movl %eax, %esi
; X64-AVX-NEXT: vextractps $1, %xmm0, %edi
; X64-AVX-NEXT: movl %ecx, %eax
; X64-AVX-NEXT: xorl %edx, %edx
; X64-AVX-NEXT: divl %edi
; X64-AVX-NEXT: addl %esi, %eax
; X64-AVX-NEXT: movq %rbp, %rsp
; X64-AVX-NEXT: popq %rbp
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
%stackptr = alloca <8 x i32>, align 32
%z = load <8 x i32>, ptr @zero, align 32
%t1 = load <8 x i32>, ptr @n1, align 32
store <8 x i32> %t1, ptr @zero, align 32
store volatile <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>, ptr %stackptr, align 32
%stackload = load volatile <8 x i32>, ptr %stackptr, align 32
%div = udiv <8 x i32> %z, %stackload
%e1 = extractelement <8 x i32> %div, i64 1
%e2 = extractelement <8 x i32> %div, i64 2
%r = add i32 %e1, %e2
ret i32 %r
}