|  | ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py | 
|  | ; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 | 
|  | ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 | 
|  |  | 
; When the vector shift amount is a splat of a scalar -- whether loaded from
; memory (shift5a/shift5b) or passed by value (shift5c/shift5d) -- codegen
; should feed the scalar amount straight to PSLLD/PSRAD and avoid
; materializing the splat.
|  |  | 
define void @shift5a(<4 x i32> %val, ptr %dst, ptr %pamt) nounwind {
; X86-LABEL: shift5a:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT:    pslld %xmm1, %xmm0
; X86-NEXT:    movdqa %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: shift5a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT:    pslld %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
; Load a scalar shift amount from memory, splat it across all four i32 lanes
; (insertelement into lane 0, then shufflevector with an all-zero mask), and
; shift left. The checks above assert that this lowers to a single MOVD +
; PSLLD reading the amount from xmm1, with no explicit splat instruction
; (e.g. pshufd) emitted.
%amt = load i32, ptr %pamt
%tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
%shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
%shl = shl <4 x i32> %val, %shamt
store <4 x i32> %shl, ptr %dst
ret void
}
|  |  | 
|  |  | 
define void @shift5b(<4 x i32> %val, ptr %dst, ptr %pamt) nounwind {
; X86-LABEL: shift5b:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT:    psrad %xmm1, %xmm0
; X86-NEXT:    movdqa %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: shift5b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT:    psrad %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
; Same pattern as @shift5a but with an arithmetic shift right: load the
; scalar amount, splat it via insertelement + shufflevector, then ashr.
; The checks assert a single MOVD + PSRAD with no splat instruction emitted.
%amt = load i32, ptr %pamt
%tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
%shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
%shr = ashr <4 x i32> %val, %shamt
store <4 x i32> %shr, ptr %dst
ret void
}
|  |  | 
|  |  | 
define void @shift5c(<4 x i32> %val, ptr %dst, i32 %amt) nounwind {
; X86-LABEL: shift5c:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT:    pslld %xmm1, %xmm0
; X86-NEXT:    movdqa %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: shift5c:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movd %esi, %xmm1
; X64-NEXT:    pslld %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
; Variant of @shift5a where the shift amount arrives by value instead of
; through a pointer. On x86-64 the amount is moved from %esi into xmm1; on
; i686 it is loaded from its stack slot. Either way the checks assert a
; single PSLLD with no splat instruction emitted.
%tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
%shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
%shl = shl <4 x i32> %val, %shamt
store <4 x i32> %shl, ptr %dst
ret void
}
|  |  | 
|  |  | 
define void @shift5d(<4 x i32> %val, ptr %dst, i32 %amt) nounwind {
; X86-LABEL: shift5d:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT:    psrad %xmm1, %xmm0
; X86-NEXT:    movdqa %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: shift5d:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movd %esi, %xmm1
; X64-NEXT:    psrad %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
; Variant of @shift5b where the shift amount arrives by value: splat the
; scalar argument and shift right arithmetically. The checks assert a single
; PSRAD (amount in xmm1) with no splat instruction emitted.
%tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
%shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
%shr = ashr <4 x i32> %val, %shamt
store <4 x i32> %shr, ptr %dst
ret void
}