; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86,X86-NOBMI
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+bmi2 | FileCheck %s --check-prefixes=X86,X86-BMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64,X64-NOBMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2 | FileCheck %s --check-prefixes=X64,X64-BMI

; PR1198
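; foo returns the high 64 bits of the full 128-bit product of %x and %y.
; On x86-64 this should lower to a single mulq with the high half taken
; from %rdx, rather than an expanded multi-part multiply.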
define i64 @foo(i64 %x, i64 %y) nounwind {
; X86-LABEL: foo:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: mull %ebp
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: movl %esi, %eax
; X86-NEXT: mull %ebp
; X86-NEXT: movl %edx, %ebp
; X86-NEXT: movl %eax, %esi
; X86-NEXT: addl %ebx, %esi
; X86-NEXT: adcl $0, %ebp
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: mull %edi
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: addl %esi, %eax
; X86-NEXT: adcl %ebp, %ebx
; X86-NEXT: setb %al
; X86-NEXT: movzbl %al, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mull %edi
; X86-NEXT: movl %edx, %esi
; X86-NEXT: movl %eax, %ebp
; X86-NEXT: addl %ebx, %ebp
; X86-NEXT: adcl %ecx, %esi
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mull %ecx
; X86-NEXT: movl %edx, %edi
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mull %ecx
; X86-NEXT: addl %ebx, %eax
; X86-NEXT: adcl %edi, %edx
; X86-NEXT: addl %ebp, %eax
; X86-NEXT: adcl %esi, %edx
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: foo:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rax
; X64-NEXT: retq
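; The IR below is the canonical high-half multiply pattern: zero-extend both
; operands to i128, multiply, then take the upper 64 bits via lshr/trunc.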
%tmp0 = zext i64 %x to i128
%tmp1 = zext i64 %y to i128
%tmp2 = mul i128 %tmp0, %tmp1
%tmp7 = zext i32 64 to i128
%tmp3 = lshr i128 %tmp2, %tmp7
%tmp4 = trunc i128 %tmp3 to i64
ret i64 %tmp4
}

; <rdar://problem/14096009> Check that no superfluous multiply by the (zero)
; high part of a zero-extended value is emitted.
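; mul1 computes z[i] = x[i] * y + carry across n 64-bit limbs. Because %y is
; merely zero-extended to i128, its high 64 bits are known to be zero, so each
; iteration should need only a single mulq on x86-64 (see the X64 loop checks
; below).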
define i64 @mul1(i64 %n, i64* nocapture %z, i64* nocapture %x, i64 %y) nounwind {
; X86-LABEL: mul1:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $28, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: je .LBB1_3
; X86-NEXT: # %bb.1: # %for.body.preheader
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: xorl %ebx, %ebx
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB1_2: # %for.body
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: movl (%eax,%ebx,8), %ebp
; X86-NEXT: movl 4(%eax,%ebx,8), %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebp, %eax
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: mull %ecx
; X86-NEXT: movl %edx, %edi
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, %eax
; X86-NEXT: mull %ecx
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl %eax, %esi
; X86-NEXT: addl %edi, %esi
; X86-NEXT: adcl $0, %ecx
; X86-NEXT: movl %ebp, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: mull %edx
; X86-NEXT: movl %edx, %ebp
; X86-NEXT: movl %eax, %edi
; X86-NEXT: addl %esi, %edi
; X86-NEXT: adcl %ecx, %ebp
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: mull {{[0-9]+}}(%esp)
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl %eax, %esi
; X86-NEXT: addl %ebp, %esi
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT: adcl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: mull %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, %ebp
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: mull %edx
; X86-NEXT: addl %ebp, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: addl %esi, %eax
; X86-NEXT: adcl %ecx, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: adcl $0, %eax
; X86-NEXT: adcl $0, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %esi, (%ecx,%ebx,8)
; X86-NEXT: movl %edi, 4(%ecx,%ebx,8)
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: addl $1, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: adcl $0, %esi
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: xorl %ebp, %ecx
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: xorl %edi, %esi
; X86-NEXT: orl %ecx, %esi
; X86-NEXT: jne .LBB1_2
; X86-NEXT: .LBB1_3: # %for.end
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: addl $28, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: mul1:
; X64: # %bb.0: # %entry
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: je .LBB1_3
; X64-NEXT: # %bb.1: # %for.body.preheader
; X64-NEXT: movq %rcx, %r8
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: xorl %r10d, %r10d
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB1_2: # %for.body
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq (%r9,%rcx,8)
; X64-NEXT: addq %r10, %rax
; X64-NEXT: adcq $0, %rdx
; X64-NEXT: movq %rax, (%rsi,%rcx,8)
; X64-NEXT: incq %rcx
; X64-NEXT: cmpq %rcx, %rdi
; X64-NEXT: movq %rdx, %r10
; X64-NEXT: jne .LBB1_2
; X64-NEXT: .LBB1_3: # %for.end
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
entry:
%conv = zext i64 %y to i128
%cmp11 = icmp eq i64 %n, 0
br i1 %cmp11, label %for.end, label %for.body
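
; Loop body: multiply one limb of x by y, add the incoming carry, store the
; low 64 bits of the i128 sum, and feed its high 64 bits into the next
; iteration's carry.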
for.body: ; preds = %entry, %for.body
%carry.013 = phi i64 [ %conv6, %for.body ], [ 0, %entry ]
%i.012 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i64, i64* %x, i64 %i.012
%0 = load i64, i64* %arrayidx, align 8
%conv2 = zext i64 %0 to i128
%mul = mul i128 %conv2, %conv
%conv3 = zext i64 %carry.013 to i128
%add = add i128 %mul, %conv3
%conv4 = trunc i128 %add to i64
%arrayidx5 = getelementptr inbounds i64, i64* %z, i64 %i.012
store i64 %conv4, i64* %arrayidx5, align 8
%shr = lshr i128 %add, 64
%conv6 = trunc i128 %shr to i64
%inc = add i64 %i.012, 1
%exitcond = icmp eq i64 %inc, %n
br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
ret i64 0
}