; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=X86

declare i4 @llvm.umul.fix.sat.i4 (i4, i4, i32)
declare i32 @llvm.umul.fix.sat.i32 (i32, i32, i32)
declare i64 @llvm.umul.fix.sat.i64 (i64, i64, i32)
declare <4 x i32> @llvm.umul.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32)

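; The llvm.umul.fix.sat.* intrinsics perform fixed-point unsigned saturating
; multiplication: the operands are multiplied at double width, the product is
; shifted right by the scale, and the result is clamped to the maximum value
; of the return type instead of wrapping. For an iN result:
;   umul.fix.sat(x, y, scale) = min((zext(x) * zext(y)) >> scale, 2^N - 1)
;
; Scale 2 on i32: (x*y) >> 2 fits in 32 bits iff x*y < 2^34, i.e. iff the
; high 32 bits of the 64-bit product are at most 3, hence the cmp $3 / cmov
; pattern below. E.g. x = 6 (1.5 in Q.2) times y = 10 (2.5) gives
; 60 >> 2 = 15, i.e. 3.75.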
define i32 @func(i32 %x, i32 %y) nounwind {
; X64-LABEL: func:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    movl %edi, %ecx
; X64-NEXT:    imulq %rax, %rcx
; X64-NEXT:    movq %rcx, %rax
; X64-NEXT:    shrq $32, %rax
; X64-NEXT:    shrdl $2, %eax, %ecx
; X64-NEXT:    cmpl $3, %eax
; X64-NEXT:    movl $-1, %eax
; X64-NEXT:    cmovbel %ecx, %eax
; X64-NEXT:    retq
;
; X86-LABEL: func:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    shrdl $2, %edx, %eax
; X86-NEXT:    cmpl $3, %edx
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmoval %ecx, %eax
; X86-NEXT:    retl
  %tmp = call i32 @llvm.umul.fix.sat.i32(i32 %x, i32 %y, i32 2)
  ret i32 %tmp
}

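; Same operation on i64: the 128-bit product is shifted right by 2 and
; saturated to UINT64_MAX, so it overflows iff any product bit at position 66
; or above is set. The 32-bit target has no 64x64 multiply, so the product is
; assembled from four 32x32 mulls plus carry propagation.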
define i64 @func2(i64 %x, i64 %y) nounwind {
; X64-LABEL: func2:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    mulq %rsi
; X64-NEXT:    shrdq $2, %rdx, %rax
; X64-NEXT:    cmpq $3, %rdx
; X64-NEXT:    movq $-1, %rcx
; X64-NEXT:    cmovaq %rcx, %rax
; X64-NEXT:    retq
;
; X86-LABEL: func2:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    mull %esi
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    addl %ebx, %ebp
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %esi
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    addl %ebp, %eax
; X86-NEXT:    adcl %edi, %edx
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    addl %esi, %edx
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    shrdl $2, %eax, %ecx
; X86-NEXT:    shrdl $2, %edx, %eax
; X86-NEXT:    shrl $2, %edx
; X86-NEXT:    orl %ebx, %edx
; X86-NEXT:    movl $-1, %edx
; X86-NEXT:    cmovnel %edx, %ecx
; X86-NEXT:    cmovel %eax, %edx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
  %tmp = call i64 @llvm.umul.fix.sat.i64(i64 %x, i64 %y, i32 2)
  ret i64 %tmp
}

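; i4 is promoted to i8 with the value kept in the high nibble: one operand is
; pre-shifted left by 4, the fixed-point shift by 2 is performed at the wider
; width, and the final shrb $4 moves the result back down; the saturated i8
; value 255 shifts down to 15, the i4 maximum.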
define i4 @func3(i4 %x, i4 %y) nounwind {
; X64-LABEL: func3:
; X64:       # %bb.0:
; X64-NEXT:    andl $15, %esi
; X64-NEXT:    shlb $4, %dil
; X64-NEXT:    movzbl %dil, %eax
; X64-NEXT:    imull %esi, %eax
; X64-NEXT:    movl %eax, %ecx
; X64-NEXT:    shrb $2, %cl
; X64-NEXT:    shrl $8, %eax
; X64-NEXT:    movl %eax, %edx
; X64-NEXT:    shlb $6, %dl
; X64-NEXT:    orb %cl, %dl
; X64-NEXT:    movzbl %dl, %ecx
; X64-NEXT:    cmpb $3, %al
; X64-NEXT:    movl $255, %eax
; X64-NEXT:    cmovbel %ecx, %eax
; X64-NEXT:    shrb $4, %al
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: func3:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    andb $15, %al
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    movzbl %al, %edx
; X86-NEXT:    shlb $4, %cl
; X86-NEXT:    movzbl %cl, %eax
; X86-NEXT:    imull %edx, %eax
; X86-NEXT:    movb %ah, %cl
; X86-NEXT:    shlb $6, %cl
; X86-NEXT:    shrb $2, %al
; X86-NEXT:    orb %cl, %al
; X86-NEXT:    movzbl %al, %ecx
; X86-NEXT:    cmpb $3, %ah
; X86-NEXT:    movl $255, %eax
; X86-NEXT:    cmovbel %ecx, %eax
; X86-NEXT:    shrb $4, %al
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    retl
  %tmp = call i4 @llvm.umul.fix.sat.i4(i4 %x, i4 %y, i32 2)
  ret i4 %tmp
}

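; There is no vector lowering for the intrinsic here, so the v4i32 case is
; scalarized into four copies of the @func sequence. On the 32-bit target the
; vector is also returned through a hidden pointer argument (hence the stores
; through %edi and the retl $4).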
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; X64-LABEL: vec:
; X64:       # %bb.0:
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
; X64-NEXT:    movd %xmm2, %eax
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; X64-NEXT:    movd %xmm2, %ecx
; X64-NEXT:    imulq %rax, %rcx
; X64-NEXT:    movq %rcx, %rax
; X64-NEXT:    shrq $32, %rax
; X64-NEXT:    shrdl $2, %eax, %ecx
; X64-NEXT:    cmpl $3, %eax
; X64-NEXT:    movl $-1, %eax
; X64-NEXT:    cmoval %eax, %ecx
; X64-NEXT:    movd %ecx, %xmm2
; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; X64-NEXT:    movd %xmm3, %ecx
; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; X64-NEXT:    movd %xmm3, %edx
; X64-NEXT:    imulq %rcx, %rdx
; X64-NEXT:    movq %rdx, %rcx
; X64-NEXT:    shrq $32, %rcx
; X64-NEXT:    shrdl $2, %ecx, %edx
; X64-NEXT:    cmpl $3, %ecx
; X64-NEXT:    cmoval %eax, %edx
; X64-NEXT:    movd %edx, %xmm3
; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X64-NEXT:    movd %xmm1, %ecx
; X64-NEXT:    movd %xmm0, %edx
; X64-NEXT:    imulq %rcx, %rdx
; X64-NEXT:    movq %rdx, %rcx
; X64-NEXT:    shrq $32, %rcx
; X64-NEXT:    shrdl $2, %ecx, %edx
; X64-NEXT:    cmpl $3, %ecx
; X64-NEXT:    cmoval %eax, %edx
; X64-NEXT:    movd %edx, %xmm2
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X64-NEXT:    movd %xmm1, %ecx
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X64-NEXT:    movd %xmm0, %edx
; X64-NEXT:    imulq %rcx, %rdx
; X64-NEXT:    movq %rdx, %rcx
; X64-NEXT:    shrq $32, %rcx
; X64-NEXT:    shrdl $2, %ecx, %edx
; X64-NEXT:    cmpl $3, %ecx
; X64-NEXT:    cmoval %eax, %edx
; X64-NEXT:    movd %edx, %xmm0
; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; X64-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; X64-NEXT:    movdqa %xmm2, %xmm0
; X64-NEXT:    retq
;
; X86-LABEL: vec:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    shrdl $2, %edx, %esi
; X86-NEXT:    cmpl $3, %edx
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmoval %ecx, %esi
; X86-NEXT:    movl %ebp, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    shrdl $2, %edx, %ebp
; X86-NEXT:    cmpl $3, %edx
; X86-NEXT:    cmoval %ecx, %ebp
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    shrdl $2, %edx, %ebx
; X86-NEXT:    cmpl $3, %edx
; X86-NEXT:    cmoval %ecx, %ebx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    shrdl $2, %edx, %eax
; X86-NEXT:    cmpl $3, %edx
; X86-NEXT:    cmoval %ecx, %eax
; X86-NEXT:    movl %eax, 12(%edi)
; X86-NEXT:    movl %ebx, 8(%edi)
; X86-NEXT:    movl %ebp, 4(%edi)
; X86-NEXT:    movl %esi, (%edi)
; X86-NEXT:    movl %edi, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
  %tmp = call <4 x i32> @llvm.umul.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 2)
  ret <4 x i32> %tmp
}

; The following cases (scale = 0) result in a regular integer multiplication.
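; With scale 0 there is nothing to shift: mul already sets CF/OF when the
; high half of the product is nonzero, so a single cmovo of -1 implements the
; saturation.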
define i32 @func4(i32 %x, i32 %y) nounwind {
; X64-LABEL: func4:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    mull %esi
; X64-NEXT:    movl $-1, %ecx
; X64-NEXT:    cmovol %ecx, %eax
; X64-NEXT:    retq
;
; X86-LABEL: func4:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmovol %ecx, %eax
; X86-NEXT:    retl
  %tmp = call i32 @llvm.umul.fix.sat.i32(i32 %x, i32 %y, i32 0)
  ret i32 %tmp
}

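; i64 variant of the scale-0 case. func5 is not marked nounwind, which is why
; the 32-bit output carries .cfi directives. The 64-bit overflow test is
; assembled from 32-bit multiplies whose seto/setb results are ORed together.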
define i64 @func5(i64 %x, i64 %y) {
; X64-LABEL: func5:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    mulq %rsi
; X64-NEXT:    movq $-1, %rcx
; X64-NEXT:    cmovoq %rcx, %rax
; X64-NEXT:    retq
;
; X86-LABEL: func5:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    pushl %ebx
; X86-NEXT:    .cfi_def_cfa_offset 12
; X86-NEXT:    pushl %edi
; X86-NEXT:    .cfi_def_cfa_offset 16
; X86-NEXT:    pushl %esi
; X86-NEXT:    .cfi_def_cfa_offset 20
; X86-NEXT:    .cfi_offset %esi, -20
; X86-NEXT:    .cfi_offset %edi, -16
; X86-NEXT:    .cfi_offset %ebx, -12
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    testl %esi, %esi
; X86-NEXT:    setne %dl
; X86-NEXT:    testl %eax, %eax
; X86-NEXT:    setne %cl
; X86-NEXT:    andb %dl, %cl
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    seto %bl
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    mull %ebp
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    seto %ch
; X86-NEXT:    orb %bl, %ch
; X86-NEXT:    addl %edi, %esi
; X86-NEXT:    movl %ebp, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    addl %esi, %edx
; X86-NEXT:    setb %bl
; X86-NEXT:    orb %ch, %bl
; X86-NEXT:    orb %cl, %bl
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmovnel %ecx, %eax
; X86-NEXT:    cmovnel %ecx, %edx
; X86-NEXT:    popl %esi
; X86-NEXT:    .cfi_def_cfa_offset 16
; X86-NEXT:    popl %edi
; X86-NEXT:    .cfi_def_cfa_offset 12
; X86-NEXT:    popl %ebx
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 4
; X86-NEXT:    retl
  %tmp = call i64 @llvm.umul.fix.sat.i64(i64 %x, i64 %y, i32 0)
  ret i64 %tmp
}

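; i4 scale-0 case: promoted into the high nibble of an i8 as in @func3, using
; mulb's overflow flag for the saturation test; 255 >> 4 = 15 is the
; saturated i4 result.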
define i4 @func6(i4 %x, i4 %y) nounwind {
; X64-LABEL: func6:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    andb $15, %sil
; X64-NEXT:    shlb $4, %al
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    mulb %sil
; X64-NEXT:    movzbl %al, %ecx
; X64-NEXT:    movl $255, %eax
; X64-NEXT:    cmovnol %ecx, %eax
; X64-NEXT:    shrb $4, %al
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: func6:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    andb $15, %cl
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    shlb $4, %al
; X86-NEXT:    mulb %cl
; X86-NEXT:    movzbl %al, %ecx
; X86-NEXT:    movl $255, %eax
; X86-NEXT:    cmovnol %ecx, %eax
; X86-NEXT:    shrb $4, %al
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    retl
  %tmp = call i4 @llvm.umul.fix.sat.i4(i4 %x, i4 %y, i32 0)
  ret i4 %tmp
}

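; Scale-0 vector case: scalarized as in @vec, but each lane reduces to a
; plain mul/cmovo pair.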
define <4 x i32> @vec2(<4 x i32> %x, <4 x i32> %y) nounwind {
; X64-LABEL: vec2:
; X64:       # %bb.0:
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; X64-NEXT:    movd %xmm2, %eax
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
; X64-NEXT:    movd %xmm2, %ecx
; X64-NEXT:    mull %ecx
; X64-NEXT:    movl $-1, %ecx
; X64-NEXT:    cmovol %ecx, %eax
; X64-NEXT:    movd %eax, %xmm2
; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; X64-NEXT:    movd %xmm3, %eax
; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; X64-NEXT:    movd %xmm3, %edx
; X64-NEXT:    mull %edx
; X64-NEXT:    cmovol %ecx, %eax
; X64-NEXT:    movd %eax, %xmm3
; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X64-NEXT:    movd %xmm0, %eax
; X64-NEXT:    movd %xmm1, %edx
; X64-NEXT:    mull %edx
; X64-NEXT:    cmovol %ecx, %eax
; X64-NEXT:    movd %eax, %xmm2
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X64-NEXT:    movd %xmm0, %eax
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; X64-NEXT:    movd %xmm0, %edx
; X64-NEXT:    mull %edx
; X64-NEXT:    cmovol %ecx, %eax
; X64-NEXT:    movd %eax, %xmm0
; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; X64-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; X64-NEXT:    movdqa %xmm2, %xmm0
; X64-NEXT:    retq
;
; X86-LABEL: vec2:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    movl $-1, %esi
; X86-NEXT:    cmovol %esi, %ebp
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    cmovol %esi, %ebx
; X86-NEXT:    movl %edi, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    cmovol %esi, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    cmovol %esi, %eax
; X86-NEXT:    movl %eax, 12(%ecx)
; X86-NEXT:    movl %edi, 8(%ecx)
; X86-NEXT:    movl %ebx, 4(%ecx)
; X86-NEXT:    movl %ebp, (%ecx)
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
  %tmp = call <4 x i32> @llvm.umul.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0)
  ret <4 x i32> %tmp
}

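; Scale 32 on i64: the result is the middle 64 bits of the 128-bit product
; (bits 32..95), so it overflows iff bits 96..127 are nonzero; on x86-64 that
; is the high half exceeding 0xFFFFFFFF. The 32-bit code turns the test into
; an all-ones mask with the cmpl $1 / sbbl / notl idiom and ORs it into both
; result words.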
define i64 @func7(i64 %x, i64 %y) nounwind {
; X64-LABEL: func7:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    mulq %rsi
; X64-NEXT:    shrdq $32, %rdx, %rax
; X64-NEXT:    movl $4294967295, %ecx # imm = 0xFFFFFFFF
; X64-NEXT:    cmpq %rcx, %rdx
; X64-NEXT:    movq $-1, %rcx
; X64-NEXT:    cmovaq %rcx, %rax
; X64-NEXT:    retq
;
; X86-LABEL: func7:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    mull %ebp
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    mull %esi
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    addl %ebx, %ecx
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ebp
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %esi
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    adcl %edi, %edx
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    addl %ebp, %edx
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    xorl %ecx, %ecx
; X86-NEXT:    cmpl $1, %ebx
; X86-NEXT:    sbbl %ecx, %ecx
; X86-NEXT:    notl %ecx
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    orl %ecx, %edx
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
  %tmp = call i64 @llvm.umul.fix.sat.i64(i64 %x, i64 %y, i32 32)
  ret i64 %tmp
}

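; Scale 63 on i64: the result is product >> 63 (bits 63..126), so the only
; overflow bit is bit 127 of the 128-bit product; x86-64 compares the high
; half against 0x7FFFFFFFFFFFFFFF, and the 32-bit code must read that bit
; from the top word (%ecx) before shldl shifts it out.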
define i64 @func8(i64 %x, i64 %y) nounwind {
; X64-LABEL: func8:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    mulq %rsi
; X64-NEXT:    shrdq $63, %rdx, %rax
; X64-NEXT:    movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
; X64-NEXT:    cmpq %rcx, %rdx
; X64-NEXT:    movq $-1, %rcx
; X64-NEXT:    cmovaq %rcx, %rax
; X64-NEXT:    retq
;
; X86-LABEL: func8:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    mull %esi
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    addl %ebx, %ebp
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %esi
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    addl %ebp, %eax
; X86-NEXT:    adcl %edi, %edx
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    addl %ebx, %edx
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    shrdl $31, %edx, %eax
; X86-NEXT:    movl %ecx, %esi
; X86-NEXT:    shrl $31, %esi
; X86-NEXT:    xorl %edi, %edi
; X86-NEXT:    cmpl $1, %esi
; X86-NEXT:    sbbl %edi, %edi
; X86-NEXT:    notl %edi
; X86-NEXT:    orl %edi, %eax
; X86-NEXT:    shldl $1, %edx, %ecx
; X86-NEXT:    orl %edi, %ecx
; X86-NEXT:    movl %ecx, %edx
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
  %tmp = call i64 @llvm.umul.fix.sat.i64(i64 %x, i64 %y, i32 63)
  ret i64 %tmp
}