| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64 |
| ; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=X86 |
| |
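; Check lowering of the saturating unsigned fixed-point division intrinsics
; (@llvm.udiv.fix.sat.*) for legal, illegal-width, and vector types, on
; x86-64 and on i686 with cmov.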
declare i4 @llvm.udiv.fix.sat.i4(i4, i4, i32)
declare i15 @llvm.udiv.fix.sat.i15(i15, i15, i32)
declare i16 @llvm.udiv.fix.sat.i16(i16, i16, i32)
declare i18 @llvm.udiv.fix.sat.i18(i18, i18, i32)
declare i64 @llvm.udiv.fix.sat.i64(i64, i64, i32)
declare <4 x i32> @llvm.udiv.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32)
| |
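; Basic i16 case with a scale of 7. The lowering shifts the dividend up by
; scale+1, saturates the 32-bit quotient at 0x1FFFF, then shifts the extra
; bit back out.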
| define i16 @func(i16 %x, i16 %y) nounwind { |
| ; X64-LABEL: func: |
| ; X64: # %bb.0: |
| ; X64-NEXT: movzwl %si, %ecx |
| ; X64-NEXT: movzwl %di, %eax |
| ; X64-NEXT: shll $8, %eax |
| ; X64-NEXT: xorl %edx, %edx |
| ; X64-NEXT: divl %ecx |
| ; X64-NEXT: cmpl $131071, %eax # imm = 0x1FFFF |
| ; X64-NEXT: movl $131071, %ecx # imm = 0x1FFFF |
| ; X64-NEXT: cmovael %ecx, %eax |
| ; X64-NEXT: shrl %eax |
| ; X64-NEXT: # kill: def $ax killed $ax killed $eax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: func: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movzwl %ax, %eax |
| ; X86-NEXT: shll $8, %eax |
| ; X86-NEXT: xorl %edx, %edx |
| ; X86-NEXT: divl %ecx |
| ; X86-NEXT: cmpl $131071, %eax # imm = 0x1FFFF |
| ; X86-NEXT: movl $131071, %ecx # imm = 0x1FFFF |
| ; X86-NEXT: cmovael %ecx, %eax |
| ; X86-NEXT: shrl %eax |
| ; X86-NEXT: # kill: def $ax killed $ax killed $eax |
| ; X86-NEXT: retl |
| %tmp = call i16 @llvm.udiv.fix.sat.i16(i16 %x, i16 %y, i32 7) |
| ret i16 %tmp |
| } |
| |
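; Operands are sign-extended from i8 to the illegal type i15 and divided with
; a scale of 14; the i15 result is sign-extended back to i16.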
| define i16 @func2(i8 %x, i8 %y) nounwind { |
| ; X64-LABEL: func2: |
| ; X64: # %bb.0: |
| ; X64-NEXT: movsbl %dil, %eax |
| ; X64-NEXT: andl $32767, %eax # imm = 0x7FFF |
| ; X64-NEXT: movsbl %sil, %ecx |
| ; X64-NEXT: andl $32767, %ecx # imm = 0x7FFF |
| ; X64-NEXT: shll $14, %eax |
| ; X64-NEXT: xorl %edx, %edx |
| ; X64-NEXT: divl %ecx |
| ; X64-NEXT: cmpl $32767, %eax # imm = 0x7FFF |
| ; X64-NEXT: movl $32767, %ecx # imm = 0x7FFF |
| ; X64-NEXT: cmovbl %eax, %ecx |
| ; X64-NEXT: addl %ecx, %ecx |
| ; X64-NEXT: movswl %cx, %eax |
| ; X64-NEXT: shrl %eax |
| ; X64-NEXT: # kill: def $ax killed $ax killed $eax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: func2: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movsbl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: andl $32767, %ecx # imm = 0x7FFF |
| ; X86-NEXT: movsbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $32767, %eax # imm = 0x7FFF |
| ; X86-NEXT: shll $14, %eax |
| ; X86-NEXT: xorl %edx, %edx |
| ; X86-NEXT: divl %ecx |
| ; X86-NEXT: cmpl $32767, %eax # imm = 0x7FFF |
| ; X86-NEXT: movl $32767, %ecx # imm = 0x7FFF |
| ; X86-NEXT: cmovbl %eax, %ecx |
| ; X86-NEXT: addl %ecx, %ecx |
| ; X86-NEXT: movswl %cx, %eax |
| ; X86-NEXT: shrl %eax |
| ; X86-NEXT: # kill: def $ax killed $ax killed $eax |
| ; X86-NEXT: retl |
| %x2 = sext i8 %x to i15 |
| %y2 = sext i8 %y to i15 |
| %tmp = call i15 @llvm.udiv.fix.sat.i15(i15 %x2, i15 %y2, i32 14) |
| %tmp2 = sext i15 %tmp to i16 |
| ret i16 %tmp2 |
| } |
| |
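; Mixed operand construction: an i15 dividend and a divisor built by
; sign-extending and shifting an i8, with a scale of 4. The division itself
; is done with divw after promotion to i16.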
| define i16 @func3(i15 %x, i8 %y) nounwind { |
| ; X64-LABEL: func3: |
| ; X64: # %bb.0: |
| ; X64-NEXT: # kill: def $edi killed $edi def $rdi |
| ; X64-NEXT: leal (%rdi,%rdi), %eax |
| ; X64-NEXT: movzbl %sil, %ecx |
| ; X64-NEXT: shll $4, %ecx |
| ; X64-NEXT: # kill: def $ax killed $ax killed $eax |
| ; X64-NEXT: xorl %edx, %edx |
| ; X64-NEXT: divw %cx |
| ; X64-NEXT: # kill: def $ax killed $ax def $eax |
| ; X64-NEXT: movzwl %ax, %ecx |
| ; X64-NEXT: cmpl $32767, %ecx # imm = 0x7FFF |
| ; X64-NEXT: movl $32767, %ecx # imm = 0x7FFF |
| ; X64-NEXT: cmovbl %eax, %ecx |
| ; X64-NEXT: addl %ecx, %ecx |
| ; X64-NEXT: movswl %cx, %eax |
| ; X64-NEXT: shrl %eax |
| ; X64-NEXT: # kill: def $ax killed $ax killed $eax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: func3: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: addl %eax, %eax |
| ; X86-NEXT: movzbl %cl, %ecx |
| ; X86-NEXT: shll $4, %ecx |
| ; X86-NEXT: # kill: def $ax killed $ax killed $eax |
| ; X86-NEXT: xorl %edx, %edx |
| ; X86-NEXT: divw %cx |
| ; X86-NEXT: # kill: def $ax killed $ax def $eax |
| ; X86-NEXT: movzwl %ax, %ecx |
| ; X86-NEXT: cmpl $32767, %ecx # imm = 0x7FFF |
| ; X86-NEXT: movl $32767, %ecx # imm = 0x7FFF |
| ; X86-NEXT: cmovbl %eax, %ecx |
| ; X86-NEXT: addl %ecx, %ecx |
| ; X86-NEXT: movswl %cx, %eax |
| ; X86-NEXT: shrl %eax |
| ; X86-NEXT: # kill: def $ax killed $ax killed $eax |
| ; X86-NEXT: retl |
| %y2 = sext i8 %y to i15 |
| %y3 = shl i15 %y2, 7 |
| %tmp = call i15 @llvm.udiv.fix.sat.i15(i15 %x, i15 %y3, i32 4) |
| %tmp2 = sext i15 %tmp to i16 |
| ret i16 %tmp2 |
| } |
| |
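; Sub-byte type: i4 operands with a scale of 2 are masked to four bits,
; divided with the 8-bit divb, and saturated at 15.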
| define i4 @func4(i4 %x, i4 %y) nounwind { |
| ; X64-LABEL: func4: |
| ; X64: # %bb.0: |
| ; X64-NEXT: andb $15, %sil |
| ; X64-NEXT: andb $15, %dil |
| ; X64-NEXT: shlb $2, %dil |
| ; X64-NEXT: movzbl %dil, %eax |
| ; X64-NEXT: divb %sil |
| ; X64-NEXT: movzbl %al, %ecx |
| ; X64-NEXT: cmpb $15, %cl |
| ; X64-NEXT: movl $15, %eax |
| ; X64-NEXT: cmovbl %ecx, %eax |
| ; X64-NEXT: # kill: def $al killed $al killed $eax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: func4: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movb {{[0-9]+}}(%esp), %cl |
| ; X86-NEXT: andb $15, %cl |
| ; X86-NEXT: movb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: andb $15, %al |
| ; X86-NEXT: shlb $2, %al |
| ; X86-NEXT: movzbl %al, %eax |
| ; X86-NEXT: divb %cl |
| ; X86-NEXT: movzbl %al, %ecx |
| ; X86-NEXT: cmpb $15, %al |
| ; X86-NEXT: movl $15, %eax |
| ; X86-NEXT: cmovbl %ecx, %eax |
| ; X86-NEXT: # kill: def $al killed $al killed $eax |
| ; X86-NEXT: retl |
| %tmp = call i4 @llvm.udiv.fix.sat.i4(i4 %x, i4 %y, i32 2) |
| ret i4 %tmp |
| } |
| |
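; i64 with a scale of 31 needs a 128-bit dividend, so the division becomes a
; __udivti3 libcall on both targets.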
| define i64 @func5(i64 %x, i64 %y) nounwind { |
| ; X64-LABEL: func5: |
| ; X64: # %bb.0: |
| ; X64-NEXT: pushq %rax |
| ; X64-NEXT: movq %rsi, %rdx |
| ; X64-NEXT: leaq (%rdi,%rdi), %rsi |
| ; X64-NEXT: movq %rdi, %rax |
| ; X64-NEXT: shrq $63, %rax |
| ; X64-NEXT: shrdq $33, %rax, %rsi |
| ; X64-NEXT: shlq $32, %rdi |
| ; X64-NEXT: xorl %ecx, %ecx |
| ; X64-NEXT: callq __udivti3@PLT |
| ; X64-NEXT: cmpq $2, %rdx |
| ; X64-NEXT: movq $-1, %rcx |
| ; X64-NEXT: cmovbq %rax, %rcx |
| ; X64-NEXT: cmpq $1, %rdx |
| ; X64-NEXT: movl $1, %eax |
| ; X64-NEXT: cmovbq %rdx, %rax |
| ; X64-NEXT: shldq $63, %rcx, %rax |
| ; X64-NEXT: popq %rcx |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: func5: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebp |
| ; X86-NEXT: movl %esp, %ebp |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: andl $-8, %esp |
| ; X86-NEXT: subl $24, %esp |
| ; X86-NEXT: movl 8(%ebp), %eax |
| ; X86-NEXT: movl 12(%ebp), %ecx |
| ; X86-NEXT: movl %ecx, %edx |
| ; X86-NEXT: shrl %edx |
| ; X86-NEXT: shldl $31, %eax, %ecx |
| ; X86-NEXT: shll $31, %eax |
| ; X86-NEXT: movl %esp, %esi |
| ; X86-NEXT: pushl $0 |
| ; X86-NEXT: pushl $0 |
| ; X86-NEXT: pushl 20(%ebp) |
| ; X86-NEXT: pushl 16(%ebp) |
| ; X86-NEXT: pushl $0 |
| ; X86-NEXT: pushl %edx |
| ; X86-NEXT: pushl %ecx |
| ; X86-NEXT: pushl %eax |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: calll __udivti3 |
| ; X86-NEXT: addl $32, %esp |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: orl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl $-1, %eax |
| ; X86-NEXT: movl $-1, %edx |
| ; X86-NEXT: jne .LBB4_2 |
| ; X86-NEXT: # %bb.1: |
| ; X86-NEXT: movl (%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: .LBB4_2: |
| ; X86-NEXT: leal -4(%ebp), %esp |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %ebp |
| ; X86-NEXT: retl |
| %tmp = call i64 @llvm.udiv.fix.sat.i64(i64 %x, i64 %y, i32 31) |
| ret i64 %tmp |
| } |
| |
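; i18 operands (sign-extended from i16) with a scale of 7 still fit in a
; single 32-bit divl after promotion.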
| define i18 @func6(i16 %x, i16 %y) nounwind { |
| ; X64-LABEL: func6: |
| ; X64: # %bb.0: |
| ; X64-NEXT: movswl %di, %eax |
| ; X64-NEXT: andl $262143, %eax # imm = 0x3FFFF |
| ; X64-NEXT: movswl %si, %ecx |
| ; X64-NEXT: andl $262143, %ecx # imm = 0x3FFFF |
| ; X64-NEXT: shll $7, %eax |
| ; X64-NEXT: xorl %edx, %edx |
| ; X64-NEXT: divl %ecx |
| ; X64-NEXT: cmpl $262143, %eax # imm = 0x3FFFF |
| ; X64-NEXT: movl $262143, %ecx # imm = 0x3FFFF |
| ; X64-NEXT: cmovael %ecx, %eax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: func6: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: andl $262143, %ecx # imm = 0x3FFFF |
| ; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $262143, %eax # imm = 0x3FFFF |
| ; X86-NEXT: shll $7, %eax |
| ; X86-NEXT: xorl %edx, %edx |
| ; X86-NEXT: divl %ecx |
| ; X86-NEXT: cmpl $262143, %eax # imm = 0x3FFFF |
| ; X86-NEXT: movl $262143, %ecx # imm = 0x3FFFF |
| ; X86-NEXT: cmovael %ecx, %eax |
| ; X86-NEXT: retl |
| %x2 = sext i16 %x to i18 |
| %y2 = sext i16 %y to i18 |
| %tmp = call i18 @llvm.udiv.fix.sat.i18(i18 %x2, i18 %y2, i32 7) |
| ret i18 %tmp |
| } |
| |
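; Scale equal to the full bit width (16): the shifted i16 dividend no longer
; fits in 32 bits, so x86-64 uses a 64-bit divq and i686 falls back to a
; __udivdi3 libcall.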
| define i16 @func7(i16 %x, i16 %y) nounwind { |
| ; X64-LABEL: func7: |
| ; X64: # %bb.0: |
| ; X64-NEXT: movzwl %si, %ecx |
| ; X64-NEXT: movzwl %di, %eax |
| ; X64-NEXT: addl %eax, %eax |
| ; X64-NEXT: shlq $16, %rax |
| ; X64-NEXT: xorl %edx, %edx |
| ; X64-NEXT: divq %rcx |
| ; X64-NEXT: cmpq $131071, %rax # imm = 0x1FFFF |
| ; X64-NEXT: movl $131071, %ecx # imm = 0x1FFFF |
| ; X64-NEXT: cmovaeq %rcx, %rax |
| ; X64-NEXT: shrl %eax |
| ; X64-NEXT: # kill: def $ax killed $ax killed $rax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: func7: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movzwl %cx, %ecx |
| ; X86-NEXT: addl %ecx, %ecx |
| ; X86-NEXT: movl %ecx, %edx |
| ; X86-NEXT: shrl $16, %edx |
| ; X86-NEXT: shll $16, %ecx |
| ; X86-NEXT: pushl $0 |
| ; X86-NEXT: pushl %eax |
| ; X86-NEXT: pushl %edx |
| ; X86-NEXT: pushl %ecx |
| ; X86-NEXT: calll __udivdi3 |
| ; X86-NEXT: addl $16, %esp |
| ; X86-NEXT: cmpl $131071, %eax # imm = 0x1FFFF |
| ; X86-NEXT: movl $131071, %ecx # imm = 0x1FFFF |
| ; X86-NEXT: cmovael %ecx, %eax |
| ; X86-NEXT: testl %edx, %edx |
| ; X86-NEXT: cmovnel %ecx, %eax |
| ; X86-NEXT: shrl %eax |
| ; X86-NEXT: # kill: def $ax killed $ax killed $eax |
| ; X86-NEXT: retl |
| %tmp = call i16 @llvm.udiv.fix.sat.i16(i16 %x, i16 %y, i32 16) |
| ret i16 %tmp |
| } |
| |
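; Vector case: <4 x i32> with a scale of 31 is scalarized, and each lane
; becomes a 64-bit division (divq on x86-64, a __udivdi3 libcall on i686)
; followed by saturation.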
| define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind { |
| ; X64-LABEL: vec: |
| ; X64: # %bb.0: |
| ; X64-NEXT: pxor %xmm8, %xmm8 |
| ; X64-NEXT: movdqa %xmm1, %xmm2 |
| ; X64-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3] |
| ; X64-NEXT: movq %xmm2, %rcx |
| ; X64-NEXT: movdqa %xmm0, %xmm2 |
| ; X64-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3] |
| ; X64-NEXT: paddq %xmm2, %xmm2 |
| ; X64-NEXT: psllq $31, %xmm2 |
| ; X64-NEXT: movq %xmm2, %rax |
| ; X64-NEXT: xorl %edx, %edx |
| ; X64-NEXT: divq %rcx |
| ; X64-NEXT: movq %rax, %xmm7 |
| ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] |
| ; X64-NEXT: movq %xmm2, %rax |
| ; X64-NEXT: movdqa %xmm1, %xmm2 |
| ; X64-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; X64-NEXT: movq %xmm2, %rcx |
| ; X64-NEXT: xorl %edx, %edx |
| ; X64-NEXT: divq %rcx |
| ; X64-NEXT: movq %rax, %xmm2 |
| ; X64-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm2[0] |
| ; X64-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456] |
| ; X64-NEXT: movdqa %xmm7, %xmm2 |
| ; X64-NEXT: pxor %xmm3, %xmm2 |
| ; X64-NEXT: movdqa {{.*#+}} xmm9 = [9223372043297226751,9223372043297226751] |
| ; X64-NEXT: movdqa %xmm9, %xmm6 |
| ; X64-NEXT: pcmpgtd %xmm2, %xmm6 |
| ; X64-NEXT: pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2] |
| ; X64-NEXT: pcmpeqd %xmm9, %xmm2 |
| ; X64-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3] |
| ; X64-NEXT: pand %xmm4, %xmm5 |
| ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3] |
| ; X64-NEXT: por %xmm5, %xmm2 |
| ; X64-NEXT: movdqa {{.*#+}} xmm6 = [8589934591,8589934591] |
| ; X64-NEXT: pand %xmm2, %xmm7 |
| ; X64-NEXT: pandn %xmm6, %xmm2 |
| ; X64-NEXT: por %xmm7, %xmm2 |
| ; X64-NEXT: psrlq $1, %xmm2 |
| ; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1] |
| ; X64-NEXT: paddq %xmm0, %xmm0 |
| ; X64-NEXT: psllq $31, %xmm0 |
| ; X64-NEXT: movq %xmm0, %rax |
| ; X64-NEXT: movd %xmm1, %ecx |
| ; X64-NEXT: xorl %edx, %edx |
| ; X64-NEXT: divq %rcx |
| ; X64-NEXT: movq %rax, %xmm4 |
| ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] |
| ; X64-NEXT: movq %xmm0, %rax |
| ; X64-NEXT: psrlq $32, %xmm1 |
| ; X64-NEXT: movq %xmm1, %rcx |
| ; X64-NEXT: xorl %edx, %edx |
| ; X64-NEXT: divq %rcx |
| ; X64-NEXT: movq %rax, %xmm0 |
| ; X64-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0] |
| ; X64-NEXT: pxor %xmm4, %xmm3 |
| ; X64-NEXT: movdqa %xmm9, %xmm0 |
| ; X64-NEXT: pcmpgtd %xmm3, %xmm0 |
| ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2] |
| ; X64-NEXT: pcmpeqd %xmm9, %xmm3 |
| ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] |
| ; X64-NEXT: pand %xmm1, %xmm3 |
| ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] |
| ; X64-NEXT: por %xmm3, %xmm0 |
| ; X64-NEXT: pand %xmm0, %xmm4 |
| ; X64-NEXT: pandn %xmm6, %xmm0 |
| ; X64-NEXT: por %xmm4, %xmm0 |
| ; X64-NEXT: psrlq $1, %xmm0 |
| ; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2] |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: vec: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebp |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: subl $12, %esp |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: xorl %eax, %eax |
| ; X86-NEXT: addl %ecx, %ecx |
| ; X86-NEXT: setb %al |
| ; X86-NEXT: shldl $31, %ecx, %eax |
| ; X86-NEXT: shll $31, %ecx |
| ; X86-NEXT: pushl $0 |
| ; X86-NEXT: pushl {{[0-9]+}}(%esp) |
| ; X86-NEXT: pushl %eax |
| ; X86-NEXT: pushl %ecx |
| ; X86-NEXT: calll __udivdi3 |
| ; X86-NEXT: addl $16, %esp |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: xorl %eax, %eax |
| ; X86-NEXT: addl %ebp, %ebp |
| ; X86-NEXT: setb %al |
| ; X86-NEXT: shldl $31, %ebp, %eax |
| ; X86-NEXT: shll $31, %ebp |
| ; X86-NEXT: pushl $0 |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %eax |
| ; X86-NEXT: pushl %ebp |
| ; X86-NEXT: calll __udivdi3 |
| ; X86-NEXT: addl $16, %esp |
| ; X86-NEXT: movl %eax, (%esp) # 4-byte Spill |
| ; X86-NEXT: movl %edx, %ebp |
| ; X86-NEXT: xorl %eax, %eax |
| ; X86-NEXT: addl %edi, %edi |
| ; X86-NEXT: setb %al |
| ; X86-NEXT: shldl $31, %edi, %eax |
| ; X86-NEXT: shll $31, %edi |
| ; X86-NEXT: pushl $0 |
| ; X86-NEXT: pushl {{[0-9]+}}(%esp) |
| ; X86-NEXT: pushl %eax |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: calll __udivdi3 |
| ; X86-NEXT: addl $16, %esp |
| ; X86-NEXT: movl %eax, %ebx |
| ; X86-NEXT: movl %edx, %edi |
| ; X86-NEXT: xorl %eax, %eax |
| ; X86-NEXT: addl %esi, %esi |
| ; X86-NEXT: setb %al |
| ; X86-NEXT: shldl $31, %esi, %eax |
| ; X86-NEXT: shll $31, %esi |
| ; X86-NEXT: pushl $0 |
| ; X86-NEXT: pushl {{[0-9]+}}(%esp) |
| ; X86-NEXT: pushl %eax |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: calll __udivdi3 |
| ; X86-NEXT: addl $16, %esp |
| ; X86-NEXT: cmpl $2, %edx |
| ; X86-NEXT: movl $-1, %ecx |
| ; X86-NEXT: cmovael %ecx, %eax |
| ; X86-NEXT: cmpl $1, %edx |
| ; X86-NEXT: movl $1, %esi |
| ; X86-NEXT: cmovael %esi, %edx |
| ; X86-NEXT: shldl $31, %eax, %edx |
| ; X86-NEXT: cmpl $2, %edi |
| ; X86-NEXT: cmovael %ecx, %ebx |
| ; X86-NEXT: cmpl $1, %edi |
| ; X86-NEXT: cmovael %esi, %edi |
| ; X86-NEXT: shldl $31, %ebx, %edi |
| ; X86-NEXT: cmpl $2, %ebp |
| ; X86-NEXT: movl (%esp), %eax # 4-byte Reload |
| ; X86-NEXT: cmovael %ecx, %eax |
| ; X86-NEXT: cmpl $1, %ebp |
| ; X86-NEXT: cmovael %esi, %ebp |
| ; X86-NEXT: shldl $31, %eax, %ebp |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload |
| ; X86-NEXT: cmpl $2, %ebx |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: cmovael %ecx, %eax |
| ; X86-NEXT: cmpl $1, %ebx |
| ; X86-NEXT: cmovbl %ebx, %esi |
| ; X86-NEXT: shldl $31, %eax, %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl %esi, 12(%eax) |
| ; X86-NEXT: movl %ebp, 8(%eax) |
| ; X86-NEXT: movl %edi, 4(%eax) |
| ; X86-NEXT: movl %edx, (%eax) |
| ; X86-NEXT: addl $12, %esp |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: popl %ebp |
| ; X86-NEXT: retl $4 |
| %tmp = call <4 x i32> @llvm.udiv.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 31) |
| ret <4 x i32> %tmp |
| } |