| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx | FileCheck %s --check-prefix=X64 |
| ; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx | FileCheck %s --check-prefix=X64 |
| ; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=-mmx | FileCheck %s --check-prefix=X64_NO_MMX |
| ; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=+mmx | FileCheck %s --check-prefix=X32 |
| |
| ; Check soft floating point conversion function calls. |
| |
; Global scalar variables used as stable load/store targets, so each test
; exercises the fp128 libcall lowering itself rather than argument setup.
@vi32 = common global i32 0, align 4
@vi64 = common global i64 0, align 8
@vu32 = common global i32 0, align 4
@vu64 = common global i64 0, align 8
@vf32 = common global float 0.000000e+00, align 4
@vf64 = common global double 0.000000e+00, align 8
@vf80 = common global x86_fp80 0xK00000000000000000000, align 8
@vf128 = common global fp128 0xL00000000000000000000000000000000, align 16
| |
; fpext float -> fp128 lowers to the compiler-rt libcall __extendsftf2;
; the result is stored to @vf128 (in xmm0, rax:rdx, or four i32 stores
; depending on target/MMX attribute).
define void @TestFPExtF32_F128() nounwind {
; X64-LABEL: TestFPExtF32_F128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT:    callq __extendsftf2
; X64-NEXT:    movaps %xmm0, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPExtF32_F128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64_NO_MMX-NEXT:    callq __extendsftf2
; X64_NO_MMX-NEXT:    movq %rdx, vf128+{{.*}}(%rip)
; X64_NO_MMX-NEXT:    movq %rax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPExtF32_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $24, %esp
; X32-NEXT:    flds vf32
; X32-NEXT:    fstps {{[0-9]+}}(%esp)
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl %eax, (%esp)
; X32-NEXT:    calll __extendsftf2
; X32-NEXT:    subl $4, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
entry:
  %0 = load float, float* @vf32, align 4
  %conv = fpext float %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}
| |
; fpext double -> fp128 lowers to the __extenddftf2 libcall.
define void @TestFPExtF64_F128() nounwind {
; X64-LABEL: TestFPExtF64_F128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    callq __extenddftf2
; X64-NEXT:    movaps %xmm0, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPExtF64_F128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X64_NO_MMX-NEXT:    callq __extenddftf2
; X64_NO_MMX-NEXT:    movq %rdx, vf128+{{.*}}(%rip)
; X64_NO_MMX-NEXT:    movq %rax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPExtF64_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $40, %esp
; X32-NEXT:    fldl vf64
; X32-NEXT:    fstpl {{[0-9]+}}(%esp)
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl %eax, (%esp)
; X32-NEXT:    calll __extenddftf2
; X32-NEXT:    subl $4, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $40, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
entry:
  %0 = load double, double* @vf64, align 8
  %conv = fpext double %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}
| |
; fpext x86_fp80 -> fp128 lowers to __extendxftf2; the f80 argument is
; passed in memory (fldt/fstpt through the stack) on both targets.
define void @TestFPExtF80_F128() nounwind {
; X64-LABEL: TestFPExtF80_F128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{.*}}(%rip)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    callq __extendxftf2
; X64-NEXT:    movaps %xmm0, {{.*}}(%rip)
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPExtF80_F128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    subq $24, %rsp
; X64_NO_MMX-NEXT:    fldt {{.*}}(%rip)
; X64_NO_MMX-NEXT:    fstpt (%rsp)
; X64_NO_MMX-NEXT:    callq __extendxftf2
; X64_NO_MMX-NEXT:    movq %rdx, vf128+{{.*}}(%rip)
; X64_NO_MMX-NEXT:    movq %rax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    addq $24, %rsp
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPExtF80_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $40, %esp
; X32-NEXT:    fldt vf80
; X32-NEXT:    fstpt {{[0-9]+}}(%esp)
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl %eax, (%esp)
; X32-NEXT:    calll __extendxftf2
; X32-NEXT:    subl $4, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $40, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
entry:
  %0 = load x86_fp80, x86_fp80* @vf80, align 8
  %conv = fpext x86_fp80 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}
| |
; fptosi fp128 -> i32 lowers to the __fixtfsi libcall.
define void @TestFPToSIF128_I32() nounwind {
; X64-LABEL: TestFPToSIF128_I32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
; X64-NEXT:    callq __fixtfsi
; X64-NEXT:    movl %eax, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPToSIF128_I32:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq {{.*}}(%rip), %rdi
; X64_NO_MMX-NEXT:    movq vf128+{{.*}}(%rip), %rsi
; X64_NO_MMX-NEXT:    callq __fixtfsi
; X64_NO_MMX-NEXT:    movl %eax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPToSIF128_I32:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixtfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movl %eax, vi32
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i32
  store i32 %conv, i32* @vi32, align 4
  ret void
}
| |
; fptoui fp128 -> i32 lowers to the __fixunstfsi libcall.
define void @TestFPToUIF128_U32() nounwind {
; X64-LABEL: TestFPToUIF128_U32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
; X64-NEXT:    callq __fixunstfsi
; X64-NEXT:    movl %eax, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPToUIF128_U32:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq {{.*}}(%rip), %rdi
; X64_NO_MMX-NEXT:    movq vf128+{{.*}}(%rip), %rsi
; X64_NO_MMX-NEXT:    callq __fixunstfsi
; X64_NO_MMX-NEXT:    movl %eax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPToUIF128_U32:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixunstfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movl %eax, vu32
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i32
  store i32 %conv, i32* @vu32, align 4
  ret void
}
| |
; fptosi fp128 -> i32 followed by sext to i64: only __fixtfsi is called;
; the sign-extension is done inline (cltq on x86-64, sarl $31 on i686).
define void @TestFPToSIF128_I64() nounwind {
; X64-LABEL: TestFPToSIF128_I64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
; X64-NEXT:    callq __fixtfsi
; X64-NEXT:    cltq
; X64-NEXT:    movq %rax, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPToSIF128_I64:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq {{.*}}(%rip), %rdi
; X64_NO_MMX-NEXT:    movq vf128+{{.*}}(%rip), %rsi
; X64_NO_MMX-NEXT:    callq __fixtfsi
; X64_NO_MMX-NEXT:    cltq
; X64_NO_MMX-NEXT:    movq %rax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPToSIF128_I64:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixtfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movl %eax, vi64
; X32-NEXT:    sarl $31, %eax
; X32-NEXT:    movl %eax, vi64+4
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i32
  %conv1 = sext i32 %conv to i64
  store i64 %conv1, i64* @vi64, align 8
  ret void
}
| |
; fptoui fp128 -> i32 followed by zext to i64: only __fixunstfsi is
; called; the zero-extension is inline (movl %eax,%eax / movl $0 high half).
define void @TestFPToUIF128_U64() nounwind {
; X64-LABEL: TestFPToUIF128_U64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
; X64-NEXT:    callq __fixunstfsi
; X64-NEXT:    movl %eax, %eax
; X64-NEXT:    movq %rax, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPToUIF128_U64:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq {{.*}}(%rip), %rdi
; X64_NO_MMX-NEXT:    movq vf128+{{.*}}(%rip), %rsi
; X64_NO_MMX-NEXT:    callq __fixunstfsi
; X64_NO_MMX-NEXT:    movl %eax, %eax
; X64_NO_MMX-NEXT:    movq %rax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPToUIF128_U64:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixunstfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movl %eax, vu64
; X32-NEXT:    movl $0, vu64+4
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i32
  %conv1 = zext i32 %conv to i64
  store i64 %conv1, i64* @vu64, align 8
  ret void
}
| |
; fptrunc fp128 -> float lowers to __trunctfsf2 (x87 return on i686).
define void @TestFPTruncF128_F32() nounwind {
; X64-LABEL: TestFPTruncF128_F32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
; X64-NEXT:    callq __trunctfsf2
; X64-NEXT:    movss %xmm0, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPTruncF128_F32:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq {{.*}}(%rip), %rdi
; X64_NO_MMX-NEXT:    movq vf128+{{.*}}(%rip), %rsi
; X64_NO_MMX-NEXT:    callq __trunctfsf2
; X64_NO_MMX-NEXT:    movss %xmm0, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPTruncF128_F32:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __trunctfsf2
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    fstps vf32
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptrunc fp128 %0 to float
  store float %conv, float* @vf32, align 4
  ret void
}
| |
; fptrunc fp128 -> double lowers to __trunctfdf2.
define void @TestFPTruncF128_F64() nounwind {
; X64-LABEL: TestFPTruncF128_F64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
; X64-NEXT:    callq __trunctfdf2
; X64-NEXT:    movsd %xmm0, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPTruncF128_F64:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq {{.*}}(%rip), %rdi
; X64_NO_MMX-NEXT:    movq vf128+{{.*}}(%rip), %rsi
; X64_NO_MMX-NEXT:    callq __trunctfdf2
; X64_NO_MMX-NEXT:    movsd %xmm0, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPTruncF128_F64:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __trunctfdf2
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    fstpl vf64
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptrunc fp128 %0 to double
  store double %conv, double* @vf64, align 8
  ret void
}
| |
; fptrunc fp128 -> x86_fp80 lowers to __trunctfxf2, which returns on the
; x87 stack; the 10-byte result is stored to @vf80 (as a q+w pair on the
; plain x86-64 config, directly with fstpt elsewhere).
define void @TestFPTruncF128_F80() nounwind {
; X64-LABEL: TestFPTruncF128_F80:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
; X64-NEXT:    callq __trunctfxf2
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    movq (%rsp), %rax
; X64-NEXT:    movq %rax, {{.*}}(%rip)
; X64-NEXT:    movl {{[0-9]+}}(%rsp), %eax
; X64-NEXT:    movw %ax, vf80+{{.*}}(%rip)
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestFPTruncF128_F80:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq {{.*}}(%rip), %rdi
; X64_NO_MMX-NEXT:    movq vf128+{{.*}}(%rip), %rsi
; X64_NO_MMX-NEXT:    callq __trunctfxf2
; X64_NO_MMX-NEXT:    fstpt {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestFPTruncF128_F80:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __trunctfxf2
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    fstpt vf80
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptrunc fp128 %0 to x86_fp80
  store x86_fp80 %conv, x86_fp80* @vf80, align 8
  ret void
}
| |
; sitofp i32 -> fp128 lowers to __floatsitf (sret-style pointer return
; on i686, where the result is copied out in four i32 stores).
define void @TestSIToFPI32_F128() nounwind {
; X64-LABEL: TestSIToFPI32_F128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movl {{.*}}(%rip), %edi
; X64-NEXT:    callq __floatsitf
; X64-NEXT:    movaps %xmm0, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestSIToFPI32_F128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movl {{.*}}(%rip), %edi
; X64_NO_MMX-NEXT:    callq __floatsitf
; X64_NO_MMX-NEXT:    movq %rdx, vf128+{{.*}}(%rip)
; X64_NO_MMX-NEXT:    movq %rax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestSIToFPI32_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $32, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vi32
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floatsitf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
entry:
  %0 = load i32, i32* @vi32, align 4
  %conv = sitofp i32 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}
| |
; uitofp i32 -> fp128 lowers to __floatunsitf.
; NOTE(review): attribute group #2 is { nounwind readnone }, yet this
; function stores to @vf128 — verify the attribute is intended.
define void @TestUIToFPU32_F128() #2 {
; X64-LABEL: TestUIToFPU32_F128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movl {{.*}}(%rip), %edi
; X64-NEXT:    callq __floatunsitf
; X64-NEXT:    movaps %xmm0, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestUIToFPU32_F128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movl {{.*}}(%rip), %edi
; X64_NO_MMX-NEXT:    callq __floatunsitf
; X64_NO_MMX-NEXT:    movq %rdx, vf128+{{.*}}(%rip)
; X64_NO_MMX-NEXT:    movq %rax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestUIToFPU32_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $32, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vu32
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floatunsitf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
entry:
  %0 = load i32, i32* @vu32, align 4
  %conv = uitofp i32 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}
| |
; sitofp i64 -> fp128 lowers to __floatditf.
define void @TestSIToFPI64_F128() nounwind {
; X64-LABEL: TestSIToFPI64_F128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movq {{.*}}(%rip), %rdi
; X64-NEXT:    callq __floatditf
; X64-NEXT:    movaps %xmm0, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestSIToFPI64_F128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq {{.*}}(%rip), %rdi
; X64_NO_MMX-NEXT:    callq __floatditf
; X64_NO_MMX-NEXT:    movq %rdx, vf128+{{.*}}(%rip)
; X64_NO_MMX-NEXT:    movq %rax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestSIToFPI64_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $28, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vi64+4
; X32-NEXT:    pushl vi64
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floatditf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
entry:
  %0 = load i64, i64* @vi64, align 8
  %conv = sitofp i64 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}
| |
; uitofp i64 -> fp128 lowers to __floatunditf.
; NOTE(review): attribute group #2 is { nounwind readnone }, yet this
; function stores to @vf128 — verify the attribute is intended.
define void @TestUIToFPU64_F128() #2 {
; X64-LABEL: TestUIToFPU64_F128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movq {{.*}}(%rip), %rdi
; X64-NEXT:    callq __floatunditf
; X64-NEXT:    movaps %xmm0, {{.*}}(%rip)
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestUIToFPU64_F128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq {{.*}}(%rip), %rdi
; X64_NO_MMX-NEXT:    callq __floatunditf
; X64_NO_MMX-NEXT:    movq %rdx, vf128+{{.*}}(%rip)
; X64_NO_MMX-NEXT:    movq %rax, {{.*}}(%rip)
; X64_NO_MMX-NEXT:    popq %rax
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestUIToFPU64_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $28, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vu64+4
; X32-NEXT:    pushl vu64
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floatunditf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
entry:
  %0 = load i64, i64* @vu64, align 8
  %conv = uitofp i64 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}
| |
; fcmp ogt against an fp128 constant (0x3FFF... is 1.0 in IEEE binary128)
; lowers to the __gttf2 comparison libcall; the constant is materialized
; inline (movabsq pair, or pushed words) rather than loaded on some configs.
define i32 @TestConst128(fp128 %v) nounwind {
; X64-LABEL: TestConst128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movaps {{.*}}(%rip), %xmm1
; X64-NEXT:    callq __gttf2
; X64-NEXT:    xorl %ecx, %ecx
; X64-NEXT:    testl %eax, %eax
; X64-NEXT:    setg %cl
; X64-NEXT:    movl %ecx, %eax
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestConst128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movabsq $4611404543450677248, %rcx # imm = 0x3FFF000000000000
; X64_NO_MMX-NEXT:    xorl %edx, %edx
; X64_NO_MMX-NEXT:    callq __gttf2
; X64_NO_MMX-NEXT:    xorl %ecx, %ecx
; X64_NO_MMX-NEXT:    testl %eax, %eax
; X64_NO_MMX-NEXT:    setg %cl
; X64_NO_MMX-NEXT:    movl %ecx, %eax
; X64_NO_MMX-NEXT:    popq %rcx
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestConst128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl $1073676288 # imm = 0x3FFF0000
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    calll __gttf2
; X32-NEXT:    addl $32, %esp
; X32-NEXT:    xorl %ecx, %ecx
; X32-NEXT:    testl %eax, %eax
; X32-NEXT:    setg %cl
; X32-NEXT:    movl %ecx, %eax
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
entry:
  %cmp = fcmp ogt fp128 %v, 0xL00000000000000003FFF000000000000
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}
| |
| ; C code: |
| ; struct TestBits_ieee_ext { |
| ; unsigned v1; |
| ; unsigned v2; |
| ; }; |
| ; union TestBits_LDU { |
| ; FP128 ld; |
| ; struct TestBits_ieee_ext bits; |
| ; }; |
| ; int TestBits128(FP128 ld) { |
| ; union TestBits_LDU u; |
| ; u.ld = ld * ld; |
| ; return ((u.bits.v1 | u.bits.v2) == 0); |
| ; } |
; fmul fp128 lowers to __multf3; the result is then bitcast to i128 and
; only its low 64 bits are inspected (lshr 32 + or + trunc), so codegen
; should read just the low half of the libcall result.
define i32 @TestBits128(fp128 %ld) nounwind {
; X64-LABEL: TestBits128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    movaps %xmm0, %xmm1
; X64-NEXT:    callq __multf3
; X64-NEXT:    movaps %xmm0, (%rsp)
; X64-NEXT:    movq (%rsp), %rcx
; X64-NEXT:    movq %rcx, %rdx
; X64-NEXT:    shrq $32, %rdx
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    orl %ecx, %edx
; X64-NEXT:    sete %al
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestBits128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq %rdi, %rdx
; X64_NO_MMX-NEXT:    movq %rsi, %rcx
; X64_NO_MMX-NEXT:    callq __multf3
; X64_NO_MMX-NEXT:    movq %rax, %rdx
; X64_NO_MMX-NEXT:    shrq $32, %rdx
; X64_NO_MMX-NEXT:    xorl %ecx, %ecx
; X64_NO_MMX-NEXT:    orl %eax, %edx
; X64_NO_MMX-NEXT:    sete %cl
; X64_NO_MMX-NEXT:    movl %ecx, %eax
; X64_NO_MMX-NEXT:    popq %rcx
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestBits128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $20, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    pushl %edx
; X32-NEXT:    pushl %ecx
; X32-NEXT:    pushl %eax
; X32-NEXT:    pushl %esi
; X32-NEXT:    pushl %edx
; X32-NEXT:    pushl %ecx
; X32-NEXT:    pushl %eax
; X32-NEXT:    pushl %edi
; X32-NEXT:    calll __multf3
; X32-NEXT:    addl $44, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    xorl %eax, %eax
; X32-NEXT:    orl (%esp), %ecx
; X32-NEXT:    sete %al
; X32-NEXT:    addl $20, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    retl
entry:
  %mul = fmul fp128 %ld, %ld
  %0 = bitcast fp128 %mul to i128
  %shift = lshr i128 %0, 32
  %or5 = or i128 %shift, %0
  %or = trunc i128 %or5 to i32
  %cmp = icmp eq i32 %or, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
; If TestBits128 fails due to any llvm or clang change,
; please make sure the original simplified C code will
; be compiled into correct IR and assembly code, not
; just this TestBits128 test case. Better yet, try to
; test the whole libm and its test cases.
}
| |
| ; C code: (compiled with -target x86_64-linux-android) |
| ; typedef long double __float128; |
| ; __float128 TestPair128(unsigned long a, unsigned long b) { |
| ; unsigned __int128 n; |
| ; unsigned __int128 v1 = ((unsigned __int128)a << 64); |
| ; unsigned __int128 v2 = (unsigned __int128)b; |
| ; n = (v1 | v2) + 3; |
| ; return *(__float128*)&n; |
| ; } |
; Builds an i128 from two i64 halves (%a high, %b low), adds 3 with a
; carry into the high half, and bitcasts it to fp128 — checking that the
; integer result is returned through the fp128 return convention
; (xmm0 via stack round-trip, rax:rdx, or an sret pointer on i686).
define fp128 @TestPair128(i64 %a, i64 %b) nounwind {
; X64-LABEL: TestPair128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    addq $3, %rsi
; X64-NEXT:    movq %rsi, -{{[0-9]+}}(%rsp)
; X64-NEXT:    adcq $0, %rdi
; X64-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NEXT:    movaps -{{[0-9]+}}(%rsp), %xmm0
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestPair128:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    movq %rsi, %rax
; X64_NO_MMX-NEXT:    addq $3, %rax
; X64_NO_MMX-NEXT:    adcq $0, %rdi
; X64_NO_MMX-NEXT:    movq %rdi, %rdx
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestPair128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X32-NEXT:    addl $3, %ecx
; X32-NEXT:    adcl $0, %edx
; X32-NEXT:    adcl $0, %esi
; X32-NEXT:    adcl $0, %edi
; X32-NEXT:    movl %edx, 4(%eax)
; X32-NEXT:    movl %ecx, (%eax)
; X32-NEXT:    movl %esi, 8(%eax)
; X32-NEXT:    movl %edi, 12(%eax)
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    retl $4
entry:
  %conv = zext i64 %a to i128
  %shl = shl nuw i128 %conv, 64
  %conv1 = zext i64 %b to i128
  %or = or i128 %shl, %conv1
  %add = add i128 %or, 3
  %0 = bitcast i128 %add to fp128
  ret fp128 %0
}
| |
; Conditional path (n > 50000): fptrunc to double (__trunctfdf2),
; copysign(+inf, x) folded to SSE bit ops (andps/orps) on x86-64 or an
; x87 select on i686, then fpext back to fp128 (__extenddftf2). The
; fall-through path returns %x unchanged via the phi in %cleanup.
define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
; X64-LABEL: TestTruncCopysign:
; X64:       # %bb.0: # %entry
; X64-NEXT:    cmpl $50001, %edi # imm = 0xC351
; X64-NEXT:    jl .LBB17_2
; X64-NEXT:  # %bb.1: # %if.then
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __trunctfdf2
; X64-NEXT:    andps {{.*}}(%rip), %xmm0
; X64-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT:    orps %xmm1, %xmm0
; X64-NEXT:    callq __extenddftf2
; X64-NEXT:    addq $8, %rsp
; X64-NEXT:  .LBB17_2: # %cleanup
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: TestTruncCopysign:
; X64_NO_MMX:       # %bb.0: # %entry
; X64_NO_MMX-NEXT:    movl %edx, %ecx
; X64_NO_MMX-NEXT:    movq %rsi, %rdx
; X64_NO_MMX-NEXT:    movq %rdi, %rax
; X64_NO_MMX-NEXT:    cmpl $50001, %ecx # imm = 0xC351
; X64_NO_MMX-NEXT:    jl .LBB17_2
; X64_NO_MMX-NEXT:  # %bb.1: # %if.then
; X64_NO_MMX-NEXT:    pushq %rax
; X64_NO_MMX-NEXT:    movq %rax, %rdi
; X64_NO_MMX-NEXT:    movq %rdx, %rsi
; X64_NO_MMX-NEXT:    callq __trunctfdf2
; X64_NO_MMX-NEXT:    andps {{.*}}(%rip), %xmm0
; X64_NO_MMX-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; X64_NO_MMX-NEXT:    orps %xmm1, %xmm0
; X64_NO_MMX-NEXT:    callq __extenddftf2
; X64_NO_MMX-NEXT:    addq $8, %rsp
; X64_NO_MMX-NEXT:  .LBB17_2: # %cleanup
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: TestTruncCopysign:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $36, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    cmpl $50001, {{[0-9]+}}(%esp) # imm = 0xC351
; X32-NEXT:    jl .LBB17_4
; X32-NEXT:  # %bb.1: # %if.then
; X32-NEXT:    pushl %eax
; X32-NEXT:    pushl %ecx
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %edx
; X32-NEXT:    calll __trunctfdf2
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    fstpl {{[0-9]+}}(%esp)
; X32-NEXT:    testb $-128, {{[0-9]+}}(%esp)
; X32-NEXT:    flds {{\.LCPI.*}}
; X32-NEXT:    flds {{\.LCPI.*}}
; X32-NEXT:    jne .LBB17_3
; X32-NEXT:  # %bb.2: # %if.then
; X32-NEXT:    fstp %st(1)
; X32-NEXT:    fldz
; X32-NEXT:  .LBB17_3: # %if.then
; X32-NEXT:    fstp %st(0)
; X32-NEXT:    subl $16, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl %eax, (%esp)
; X32-NEXT:    fstpl {{[0-9]+}}(%esp)
; X32-NEXT:    calll __extenddftf2
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X32-NEXT:  .LBB17_4: # %cleanup
; X32-NEXT:    movl %edx, (%esi)
; X32-NEXT:    movl %edi, 4(%esi)
; X32-NEXT:    movl %ecx, 8(%esi)
; X32-NEXT:    movl %eax, 12(%esi)
; X32-NEXT:    movl %esi, %eax
; X32-NEXT:    addl $36, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    retl $4
entry:
  %cmp = icmp sgt i32 %n, 50000
  br i1 %cmp, label %if.then, label %cleanup

if.then:                                          ; preds = %entry
  %conv = fptrunc fp128 %x to double
  %call = tail call double @copysign(double 0x7FF0000000000000, double %conv) #2
  %conv1 = fpext double %call to fp128
  br label %cleanup

cleanup:                                          ; preds = %entry, %if.then
  %retval.0 = phi fp128 [ %conv1, %if.then ], [ %x, %entry ]
  ret fp128 %retval.0
}
| |
; PR34866: icmp eq of an i128 against a bitcast of fp128 zero. The fp128
; constant should constant-fold to integer zero, so no fp libcall is
; emitted — just integer compares (the +mmx config still round-trips the
; constant through the stack; -mmx folds it completely).
define i1 @PR34866(i128 %x) nounwind {
; X64-LABEL: PR34866:
; X64:       # %bb.0:
; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rdi
; X64-NEXT:    orq %rsi, %rdi
; X64-NEXT:    sete %al
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: PR34866:
; X64_NO_MMX:       # %bb.0:
; X64_NO_MMX-NEXT:    orq %rsi, %rdi
; X64_NO_MMX-NEXT:    sete %al
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: PR34866:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    orl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    orl %ecx, %eax
; X32-NEXT:    sete %al
; X32-NEXT:    retl
  %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
  %cmp = icmp eq i128 %bc_mmx, %x
  ret i1 %cmp
}
| |
; Same as PR34866 but with the icmp operands commuted; codegen must match.
define i1 @PR34866_commute(i128 %x) nounwind {
; X64-LABEL: PR34866_commute:
; X64:       # %bb.0:
; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rdi
; X64-NEXT:    orq %rsi, %rdi
; X64-NEXT:    sete %al
; X64-NEXT:    retq
;
; X64_NO_MMX-LABEL: PR34866_commute:
; X64_NO_MMX:       # %bb.0:
; X64_NO_MMX-NEXT:    orq %rsi, %rdi
; X64_NO_MMX-NEXT:    sete %al
; X64_NO_MMX-NEXT:    retq
;
; X32-LABEL: PR34866_commute:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    orl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    orl %ecx, %eax
; X32-NEXT:    sete %al
; X32-NEXT:    retl
  %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
  %cmp = icmp eq i128 %x, %bc_mmx
  ret i1 %cmp
}
| |
| |
; libm copysign, called by TestTruncCopysign.
; NOTE(review): attribute group #1 is referenced here but not defined in
; this chunk — confirm it exists in the full file or the module won't parse.
declare double @copysign(double, double) #1

attributes #2 = { nounwind readnone }