| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=i686-unknown-unknown -O3 | FileCheck %s --check-prefixes=X86 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=X64 |
| |
; Constrained fma on x86_fp80: lowered to a libcall to fmal on both targets
; (three fldt/fstpt pairs marshal the 80-bit args onto the outgoing stack).
define x86_fp80 @fma(x86_fp80 %x, x86_fp80 %y, x86_fp80 %z) nounwind strictfp {
; X86-LABEL: fma:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $36, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll fmal
; X86-NEXT:    addl $36, %esp
; X86-NEXT:    retl
;
; X64-LABEL: fma:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $56, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq fmal@PLT
; X64-NEXT:    addq $56, %rsp
; X64-NEXT:    retq
entry:
  %fma = call x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80 %x, x86_fp80 %y, x86_fp80 %z, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %fma
}
| |
; Constrained frem on x86_fp80: lowered to a libcall to fmodl (no native
; strict-FP frem lowering is used here).
define x86_fp80 @frem(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
; X86-LABEL: frem:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $24, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll fmodl
; X86-NEXT:    addl $24, %esp
; X86-NEXT:    retl
;
; X64-LABEL: frem:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $40, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq fmodl@PLT
; X64-NEXT:    addq $40, %rsp
; X64-NEXT:    retq
entry:
  %div = call x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80 %x, x86_fp80 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %div
}
| |
; Constrained ceil on x86_fp80: lowered to a libcall to ceill.
define x86_fp80 @ceil(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: ceil:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll ceill
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: ceil:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq ceill@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %ceil = call x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %ceil
}
| |
; Constrained cos on x86_fp80: lowered to a libcall to cosl.
define x86_fp80 @cos(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: cos:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll cosl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: cos:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq cosl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %cos = call x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %cos
}
| |
; Constrained exp on x86_fp80: lowered to a libcall to expl.
define x86_fp80 @exp(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: exp:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll expl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: exp:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq expl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %exp = call x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %exp
}
| |
; Constrained exp2 on x86_fp80: lowered to a libcall to exp2l.
define x86_fp80 @exp2(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: exp2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll exp2l
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: exp2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq exp2l@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %exp2 = call x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %exp2
}
| |
; Constrained floor on x86_fp80: lowered to a libcall to floorl.
define x86_fp80 @floor(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: floor:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll floorl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: floor:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq floorl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %floor = call x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %floor
}
| |
; Constrained log on x86_fp80: lowered to a libcall to logl.
define x86_fp80 @log(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: log:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll logl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: log:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq logl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %log = call x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %log
}
| |
; Constrained log10 on x86_fp80: lowered to a libcall to log10l.
define x86_fp80 @log10(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: log10:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll log10l
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: log10:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq log10l@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %log10 = call x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %log10
}
| |
; Constrained log2 on x86_fp80: lowered to a libcall to log2l.
define x86_fp80 @log2(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: log2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll log2l
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: log2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq log2l@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %log2 = call x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %log2
}
| |
; Constrained maxnum on x86_fp80: lowered to a libcall to fmaxl.
define x86_fp80 @maxnum(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
; X86-LABEL: maxnum:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $24, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll fmaxl
; X86-NEXT:    addl $24, %esp
; X86-NEXT:    retl
;
; X64-LABEL: maxnum:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $40, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq fmaxl@PLT
; X64-NEXT:    addq $40, %rsp
; X64-NEXT:    retq
entry:
  %maxnum = call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %x, x86_fp80 %y, metadata !"fpexcept.strict") #0
  ret x86_fp80 %maxnum
}
| |
; Constrained minnum on x86_fp80: lowered to a libcall to fminl.
define x86_fp80 @minnum(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
; X86-LABEL: minnum:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $24, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll fminl
; X86-NEXT:    addl $24, %esp
; X86-NEXT:    retl
;
; X64-LABEL: minnum:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $40, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq fminl@PLT
; X64-NEXT:    addq $40, %rsp
; X64-NEXT:    retq
entry:
  %minnum = call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %x, x86_fp80 %y, metadata !"fpexcept.strict") #0
  ret x86_fp80 %minnum
}
| |
; Constrained nearbyint on x86_fp80: lowered to a libcall to nearbyintl.
define x86_fp80 @nearbyint(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: nearbyint:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll nearbyintl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: nearbyint:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq nearbyintl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %nearbyint = call x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %nearbyint
}
| |
; Constrained pow on x86_fp80: lowered to a libcall to powl.
define x86_fp80 @pow(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
; X86-LABEL: pow:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $24, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll powl
; X86-NEXT:    addl $24, %esp
; X86-NEXT:    retl
;
; X64-LABEL: pow:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $40, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq powl@PLT
; X64-NEXT:    addq $40, %rsp
; X64-NEXT:    retq
entry:
  %pow = call x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80 %x, x86_fp80 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %pow
}
| |
; Constrained powi on x86_fp80 with an i32 exponent: lowered to the
; compiler-rt libcall __powixf2 (on X64 the i32 arg stays in a register,
; so only the fp80 is spilled to the outgoing stack).
define x86_fp80 @powi(x86_fp80 %x, i32 %y) nounwind strictfp {
; X86-LABEL: powi:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    wait
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll __powixf2
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    retl
;
; X64-LABEL: powi:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq __powixf2@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %powi = call x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80 %x, i32 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %powi
}
| |
; Constrained rint on x86_fp80: lowered to a libcall to rintl.
define x86_fp80 @rint(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: rint:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll rintl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: rint:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq rintl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %rint = call x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %rint
}
| |
; Constrained round on x86_fp80: lowered to a libcall to roundl.
define x86_fp80 @round(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: round:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll roundl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: round:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq roundl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %round = call x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %round
}
| |
; Constrained roundeven on x86_fp80: lowered to a libcall to roundevenl.
define x86_fp80 @roundeven(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: roundeven:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll roundevenl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: roundeven:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq roundevenl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %roundeven = call x86_fp80 @llvm.experimental.constrained.roundeven.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %roundeven
}
| |
; Constrained sin on x86_fp80: lowered to a libcall to sinl.
define x86_fp80 @sin(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: sin:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll sinl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: sin:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq sinl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %sin = call x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %sin
}
| |
; Constrained trunc on x86_fp80: lowered to a libcall to truncl.
define x86_fp80 @trunc(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: trunc:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll truncl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: trunc:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq truncl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %trunc = call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %trunc
}
| |
; Constrained lrint (fp80 -> i32 result): lowered to a libcall to lrintl.
define i32 @lrint(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: lrint:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll lrintl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: lrint:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq lrintl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %rint = call i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret i32 %rint
}
| |
; Constrained llrint (fp80 -> i64 result): lowered to a libcall to llrintl.
define i64 @llrint(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: llrint:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll llrintl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: llrint:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq llrintl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %rint = call i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret i64 %rint
}
| |
; Constrained lround (fp80 -> i32 result): lowered to a libcall to lroundl.
define i32 @lround(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: lround:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll lroundl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: lround:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq lroundl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %round = call i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret i32 %round
}
| |
; Constrained llround (fp80 -> i64 result): lowered to a libcall to llroundl.
define i64 @llround(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: llround:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll llroundl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: llround:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq llroundl@PLT
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %round = call i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret i64 %round
}
| |
| attributes #0 = { strictfp } |
| |
| declare x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80, x86_fp80, x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80, x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80, x86_fp80, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80, x86_fp80, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80, x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80, i32, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.roundeven.f80(x86_fp80, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80, metadata, metadata) |
| declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata) |
| declare i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80, metadata, metadata) |
| declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata) |
| declare i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80, metadata) |
| declare i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80, metadata) |