; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+f16c -O3 | FileCheck %s --check-prefixes=AVX,F16C
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX,AVX512
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=X64
declare half @llvm.experimental.constrained.fadd.f16(half, half, metadata, metadata)
declare half @llvm.experimental.constrained.fsub.f16(half, half, metadata, metadata)
declare half @llvm.experimental.constrained.fmul.f16(half, half, metadata, metadata)
declare half @llvm.experimental.constrained.fdiv.f16(half, half, metadata, metadata)
declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata)
declare double @llvm.experimental.constrained.fpext.f64.f16(half, metadata)
declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata)
declare half @llvm.experimental.constrained.fptrunc.f16.f64(double, metadata, metadata)
declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata)
declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata)
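
; Strict (constrained) half-precision fadd. Without native FP16 arithmetic the
; operands are extended via __extendhfsf2 (SSE2) or vcvtph2ps (F16C/AVX512F),
; added as float, and truncated back; AVX512FP16 selects vaddsh directly.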
define half @fadd_f16(half %a, half %b) nounwind strictfp {
; SSE2-LABEL: fadd_f16:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: addss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: popq %rax
; SSE2-NEXT: retq
;
; AVX-LABEL: fadd_f16:
; AVX: # %bb.0:
; AVX-NEXT: vpextrw $0, %xmm0, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %ax, %eax
; AVX-NEXT: vmovd %eax, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
;
; X86-LABEL: fadd_f16:
; X86: # %bb.0:
; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: vaddsh {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: fadd_f16:
; X64: # %bb.0:
; X64-NEXT: vaddsh %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
  %ret = call half @llvm.experimental.constrained.fadd.f16(half %a, half %b,
                                                           metadata !"round.dynamic",
                                                           metadata !"fpexcept.strict") #0
  ret half %ret
}
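
; Strict half fsub: same extend/operate/truncate pattern as fadd, with vsubsh
; on AVX512FP16.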
define half @fsub_f16(half %a, half %b) nounwind strictfp {
; SSE2-LABEL: fsub_f16:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: subss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: popq %rax
; SSE2-NEXT: retq
;
; AVX-LABEL: fsub_f16:
; AVX: # %bb.0:
; AVX-NEXT: vpextrw $0, %xmm0, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %ax, %eax
; AVX-NEXT: vmovd %eax, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
;
; X86-LABEL: fsub_f16:
; X86: # %bb.0:
; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: vsubsh {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: fsub_f16:
; X64: # %bb.0:
; X64-NEXT: vsubsh %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
  %ret = call half @llvm.experimental.constrained.fsub.f16(half %a, half %b,
                                                           metadata !"round.dynamic",
                                                           metadata !"fpexcept.strict") #0
  ret half %ret
}
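
; Strict half fmul: vmulsh on AVX512FP16, float libcall/convert sequence otherwise.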
define half @fmul_f16(half %a, half %b) nounwind strictfp {
; SSE2-LABEL: fmul_f16:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: mulss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: popq %rax
; SSE2-NEXT: retq
;
; AVX-LABEL: fmul_f16:
; AVX: # %bb.0:
; AVX-NEXT: vpextrw $0, %xmm0, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %ax, %eax
; AVX-NEXT: vmovd %eax, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
;
; X86-LABEL: fmul_f16:
; X86: # %bb.0:
; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: vmulsh {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: fmul_f16:
; X64: # %bb.0:
; X64-NEXT: vmulsh %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
  %ret = call half @llvm.experimental.constrained.fmul.f16(half %a, half %b,
                                                           metadata !"round.dynamic",
                                                           metadata !"fpexcept.strict") #0
  ret half %ret
}
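
; Strict half fdiv: vdivsh on AVX512FP16, float libcall/convert sequence otherwise.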
define half @fdiv_f16(half %a, half %b) nounwind strictfp {
; SSE2-LABEL: fdiv_f16:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: divss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: popq %rax
; SSE2-NEXT: retq
;
; AVX-LABEL: fdiv_f16:
; AVX: # %bb.0:
; AVX-NEXT: vpextrw $0, %xmm0, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %ax, %eax
; AVX-NEXT: vmovd %eax, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
;
; X86-LABEL: fdiv_f16:
; X86: # %bb.0:
; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: vdivsh {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: fdiv_f16:
; X64: # %bb.0:
; X64-NEXT: vdivsh %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
  %ret = call half @llvm.experimental.constrained.fdiv.f16(half %a, half %b,
                                                           metadata !"round.dynamic",
                                                           metadata !"fpexcept.strict") #0
  ret half %ret
}
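
; Strict fpext half -> float through memory: __extendhfsf2 libcall on SSE2,
; vcvtph2ps on F16C/AVX512F, vcvtsh2ss on AVX512FP16.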
define void @fpext_f16_to_f32(ptr %val, ptr %ret) nounwind strictfp {
; SSE2-LABEL: fpext_f16_to_f32:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movq %rsi, %rbx
; SSE2-NEXT: pinsrw $0, (%rdi), %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movd %xmm0, (%rbx)
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: retq
;
; AVX-LABEL: fpext_f16_to_f32:
; AVX: # %bb.0:
; AVX-NEXT: movzwl (%rdi), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: vmovss %xmm0, (%rsi)
; AVX-NEXT: retq
;
; X86-LABEL: fpext_f16_to_f32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovsh (%ecx), %xmm0
; X86-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; X86-NEXT: vmovss %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: fpext_f16_to_f32:
; X64: # %bb.0:
; X64-NEXT: vmovsh (%rdi), %xmm0
; X64-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovss %xmm0, (%rsi)
; X64-NEXT: retq
  %1 = load half, ptr %val, align 4
  %res = call float @llvm.experimental.constrained.fpext.f32.f16(half %1,
                                                                 metadata !"fpexcept.strict") #0
  store float %res, ptr %ret, align 8
  ret void
}
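
; Strict fpext half -> double: extended to float first (libcall or vcvtph2ps),
; then cvtss2sd; AVX512FP16 converts directly with vcvtsh2sd.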
define void @fpext_f16_to_f64(ptr %val, ptr %ret) nounwind strictfp {
; SSE2-LABEL: fpext_f16_to_f64:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movq %rsi, %rbx
; SSE2-NEXT: pinsrw $0, (%rdi), %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: cvtss2sd %xmm0, %xmm0
; SSE2-NEXT: movsd %xmm0, (%rbx)
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: retq
;
; AVX-LABEL: fpext_f16_to_f64:
; AVX: # %bb.0:
; AVX-NEXT: movzwl (%rdi), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovsd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; X86-LABEL: fpext_f16_to_f64:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovsh (%ecx), %xmm0
; X86-NEXT: vcvtsh2sd %xmm0, %xmm0, %xmm0
; X86-NEXT: vmovsd %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: fpext_f16_to_f64:
; X64: # %bb.0:
; X64-NEXT: vmovsh (%rdi), %xmm0
; X64-NEXT: vcvtsh2sd %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovsd %xmm0, (%rsi)
; X64-NEXT: retq
  %1 = load half, ptr %val, align 4
  %res = call double @llvm.experimental.constrained.fpext.f64.f16(half %1,
                                                                  metadata !"fpexcept.strict") #0
  store double %res, ptr %ret, align 8
  ret void
}
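
; Strict fptrunc float -> half: __truncsfhf2 libcall on SSE2, vcvtps2ph on
; F16C/AVX512F, vcvtss2sh on AVX512FP16.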
define void @fptrunc_float_to_f16(ptr %val, ptr %ret) nounwind strictfp {
; SSE2-LABEL: fptrunc_float_to_f16:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movq %rsi, %rbx
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rbx)
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: retq
;
; AVX-LABEL: fptrunc_float_to_f16:
; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: movw %ax, (%rsi)
; AVX-NEXT: retq
;
; X86-LABEL: fptrunc_float_to_f16:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; X86-NEXT: vmovsh %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: fptrunc_float_to_f16:
; X64: # %bb.0:
; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovsh %xmm0, (%rsi)
; X64-NEXT: retq
  %1 = load float, ptr %val, align 8
  %res = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %1,
                                                                  metadata !"round.dynamic",
                                                                  metadata !"fpexcept.strict") #0
  store half %res, ptr %ret, align 4
  ret void
}
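
; Strict fptrunc double -> half: both SSE2 and the F16C/AVX512F configurations
; call __truncdfhf2 (a double->float->half sequence could double round), while
; AVX512FP16 converts directly with vcvtsd2sh.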
define void @fptrunc_double_to_f16(ptr %val, ptr %ret) nounwind strictfp {
; SSE2-LABEL: fptrunc_double_to_f16:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movq %rsi, %rbx
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: callq __truncdfhf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rbx)
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: retq
;
; AVX-LABEL: fptrunc_double_to_f16:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: movq %rsi, %rbx
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vpextrw $0, %xmm0, (%rbx)
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
;
; X86-LABEL: fptrunc_double_to_f16:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: vcvtsd2sh %xmm0, %xmm0, %xmm0
; X86-NEXT: vmovsh %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: fptrunc_double_to_f16:
; X64: # %bb.0:
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vcvtsd2sh %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovsh %xmm0, (%rsi)
; X64-NEXT: retq
  %1 = load double, ptr %val, align 8
  %res = call half @llvm.experimental.constrained.fptrunc.f16.f64(double %1,
                                                                  metadata !"round.dynamic",
                                                                  metadata !"fpexcept.strict") #0
  store half %res, ptr %ret, align 4
  ret void
}
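
; Strict half sqrt, read and written through memory: sqrtss on the extended
; float for SSE2/F16C/AVX512F, native vsqrtsh on AVX512FP16.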
define void @fsqrt_f16(ptr %a) nounwind strictfp {
; SSE2-LABEL: fsqrt_f16:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movq %rdi, %rbx
; SSE2-NEXT: pinsrw $0, (%rdi), %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: sqrtss %xmm0, %xmm0
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rbx)
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: retq
;
; AVX-LABEL: fsqrt_f16:
; AVX: # %bb.0:
; AVX-NEXT: movzwl (%rdi), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: movw %ax, (%rdi)
; AVX-NEXT: retq
;
; X86-LABEL: fsqrt_f16:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovsh (%eax), %xmm0
; X86-NEXT: vsqrtsh %xmm0, %xmm0, %xmm0
; X86-NEXT: vmovsh %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: fsqrt_f16:
; X64: # %bb.0:
; X64-NEXT: vmovsh (%rdi), %xmm0
; X64-NEXT: vsqrtsh %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovsh %xmm0, (%rdi)
; X64-NEXT: retq
  %1 = load half, ptr %a, align 4
  %res = call half @llvm.experimental.constrained.sqrt.f16(half %1,
                                                           metadata !"round.dynamic",
                                                           metadata !"fpexcept.strict") #0
  store half %res, ptr %a, align 4
  ret void
}
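
; Strict half fma: SSE2 and F16C extend all three operands and call fmaf;
; AVX512F uses vfmadd213ss on the extended floats; AVX512FP16 selects
; vfmadd213sh.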
define half @fma_f16(half %a, half %b, half %c) nounwind strictfp {
; SSE2-LABEL: fma_f16:
; SSE2: # %bb.0:
; SSE2-NEXT: subq $24, %rsp
; SSE2-NEXT: movss %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; SSE2-NEXT: # xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Reload
; SSE2-NEXT: # xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: callq fmaf@PLT
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: addq $24, %rsp
; SSE2-NEXT: retq
;
; F16C-LABEL: fma_f16:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: vpextrw $0, %xmm0, %eax
; F16C-NEXT: vpextrw $0, %xmm1, %ecx
; F16C-NEXT: vpextrw $0, %xmm2, %edx
; F16C-NEXT: movzwl %dx, %edx
; F16C-NEXT: vmovd %edx, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm2
; F16C-NEXT: movzwl %cx, %ecx
; F16C-NEXT: vmovd %ecx, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm1
; F16C-NEXT: movzwl %ax, %eax
; F16C-NEXT: vmovd %eax, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: callq fmaf@PLT
; F16C-NEXT: vxorps %xmm1, %xmm1, %xmm1
; F16C-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vmovd %xmm0, %eax
; F16C-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; F16C-NEXT: popq %rax
; F16C-NEXT: retq
;
; AVX512-LABEL: fma_f16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpextrw $0, %xmm1, %eax
; AVX512-NEXT: vpextrw $0, %xmm0, %ecx
; AVX512-NEXT: vpextrw $0, %xmm2, %edx
; AVX512-NEXT: movzwl %dx, %edx
; AVX512-NEXT: vmovd %edx, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: movzwl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm1
; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: vmovd %eax, %xmm2
; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm2 = (xmm1 * xmm2) + xmm0
; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X86-LABEL: fma_f16:
; X86: # %bb.0:
; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm1
; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: vfmadd213sh {{[0-9]+}}(%esp), %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: fma_f16:
; X64: # %bb.0:
; X64-NEXT: vfmadd213sh %xmm2, %xmm1, %xmm0
; X64-NEXT: retq
  %res = call half @llvm.experimental.constrained.fma.f16(half %a, half %b, half %c,
                                                          metadata !"round.dynamic",
                                                          metadata !"fpexcept.strict") #0
  ret half %res
}
attributes #0 = { strictfp }