| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 |
| ; RUN: llc < %s | FileCheck %s |
| |
| target triple = "aarch64" |
| |
| ; Scalable bf16 inputs: an fpext of both operands, an fneg of %b, and a fast |
| ; fmul feeding a partial fadd reduction should fold into a widening |
| ; multiply-subtract pair, bfmlslb (even lanes) + bfmlslt (odd lanes). |
| ; Requires the inline "+sve2p1,+bf16" features for the bf16 forms. |
| define <vscale x 4 x float> @fmlslbt_bf16_f32(<vscale x 4 x float> %acc, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) "target-features"="+sve2p1,+bf16" { |
| ; CHECK-LABEL: fmlslbt_bf16_f32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: bfmlslb z0.s, z1.h, z2.h |
| ; CHECK-NEXT: bfmlslt z0.s, z1.h, z2.h |
| ; CHECK-NEXT: ret |
| ; Widen both bf16 operands to f32, negate %b, multiply, then fold the |
| ; <vscale x 8 x float> product into the <vscale x 4 x float> accumulator. |
| %a.fpext = fpext <vscale x 8 x bfloat> %a to <vscale x 8 x float> |
| %b.fpext = fpext <vscale x 8 x bfloat> %b to <vscale x 8 x float> |
| %b.fpext.neg = fneg <vscale x 8 x float> %b.fpext |
| %mul = fmul fast <vscale x 8 x float> %a.fpext, %b.fpext.neg |
| %res = call fast <vscale x 4 x float> @llvm.vector.partial.reduce.fadd(<vscale x 4 x float> %acc, <vscale x 8 x float> %mul) |
| ret <vscale x 4 x float> %res |
| } |
| |
| ; Same pattern as above but with f16 inputs and only baseline SVE2 (#0): |
| ; expected to lower to the fmlslb/fmlslt widening multiply-subtract pair. |
| define <vscale x 4 x float> @fmlslbt_f16_f32(<vscale x 4 x float> %acc, <vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 { |
| ; CHECK-LABEL: fmlslbt_f16_f32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fmlslb z0.s, z1.h, z2.h |
| ; CHECK-NEXT: fmlslt z0.s, z1.h, z2.h |
| ; CHECK-NEXT: ret |
| ; Widen both f16 operands to f32, negate %b, multiply, then partially reduce |
| ; the <vscale x 8 x float> product into the <vscale x 4 x float> accumulator. |
| %a.fpext = fpext <vscale x 8 x half> %a to <vscale x 8 x float> |
| %b.fpext = fpext <vscale x 8 x half> %b to <vscale x 8 x float> |
| %b.fpext.neg = fneg <vscale x 8 x float> %b.fpext |
| %mul = fmul fast <vscale x 8 x float> %a.fpext, %b.fpext.neg |
| %res = call fast <vscale x 4 x float> @llvm.vector.partial.reduce.fadd(<vscale x 4 x float> %acc, <vscale x 8 x float> %mul) |
| ret <vscale x 4 x float> %res |
| } |
| |
| ; Negated extend with no explicit multiply: per the CHECK lines, the lone |
| ; fneg is folded by materializing a -1.0 splat (fmov z2.h, #-1.0) and using |
| ; the additive fmlalb/fmlalt pair, i.e. acc += (-1.0) * ext(%a). |
| define <vscale x 4 x float> @fmlslbt_f16_f32_extended_fadd(<vscale x 4 x float> %acc, <vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 { |
| ; CHECK-LABEL: fmlslbt_f16_f32_extended_fadd: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fmov z2.h, #-1.00000000 |
| ; CHECK-NEXT: fmlalb z0.s, z1.h, z2.h |
| ; CHECK-NEXT: fmlalt z0.s, z1.h, z2.h |
| ; CHECK-NEXT: ret |
| ; Note: %b is intentionally unused; only the extended, negated %a is reduced. |
| %a.fpext = fpext <vscale x 8 x half> %a to <vscale x 8 x float> |
| %neg = fneg <vscale x 8 x float> %a.fpext |
| %res = call fast <vscale x 4 x float> @llvm.vector.partial.reduce.fadd(<vscale x 4 x float> %acc, <vscale x 8 x float> %neg) |
| ret <vscale x 4 x float> %res |
| } |
| |
| ; |
| ; Fixed-length tests |
| ; |
| |
| ; FIXME: This could use SVE2p1's bfmlslb/t |
| ; Fixed-length bf16 variant. Current lowering (per CHECK) negates %b by |
| ; flipping the per-lane sign bit (movi #128, lsl #8 builds a 0x8000 splat, |
| ; then eor), and uses the additive NEON bfmlalb/bfmlalt pair instead of the |
| ; subtractive SVE2p1 bfmlslb/bfmlslt the FIXME above asks for. |
| define <4 x float> @fixed_fmlslbt_bf16_f32(<4 x float> %acc, <8 x bfloat> %a, <8 x bfloat> %b) "target-features"="+sve2p1,+bf16" { |
| ; CHECK-LABEL: fixed_fmlslbt_bf16_f32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: movi v3.8h, #128, lsl #8 |
| ; CHECK-NEXT: eor v2.16b, v2.16b, v3.16b |
| ; CHECK-NEXT: bfmlalb v0.4s, v1.8h, v2.8h |
| ; CHECK-NEXT: bfmlalt v0.4s, v1.8h, v2.8h |
| ; CHECK-NEXT: ret |
| ; Same IR shape as the scalable test: extend, negate %b, multiply, then |
| ; partially reduce <8 x float> into the <4 x float> accumulator. |
| %a.fpext = fpext <8 x bfloat> %a to <8 x float> |
| %b.fpext = fpext <8 x bfloat> %b to <8 x float> |
| %b.fpext.neg = fneg <8 x float> %b.fpext |
| %mul = fmul fast <8 x float> %a.fpext, %b.fpext.neg |
| %res = call fast <4 x float> @llvm.vector.partial.reduce.fadd(<4 x float> %acc, <8 x float> %mul) |
| ret <4 x float> %res |
| } |
| |
| ; Fixed-length f16 variant. Current lowering (per CHECK) negates %b with a |
| ; NEON fneg, then uses the SVE fmlalb/fmlalt pair on the z-register views of |
| ; the fixed vectors (hence the kill/def register annotations). |
| ; NOTE(review): this function carries both attribute group #0 (+sve2) and an |
| ; inline "target-features"="+sve2p1,+bf16" string — the other fixed-length |
| ; test uses only the inline string; confirm the intended feature set. |
| define <4 x float> @fixed_fmlslbt_f16_f32(<4 x float> %acc, <8 x half> %a, <8 x half> %b) #0 "target-features"="+sve2p1,+bf16" { |
| ; CHECK-LABEL: fixed_fmlslbt_f16_f32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fneg v2.8h, v2.8h |
| ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 |
| ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 |
| ; CHECK-NEXT: fmlalb z0.s, z1.h, z2.h |
| ; CHECK-NEXT: fmlalt z0.s, z1.h, z2.h |
| ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 |
| ; CHECK-NEXT: ret |
| ; Same IR shape as the scalable test: extend, negate %b, multiply, then |
| ; partially reduce <8 x float> into the <4 x float> accumulator. |
| %a.fpext = fpext <8 x half> %a to <8 x float> |
| %b.fpext = fpext <8 x half> %b to <8 x float> |
| %b.fpext.neg = fneg <8 x float> %b.fpext |
| %mul = fmul fast <8 x float> %a.fpext, %b.fpext.neg |
| %res = call fast <4 x float> @llvm.vector.partial.reduce.fadd(<4 x float> %acc, <8 x float> %mul) |
| ret <4 x float> %res |
| } |
| |
| ; Shared feature set for the f16 tests: baseline SVE2 (no SVE2p1, no bf16). |
| attributes #0 = { "target-features"="+sve2" } |