; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mattr=+sve %s -o - | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mattr=+sme -force-streaming %s -o - | FileCheck %s --check-prefixes=CHECK

target triple = "aarch64"

; Tests that fneg feeding llvm.fmuladd selects the SVE negated
; multiply-accumulate instructions (fnmsb/fnmls/fnmad/fnmla), both for
; scalable vectors and for 128-bit fixed-length vectors lowered via SVE.
; The same codegen is expected with +sve and with +sme in streaming mode.

; A * B + C
; Negate only the addend:
; a*b + (-c) = a*b - c: expect predicated FNMSB (Zdn = Zdn*Zm - Za).
define <vscale x 2 x double> @fma_negC_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fma_negC_nxv2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fnmsb z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
entry:
  %neg = fneg <vscale x 2 x double> %c
  %0 = tail call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %neg)
  ret <vscale x 2 x double> %0
}

; Same fold as the f64 case, for nxv4f32: expect FNMSB on .s elements.
define <vscale x 4 x float> @fma_negC_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fma_negC_nxv4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fnmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
entry:
  %neg = fneg <vscale x 4 x float> %c
  %0 = tail call <vscale x 4 x float> @llvm.fmuladd(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %neg)
  ret <vscale x 4 x float> %0
}

; Same fold as the f64 case, for nxv8f16: expect FNMSB on .h elements.
define <vscale x 8 x half> @fma_negC_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fma_negC_nxv8f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fnmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
entry:
  %neg = fneg <vscale x 8 x half> %c
  %0 = tail call <vscale x 8 x half> @llvm.fmuladd(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %neg)
  ret <vscale x 8 x half> %0
}

; Fixed-length v2f64 lowered via SVE with a vl2 predicate; the q-register
; arguments are the low 128 bits of z-registers (hence the kill comments).
define <2 x double> @fma_negC_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: fma_negC_v2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d, vl2
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmsb z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <2 x double> %c
  %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %b, <2 x double> %neg)
  ret <2 x double> %0
}

; Fixed-length v4f32 lowered via SVE with a vl4 predicate: expect FNMSB.
define <4 x float> @fma_negC_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: fma_negC_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <4 x float> %c
  %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %b, <4 x float> %neg)
  ret <4 x float> %0
}

; Fixed-length v8f16 lowered via SVE with a vl8 predicate: expect FNMSB.
define <8 x half> @fma_negC_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: fma_negC_v8f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h, vl8
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <8 x half> %c
  %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %a, <8 x half> %b, <8 x half> %neg)
  ret <8 x half> %0
}

; Argument order (c, a, b) places the negated addend in z0, so the
; accumulator form FNMLS (Zda = Zn*Zm - Zda) is selected instead of FNMSB.
define <4 x float> @fma_negC_commutative_v4f32(<4 x float> %c, <4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fma_negC_commutative_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <4 x float> %c
  %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %b, <4 x float> %neg)
  ret <4 x float> %0
}

; Negate one multiplicand (A or B) and the addend (C):

; (-a)*b + (-c) = -(a*b + c): expect predicated FNMAD (Zdn = -(Zdn*Zm + Za)).
define <vscale x 2 x double> @fma_negA_negC_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fma_negA_negC_nxv2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fnmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
entry:
  %neg = fneg <vscale x 2 x double> %a
  %neg1 = fneg <vscale x 2 x double> %c
  %0 = tail call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %neg, <vscale x 2 x double> %b, <vscale x 2 x double> %neg1)
  ret <vscale x 2 x double> %0
}

; Same negA+negC fold for nxv4f32: expect FNMAD on .s elements.
define <vscale x 4 x float> @fma_negA_negC_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fma_negA_negC_nxv4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fnmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
entry:
  %neg = fneg <vscale x 4 x float> %a
  %neg1 = fneg <vscale x 4 x float> %c
  %0 = tail call <vscale x 4 x float> @llvm.fmuladd(<vscale x 4 x float> %neg, <vscale x 4 x float> %b, <vscale x 4 x float> %neg1)
  ret <vscale x 4 x float> %0
}

; Same negA+negC fold for nxv8f16: expect FNMAD on .h elements.
define <vscale x 8 x half> @fma_negA_negC_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fma_negA_negC_nxv8f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fnmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
entry:
  %neg = fneg <vscale x 8 x half> %a
  %neg1 = fneg <vscale x 8 x half> %c
  %0 = tail call <vscale x 8 x half> @llvm.fmuladd(<vscale x 8 x half> %neg, <vscale x 8 x half> %b, <vscale x 8 x half> %neg1)
  ret <vscale x 8 x half> %0
}

; Fixed-length v2f64 variant of the negA+negC fold: expect FNMAD under vl2.
define <2 x double> @fma_negA_negC_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: fma_negA_negC_v2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d, vl2
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <2 x double> %a
  %neg1 = fneg <2 x double> %c
  %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %neg, <2 x double> %b, <2 x double> %neg1)
  ret <2 x double> %0
}

; Fixed-length v4f32 variant of the negA+negC fold: expect FNMAD under vl4.
define <4 x float> @fma_negA_negC_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: fma_negA_negC_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <4 x float> %a
  %neg1 = fneg <4 x float> %c
  %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %neg, <4 x float> %b, <4 x float> %neg1)
  ret <4 x float> %0
}

; Fixed-length v8f16 variant of the negA+negC fold: expect FNMAD under vl8.
define <8 x half> @fma_negA_negC_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: fma_negA_negC_v8f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h, vl8
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <8 x half> %a
  %neg1 = fneg <8 x half> %c
  %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %neg, <8 x half> %b, <8 x half> %neg1)
  ret <8 x half> %0
}

; Argument order (c, a, b) places the negated addend in z0, so the
; accumulator form FNMLA (Zda = -(Zda + Zn*Zm)) is selected instead of FNMAD.
define <4 x float> @fma_negA_negC_commutative_v4f32(<4 x float> %c, <4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fma_negA_negC_commutative_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <4 x float> %a
  %neg1 = fneg <4 x float> %c
  %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %neg, <4 x float> %b, <4 x float> %neg1)
  ret <4 x float> %0
}

; a*(-b) + (-c) = -(a*b + c): same FNMAD selection as the negA case,
; since negating either multiplicand negates the product.
define <vscale x 2 x double> @fma_negB_negC_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fma_negB_negC_nxv2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fnmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
entry:
  %neg = fneg <vscale x 2 x double> %b
  %neg1 = fneg <vscale x 2 x double> %c
  %0 = tail call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %a, <vscale x 2 x double> %neg, <vscale x 2 x double> %neg1)
  ret <vscale x 2 x double> %0
}

; Same negB+negC fold for nxv4f32: expect FNMAD on .s elements.
define <vscale x 4 x float> @fma_negB_negC_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fma_negB_negC_nxv4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fnmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
entry:
  %neg = fneg <vscale x 4 x float> %b
  %neg1 = fneg <vscale x 4 x float> %c
  %0 = tail call <vscale x 4 x float> @llvm.fmuladd(<vscale x 4 x float> %a, <vscale x 4 x float> %neg, <vscale x 4 x float> %neg1)
  ret <vscale x 4 x float> %0
}

; Same negB+negC fold for nxv8f16: expect FNMAD on .h elements.
define <vscale x 8 x half> @fma_negB_negC_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fma_negB_negC_nxv8f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fnmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
entry:
  %neg = fneg <vscale x 8 x half> %b
  %neg1 = fneg <vscale x 8 x half> %c
  %0 = tail call <vscale x 8 x half> @llvm.fmuladd(<vscale x 8 x half> %a, <vscale x 8 x half> %neg, <vscale x 8 x half> %neg1)
  ret <vscale x 8 x half> %0
}

; Fixed-length v2f64 variant of the negB+negC fold: expect FNMAD under vl2.
define <2 x double> @fma_negB_negC_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: fma_negB_negC_v2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d, vl2
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <2 x double> %b
  %neg1 = fneg <2 x double> %c
  %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %neg, <2 x double> %neg1)
  ret <2 x double> %0
}

; Fixed-length v4f32 variant of the negB+negC fold: expect FNMAD under vl4.
define <4 x float> @fma_negB_negC_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: fma_negB_negC_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <4 x float> %b
  %neg1 = fneg <4 x float> %c
  %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %neg, <4 x float> %neg1)
  ret <4 x float> %0
}

; Fixed-length v8f16 variant of the negB+negC fold: expect FNMAD under vl8.
define <8 x half> @fma_negB_negC_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: fma_negB_negC_v8f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h, vl8
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <8 x half> %b
  %neg1 = fneg <8 x half> %c
  %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %a, <8 x half> %neg, <8 x half> %neg1)
  ret <8 x half> %0
}

; Argument order (c, a, b) places the negated addend in z0, so the
; accumulator form FNMLA (Zda = -(Zda + Zn*Zm)) is selected instead of FNMAD.
define <4 x float> @fma_negB_negC_commutative_v4f32(<4 x float> %c, <4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fma_negB_negC_commutative_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    fnmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %neg = fneg <4 x float> %b
  %neg1 = fneg <4 x float> %c
  %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %neg, <4 x float> %neg1)
  ret <4 x float> %0
}