; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
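; These tests exercise the "undef" (*.u) variants of the SVE floating-point
; arithmetic intrinsics, which leave inactive lanes undefined. That freedom
; lets instruction selection pick whichever destructive form (including the
; reversed and multiply-accumulate forms below) keeps the result in z0
; without extra moves.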

;
; FABD
;

define <vscale x 8 x half> @fabd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fabd_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fabd.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fabd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fabd_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fabd.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fabd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fabd_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fabd.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FADD
;

define <vscale x 8 x half> @fadd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fadd_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fadd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fadd_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fadd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fadd_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FDIV
;

define <vscale x 8 x half> @fdiv_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fdiv_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fdiv.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fdiv_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fdiv_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fdiv.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fdiv_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fdiv_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fdiv.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FDIVR
;
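; The fdivr_* tests call fdiv.u with the operands swapped; selecting the
; reversed form (FDIVR) keeps the computation destructive on z0.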

define <vscale x 8 x half> @fdivr_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fdivr_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdivr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fdiv.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %b, <vscale x 8 x half> %a)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fdivr_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fdivr_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdivr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fdiv.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %b, <vscale x 4 x float> %a)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fdivr_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fdivr_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdivr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fdiv.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %b, <vscale x 2 x double> %a)
  ret <vscale x 2 x double> %out
}

;
; FMAD
;
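; The fmad_* tests pass the accumulator as the first fmla.u operand, so
; selecting FMAD (destructive on the multiplicand already in z0) avoids a
; MOVPRFX.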

define <vscale x 8 x half> @fmad_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmad_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmla.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmad_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmad_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmla.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmad_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmad_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmla.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAX
;

define <vscale x 8 x half> @fmax_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmax_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmax_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmax_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmax_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmax_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAXNM
;

define <vscale x 8 x half> @fmaxnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnm_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmaxnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnm_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmaxnm_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnm_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMIN
;

define <vscale x 8 x half> @fmin_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmin_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmin_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmin_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmin_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmin_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINNM
;

define <vscale x 8 x half> @fminnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnm_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnm_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminnm_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnm_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMLA
;

define <vscale x 8 x half> @fmla_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmla_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmla.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmla_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmla_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmla.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmla_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmla_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmla.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FMLS
;

define <vscale x 8 x half> @fmls_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmls_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmls.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmls_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmls_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmls.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmls_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmls_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmls z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmls.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FMSB
;
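; As with FMAD, passing the accumulator as the first fmls.u operand lets
; isel pick FMSB, destructive on the multiplicand already in z0.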

define <vscale x 8 x half> @fmsb_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmsb_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmls.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmsb_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmsb_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmls.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmsb_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmsb_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmsb z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmls.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMUL
;

define <vscale x 8 x half> @fmul_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmul_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmul_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmul_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmul_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmul_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMULX
;

define <vscale x 8 x half> @fmulx_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmulx_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmulx z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmulx.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmulx_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmulx_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmulx z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmulx.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmulx_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmulx_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmulx z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmulx.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FNMAD
;
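; The fnmad_* tests pass the accumulator as the first fnmla.u operand, so
; isel picks FNMAD, destructive on the multiplicand already in z0.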

define <vscale x 8 x half> @fnmad_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fnmad_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fnmla.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fnmad_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fnmad_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fnmla.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fnmad_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fnmad_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fnmla.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FNMLA
;

define <vscale x 8 x half> @fnmla_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fnmla_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fnmla.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fnmla_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fnmla_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fnmla.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fnmla_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fnmla_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fnmla.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FNMLS
;

define <vscale x 8 x half> @fnmls_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fnmls_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fnmls.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fnmls_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fnmls_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fnmls.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fnmls_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fnmls_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmls z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fnmls.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FNMSB
;
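; The fnmsb_* tests pass the accumulator as the first fnmls.u operand, so
; isel picks FNMSB, destructive on the multiplicand already in z0.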

define <vscale x 8 x half> @fnmsb_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fnmsb_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fnmls.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fnmsb_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fnmsb_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fnmls.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fnmsb_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fnmsb_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmsb z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fnmls.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUB
;

define <vscale x 8 x half> @fsub_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsub_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsub_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsub_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsub_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsub_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUBR
;
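; The fsubr_* tests call fsub.u with the operands swapped; selecting the
; reversed form (FSUBR) keeps the computation destructive on z0.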

define <vscale x 8 x half> @fsubr_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsubr_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %b, <vscale x 8 x half> %a)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsubr_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsubr_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %b, <vscale x 4 x float> %a)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsubr_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsubr_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %b, <vscale x 2 x double> %a)
  ret <vscale x 2 x double> %out
}

declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdiv.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdiv.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdiv.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmla.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmla.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmla.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmls.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmls.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmls.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmulx.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmulx.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmulx.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fnmla.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fnmla.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fnmla.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fnmls.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fnmls.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fnmls.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)