; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mattr=+sve2,+fp8,+fp8fma < %s | FileCheck %s
; RUN: llc -mattr=+sme,+fp8,+ssve-fp8fma --force-streaming < %s | FileCheck %s

target triple = "aarch64-linux"

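; 2-way widening multiply-add: FMLALB/FMLALT multiply FP8 elements and
; accumulate into half-precision, using the even-numbered (bottom) or
; odd-numbered (top) elements of the FP8 sources.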
define <vscale x 8 x half> @fmla_2way_bot(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_2way_bot:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalb z0.h, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.aarch64.sve.fp8.fmlalb.nxv8f16(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @fmla_2way_top(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_2way_top:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalt z0.h, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.aarch64.sve.fp8.fmlalt.nxv8f16(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 8 x half> %r
}

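; Indexed 2-way variants: the '.lane' intrinsics lower to the indexed forms,
; where the second multiplicand is a single FP8 element of z2 selected by the
; immediate index (here 3).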
define <vscale x 8 x half> @fmla_2way_bot_lane(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_2way_bot_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalb z0.h, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.aarch64.sve.fp8.fmlalb.lane.nxv8f16(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @fmla_2way_top_lane(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_2way_top_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalt z0.h, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.aarch64.sve.fp8.fmlalt.lane.nxv8f16(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 8 x half> %r
}

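; 4-way widening multiply-add: FMLALLBB/FMLALLBT/FMLALLTB/FMLALLTT multiply
; FP8 elements and accumulate into single-precision; the BB/BT/TB/TT suffix
; selects which of the four FP8 elements within each 32-bit container is used.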
define <vscale x 4 x float> @fmla_4way_bb(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_bb:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlallbb z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlallbb.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_bt(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_bt:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlallbt z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlallbt.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_tb(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_tb:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalltb z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlalltb.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_tt(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_tt:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalltt z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlalltt.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 4 x float> %r
}

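; Indexed 4-way variants: as above, but the second multiplicand is the FP8
; element of z2 selected by the immediate index (here 3).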
define <vscale x 4 x float> @fmla_4way_bb_lane(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_bb_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlallbb z0.s, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlallbb.lane.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_bt_lane(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_bt_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlallbt z0.s, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlallbt.lane.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_tb_lane(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_tb_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalltb z0.s, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlalltb.lane.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_tt_lane(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_tt_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalltt z0.s, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlalltt.lane.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 4 x float> %r
}